Compare commits
1 commit
...
travis/gro

| Author | SHA1 | Date |
|---|---|---|
| michaelkay | b024acffea | |
@@ -15,7 +15,6 @@
# limitations under the License.

import logging

from synapse.storage.engines import create_engine

logger = logging.getLogger("create_postgres_db")

@@ -6,11 +6,8 @@
set -ex

apt-get update
apt-get install -y python3.5 python3.5-dev python3-pip libxml2-dev libxslt-dev xmlsec1 zlib1g-dev tox
apt-get install -y python3.5 python3.5-dev python3-pip libxml2-dev libxslt-dev zlib1g-dev tox

export LANG="C.UTF-8"

# Prevent virtualenv from auto-updating pip to an incompatible version
export VIRTUALENV_NO_DOWNLOAD=1

exec tox -e py35-old,combine
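
For context, a rough way to exercise this script locally is to run it inside a container whose package archives still carry Python 3.5; the base image and the script's path within the repository are assumptions here, not something stated in the diff above.

```sh
# Sketch: run the old-deps test script in a throwaway Ubuntu 16.04
# container with the checkout mounted at /src (image and path assumed).
docker run --rm -v "$(pwd)":/src -w /src ubuntu:xenial \
    bash .buildkite/scripts/test_old_deps.sh
```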
Binary file not shown.
@@ -1,10 +1,41 @@
# This file serves as a blacklist for SyTest tests that we expect will fail in
# Synapse when run under worker mode. For more details, see sytest-blacklist.

Message history can be paginated

Can re-join room if re-invited

The only membership state included in an initial sync is for all the senders in the timeline

Local device key changes get to remote servers

If remote user leaves room we no longer receive device updates

Forgotten room messages cannot be paginated

Inbound federation can get public room list

Members from the gap are included in gappy incr LL sync

Leaves are present in non-gapped incremental syncs

Old leaves are present in gapped incremental syncs

User sees updates to presence from other users in the incremental sync.

Gapped incremental syncs include all state changes

Old members are included in gappy incr LL sync if they start speaking

# new failures as of https://github.com/matrix-org/sytest/pull/732
Device list doesn't change if remote server is down
Remote servers cannot set power levels in rooms without existing powerlevels
Remote servers should reject attempts by non-creators to set the power levels

# https://buildkite.com/matrix-dot-org/synapse/builds/6134#6f67bf47-e234-474d-80e8-c6e1868b15c5
Server correctly handles incoming m.device_list_update

# this fails reliably with a torture level of 100 due to https://github.com/matrix-org/synapse/issues/6536
Outbound federation requests missing prev_events and then asks for /state_ids and resolves the state

Can get rooms/{roomId}/members at a given point

@@ -1,35 +1,22 @@
version: 2.1
version: 2
jobs:
dockerhubuploadrelease:
docker:
- image: docker:git
machine: true
steps:
- checkout
- docker_prepare
- run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:${CIRCLE_TAG} .
- run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
# for release builds, we want to get the amd64 image out asap, so first
# we do an amd64-only build, before following up with a multiarch build.
- docker_build:
tag: -t matrixdotorg/synapse:${CIRCLE_TAG}
platforms: linux/amd64
- docker_build:
tag: -t matrixdotorg/synapse:${CIRCLE_TAG}
platforms: linux/amd64,linux/arm64

- run: docker push matrixdotorg/synapse:${CIRCLE_TAG}
dockerhubuploadlatest:
docker:
- image: docker:git
machine: true
steps:
- checkout
- docker_prepare
- run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:latest .
- run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
# for `latest`, we don't want the arm images to disappear, so don't update the tag
# until all of the platforms are built.
- docker_build:
tag: -t matrixdotorg/synapse:latest
platforms: linux/amd64,linux/arm64
- run: docker push matrixdotorg/synapse:latest

workflows:
version: 2
build:
jobs:
- dockerhubuploadrelease:
@@ -42,37 +29,3 @@ workflows:
filters:
branches:
only: master

commands:
docker_prepare:
description: Sets up a remote docker server, downloads the buildx cli plugin, and enables multiarch images
parameters:
buildx_version:
type: string
default: "v0.4.1"
steps:
- setup_remote_docker:
# 19.03.13 was the most recent available on circleci at the time of
# writing.
version: 19.03.13
- run: apk add --no-cache curl
- run: mkdir -vp ~/.docker/cli-plugins/ ~/dockercache
- run: curl --silent -L "https://github.com/docker/buildx/releases/download/<< parameters.buildx_version >>/buildx-<< parameters.buildx_version >>.linux-amd64" > ~/.docker/cli-plugins/docker-buildx
- run: chmod a+x ~/.docker/cli-plugins/docker-buildx
# install qemu links in /proc/sys/fs/binfmt_misc on the docker instance running the circleci job
- run: docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
# create a context named `builder` for the builds
- run: docker context create builder
# create a buildx builder using the new context, and set it as the default
- run: docker buildx create builder --use

docker_build:
description: Builds and pushed images to dockerhub using buildx
parameters:
platforms:
type: string
default: linux/amd64
tag:
type: string
steps:
- run: docker buildx build -f docker/Dockerfile --push --platform << parameters.platforms >> --label gitsha1=${CIRCLE_SHA1} << parameters.tag >> --progress=plain .
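
To make the `docker_build` command above concrete, this is roughly what its `run` step expands to when the release job calls it with the multiarch parameters shown earlier; it is plain parameter substitution, not an extra step in the config.

```sh
# docker_build as invoked by dockerhubuploadrelease, with parameters
# substituted: push a multiarch image tagged with the release tag.
docker buildx build -f docker/Dockerfile --push \
    --platform linux/amd64,linux/arm64 \
    --label gitsha1=${CIRCLE_SHA1} \
    -t matrixdotorg/synapse:${CIRCLE_TAG} \
    --progress=plain .
```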

3 .gitignore (vendored)
@@ -12,18 +12,15 @@
_trial_temp/
_trial_temp*/
/out
.DS_Store

# stuff that is likely to exist when you run a server locally
/*.db
/*.log
/*.log.*
/*.log.config
/*.pid
/.python-version
/*.signing.key
/env/
/.venv*/
/homeserver*.yaml
/logs
/media_store/

1542 CHANGES.md (file diff suppressed because it is too large)
255 CONTRIBUTING.md
@@ -1,31 +1,4 @@
|
||||
Welcome to Synapse
|
||||
|
||||
This document aims to get you started with contributing to this repo!
|
||||
|
||||
- [1. Who can contribute to Synapse?](#1-who-can-contribute-to-synapse)
|
||||
- [2. What do I need?](#2-what-do-i-need)
|
||||
- [3. Get the source.](#3-get-the-source)
|
||||
- [4. Install the dependencies](#4-install-the-dependencies)
|
||||
* [Under Unix (macOS, Linux, BSD, ...)](#under-unix-macos-linux-bsd-)
|
||||
* [Under Windows](#under-windows)
|
||||
- [5. Get in touch.](#5-get-in-touch)
|
||||
- [6. Pick an issue.](#6-pick-an-issue)
|
||||
- [7. Turn coffee and documentation into code and documentation!](#7-turn-coffee-and-documentation-into-code-and-documentation)
|
||||
- [8. Test, test, test!](#8-test-test-test)
|
||||
* [Run the linters.](#run-the-linters)
|
||||
* [Run the unit tests.](#run-the-unit-tests)
|
||||
* [Run the integration tests.](#run-the-integration-tests)
|
||||
- [9. Submit your patch.](#9-submit-your-patch)
|
||||
* [Changelog](#changelog)
|
||||
+ [How do I know what to call the changelog file before I create the PR?](#how-do-i-know-what-to-call-the-changelog-file-before-i-create-the-pr)
|
||||
+ [Debian changelog](#debian-changelog)
|
||||
* [Sign off](#sign-off)
|
||||
- [10. Turn feedback into better code.](#10-turn-feedback-into-better-code)
|
||||
- [11. Find a new issue.](#11-find-a-new-issue)
|
||||
- [Notes for maintainers on merging PRs etc](#notes-for-maintainers-on-merging-prs-etc)
|
||||
- [Conclusion](#conclusion)
|
||||
|
||||
# 1. Who can contribute to Synapse?
|
||||
# Contributing code to Synapse
|
||||
|
||||
Everyone is welcome to contribute code to [matrix.org
|
||||
projects](https://github.com/matrix-org), provided that they are willing to
|
||||
@@ -36,179 +9,66 @@ license the code under the same terms as the project's overall 'outbound'
|
||||
license - in our case, this is almost always Apache Software License v2 (see
|
||||
[LICENSE](LICENSE)).
|
||||
|
||||
# 2. What do I need?
|
||||
|
||||
The code of Synapse is written in Python 3. To do pretty much anything, you'll need [a recent version of Python 3](https://wiki.python.org/moin/BeginnersGuide/Download).
|
||||
|
||||
The source code of Synapse is hosted on GitHub. You will also need [a recent version of git](https://github.com/git-guides/install-git).
|
||||
|
||||
For some tests, you will need [a recent version of Docker](https://docs.docker.com/get-docker/).
|
||||
|
||||
|
||||
# 3. Get the source.
|
||||
## How to contribute
|
||||
|
||||
The preferred and easiest way to contribute changes is to fork the relevant
|
||||
project on GitHub, and then [create a pull request](
|
||||
project on github, and then [create a pull request](
|
||||
https://help.github.com/articles/using-pull-requests/) to ask us to pull your
|
||||
changes into our repo.
|
||||
|
||||
Please base your changes on the `develop` branch.
|
||||
Some other points to follow:
|
||||
|
||||
* Please base your changes on the `develop` branch.
|
||||
|
||||
* Please follow the [code style requirements](#code-style).
|
||||
|
||||
```sh
|
||||
git clone git@github.com:YOUR_GITHUB_USER_NAME/synapse.git
|
||||
git checkout develop
|
||||
```
|
||||
* Please include a [changelog entry](#changelog) with each PR.
|
||||
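
As a concrete sketch of the changelog point above (the PR number in the file name and the wording are placeholders; the exact naming rules live in the Changelog section referenced there):

```sh
# One-line changelog entry named <PR number>.<change type>; both values
# below are placeholders.
echo "Fix an example bug." > changelog.d/1234.bugfix
```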
|
||||
If you need help getting started with git, this is beyond the scope of the document, but you
|
||||
can find many good git tutorials on the web.
|
||||
* Please [sign off](#sign-off) your contribution.
|
||||
|
||||
# 4. Install the dependencies
|
||||
* Please keep an eye on the pull request for feedback from the [continuous
|
||||
integration system](#continuous-integration-and-testing) and try to fix any
|
||||
errors that come up.
|
||||
|
||||
## Under Unix (macOS, Linux, BSD, ...)
|
||||
* If you need to [update your PR](#updating-your-pull-request), just add new
|
||||
commits to your branch rather than rebasing.
|
||||
|
||||
Once you have installed Python 3 and added the source, please open a terminal and
|
||||
setup a *virtualenv*, as follows:
|
||||
|
||||
```sh
|
||||
cd path/where/you/have/cloned/the/repository
|
||||
python3 -m venv ./env
|
||||
source ./env/bin/activate
|
||||
pip install -e ".[all,lint,mypy,test]"
|
||||
pip install tox
|
||||
```
|
||||
|
||||
This will install the developer dependencies for the project.
|
||||
|
||||
## Under Windows
|
||||
|
||||
TBD
|
||||
|
||||
|
||||
# 5. Get in touch.
|
||||
|
||||
Join our developer community on Matrix: #synapse-dev:matrix.org !
|
||||
|
||||
|
||||
# 6. Pick an issue.
|
||||
|
||||
Fix your favorite problem or perhaps find a [Good First Issue](https://github.com/matrix-org/synapse/issues?q=is%3Aopen+is%3Aissue+label%3A%22Good+First+Issue%22)
|
||||
to work on.
|
||||
|
||||
|
||||
# 7. Turn coffee and documentation into code and documentation!
|
||||
## Code style
|
||||
|
||||
Synapse's code style is documented [here](docs/code_style.md). Please follow
|
||||
it, including the conventions for the [sample configuration
|
||||
file](docs/code_style.md#configuration-file-format).
|
||||
|
||||
There is a growing amount of documentation located in the [docs](docs)
|
||||
directory. This documentation is intended primarily for sysadmins running their
|
||||
own Synapse instance, as well as developers interacting externally with
|
||||
Synapse. [docs/dev](docs/dev) exists primarily to house documentation for
|
||||
Synapse developers. [docs/admin_api](docs/admin_api) houses documentation
|
||||
regarding Synapse's Admin API, which is used mostly by sysadmins and external
|
||||
service developers.
|
||||
Many of the conventions are enforced by scripts which are run as part of the
|
||||
[continuous integration system](#continuous-integration-and-testing). To help
|
||||
check if you have followed the code style, you can run `scripts-dev/lint.sh`
|
||||
locally. You'll need python 3.6 or later, and to install a number of tools:
|
||||
|
||||
If you add new files to either of these folders, please use [GitHub-Flavoured
|
||||
Markdown](https://guides.github.com/features/mastering-markdown/).
|
||||
```
|
||||
# Install the dependencies
|
||||
pip install -U black flake8 flake8-comprehensions isort
|
||||
|
||||
Some documentation also exists in [Synapse's GitHub
|
||||
Wiki](https://github.com/matrix-org/synapse/wiki), although this is primarily
|
||||
contributed to by community authors.
|
||||
|
||||
|
||||
# 8. Test, test, test!
|
||||
<a name="test-test-test"></a>
|
||||
|
||||
While you're developing and before submitting a patch, you'll
|
||||
want to test your code.
|
||||
|
||||
## Run the linters.
|
||||
|
||||
The linters look at your code and do two things:
|
||||
|
||||
- ensure that your code follows the coding style adopted by the project;
|
||||
- catch a number of errors in your code.
|
||||
|
||||
They're pretty fast, don't hesitate!
|
||||
|
||||
```sh
|
||||
source ./env/bin/activate
|
||||
# Run the linter script
|
||||
./scripts-dev/lint.sh
|
||||
```
|
||||
|
||||
Note that this script *will modify your files* to fix styling errors.
|
||||
Make sure that you have saved all your files.
|
||||
**Note that the script does not just test/check, but also reformats code, so you
|
||||
may wish to ensure any new code is committed first**.
|
||||
|
||||
If you wish to restrict the linters to only the files changed since the last commit
|
||||
(much faster!), you can instead run:
|
||||
By default, this script checks all files and can take some time; if you alter
|
||||
only certain files, you might wish to specify paths as arguments to reduce the
|
||||
run-time:
|
||||
|
||||
```sh
|
||||
source ./env/bin/activate
|
||||
./scripts-dev/lint.sh -d
|
||||
```
|
||||
|
||||
Or if you know exactly which files you wish to lint, you can instead run:
|
||||
|
||||
```sh
|
||||
source ./env/bin/activate
|
||||
./scripts-dev/lint.sh path/to/file1.py path/to/file2.py path/to/folder
|
||||
```
|
||||
|
||||
## Run the unit tests.
|
||||
|
||||
The unit tests run parts of Synapse, including your changes, to see if anything
|
||||
was broken. They are slower than the linters but will typically catch more errors.
|
||||
|
||||
```sh
|
||||
source ./env/bin/activate
|
||||
trial tests
|
||||
```
|
||||
|
||||
If you wish to only run *some* unit tests, you may specify
|
||||
another module instead of `tests` - or a test class or a method:
|
||||
|
||||
```sh
|
||||
source ./env/bin/activate
|
||||
trial tests.rest.admin.test_room tests.handlers.test_admin.ExfiltrateData.test_invite
|
||||
```
|
||||
|
||||
If your tests fail, you may wish to look at the logs:
|
||||
|
||||
```sh
|
||||
less _trial_temp/test.log
|
||||
```
|
||||
|
||||
## Run the integration tests.
|
||||
|
||||
The integration tests are a more comprehensive suite of tests. They
|
||||
run a full version of Synapse, including your changes, to check if
|
||||
anything was broken. They are slower than the unit tests but will
|
||||
typically catch more errors.
|
||||
|
||||
The following command will let you run the integration test with the most common
|
||||
configuration:
|
||||
|
||||
```sh
|
||||
$ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:py37
|
||||
```
|
||||
|
||||
This configuration should generally cover your needs. For more details about other configurations, see [documentation in the SyTest repo](https://github.com/matrix-org/sytest/blob/develop/docker/README.md).
|
||||
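
If you need a different configuration, the SyTest images are driven by environment variables; for example, something like the following should run the same suite against PostgreSQL. The `POSTGRES` flag comes from the SyTest Docker documentation linked above rather than this repository, so treat it as an assumption and confirm it there.

```sh
# Same integration-test run as above, but asking the SyTest container to
# back Synapse with PostgreSQL (flag per the SyTest docker README).
docker run --rm -it \
    -v /path/where/you/have/cloned/the/repository\:/src:ro \
    -v /path/to/where/you/want/logs\:/logs \
    -e POSTGRES=1 \
    matrixdotorg/sytest-synapse:py37
```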
|
||||
|
||||
# 9. Submit your patch.
|
||||
|
||||
Once you're happy with your patch, it's time to prepare a Pull Request.
|
||||
|
||||
To prepare a Pull Request, please:
|
||||
|
||||
1. verify that [all the tests pass](#test-test-test), including the coding style;
|
||||
2. [sign off](#sign-off) your contribution;
|
||||
3. `git push` your commit to your fork of Synapse;
|
||||
4. on GitHub, [create the Pull Request](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request);
|
||||
5. add a [changelog entry](#changelog) and push it to your Pull Request;
|
||||
6. for most contributors, that's all - however, if you are a member of the organization `matrix-org`, on GitHub, please request a review from `matrix.org / Synapse Core`.
|
||||
Before pushing new changes, ensure they don't produce linting errors. Commit any
|
||||
files that were corrected.
|
||||
|
||||
Please ensure your changes match the cosmetic style of the existing project,
|
||||
and **never** mix cosmetic and functional changes in the same commit, as it
|
||||
makes it horribly hard to review otherwise.
|
||||
|
||||
## Changelog
|
||||
|
||||
@@ -358,36 +218,47 @@ Git allows you to add this signoff automatically when using the `-s`
|
||||
flag to `git commit`, which uses the name and email set in your
|
||||
`user.name` and `user.email` git configs.
|
||||
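
For example (the commit message is a placeholder):

```sh
# Commit with a Signed-off-by trailer taken from user.name and user.email.
git commit -s -m "Fix a flaky unit test"
```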
|
||||
## Continuous integration and testing
|
||||
|
||||
# 10. Turn feedback into better code.
|
||||
[Buildkite](https://buildkite.com/matrix-dot-org/synapse) will automatically
|
||||
run a series of checks and tests against any PR which is opened against the
|
||||
project; if your change breaks the build, this will be shown in GitHub, with
|
||||
links to the build results. If your build fails, please try to fix the errors
|
||||
and update your branch.
|
||||
|
||||
Once the Pull Request is opened, you will see a few things:
|
||||
To run unit tests in a local development environment, you can use:
|
||||
|
||||
1. our automated CI (Continuous Integration) pipeline will run (again) the linters, the unit tests, the integration tests and more;
|
||||
2. one or more of the developers will take a look at your Pull Request and offer feedback.
|
||||
- ``tox -e py35`` (requires tox to be installed by ``pip install tox``)
|
||||
for SQLite-backed Synapse on Python 3.5.
|
||||
- ``tox -e py36`` for SQLite-backed Synapse on Python 3.6.
|
||||
- ``tox -e py36-postgres`` for PostgreSQL-backed Synapse on Python 3.6
|
||||
(requires a running local PostgreSQL with access to create databases).
|
||||
- ``./test_postgresql.sh`` for PostgreSQL-backed Synapse on Python 3.5
|
||||
(requires Docker). Entirely self-contained, recommended if you don't want to
|
||||
set up PostgreSQL yourself.
|
||||
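
Putting those options together, a typical local run might look like the following; it assumes tox is installed as noted above, and the `-postgres` variant additionally needs a local PostgreSQL you can create databases on.

```sh
# Install tox once, then pick one of the environments listed above.
pip install tox
tox -e py36              # SQLite-backed Synapse on Python 3.6
tox -e py36-postgres     # PostgreSQL-backed Synapse on Python 3.6
```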
|
||||
From this point, you should:
|
||||
Docker images are available for running the integration tests (SyTest) locally,
|
||||
see the [documentation in the SyTest repo](
|
||||
https://github.com/matrix-org/sytest/blob/develop/docker/README.md) for more
|
||||
information.
|
||||
|
||||
1. Look at the results of the CI pipeline.
|
||||
- If there is any error, fix the error.
|
||||
2. If a developer has requested changes, make these changes and let us know if it is ready for a developer to review again.
|
||||
3. Create a new commit with the changes.
|
||||
- Please do NOT overwrite the history. New commits make the reviewer's life easier.
|
||||
- Push these commits to your Pull Request.
|
||||
4. Back to 1.
|
||||
## Updating your pull request
|
||||
|
||||
Once both the CI and the developers are happy, the patch will be merged into Synapse and released shortly!
|
||||
If you decide to make changes to your pull request - perhaps to address issues
|
||||
raised in a review, or to fix problems highlighted by [continuous
|
||||
integration](#continuous-integration-and-testing) - just add new commits to your
|
||||
branch, and push to GitHub. The pull request will automatically be updated.
|
||||
|
||||
# 11. Find a new issue.
|
||||
Please **avoid** rebasing your branch, especially once the PR has been
|
||||
reviewed: doing so makes it very difficult for a reviewer to see what has
|
||||
changed since a previous review.
|
||||
|
||||
By now, you know the drill!
|
||||
|
||||
# Notes for maintainers on merging PRs etc
|
||||
## Notes for maintainers on merging PRs etc
|
||||
|
||||
There are some notes for those with commit access to the project on how we
|
||||
manage git [here](docs/dev/git.md).
|
||||
|
||||
# Conclusion
|
||||
## Conclusion
|
||||
|
||||
That's it! Matrix is a very open and collaborative project as you might expect
|
||||
given our obsession with open communication. If we're going to successfully
|
||||
|
||||
293 INSTALL.md
@@ -1,44 +1,19 @@
|
||||
# Installation Instructions
|
||||
- [Choosing your server name](#choosing-your-server-name)
|
||||
- [Picking a database engine](#picking-a-database-engine)
|
||||
- [Installing Synapse](#installing-synapse)
|
||||
- [Installing from source](#installing-from-source)
|
||||
- [Platform-Specific Instructions](#platform-specific-instructions)
|
||||
- [Prebuilt packages](#prebuilt-packages)
|
||||
- [Setting up Synapse](#setting-up-synapse)
|
||||
- [TLS certificates](#tls-certificates)
|
||||
- [Client Well-Known URI](#client-well-known-uri)
|
||||
- [Email](#email)
|
||||
- [Registering a user](#registering-a-user)
|
||||
- [Setting up a TURN server](#setting-up-a-turn-server)
|
||||
- [URL previews](#url-previews)
|
||||
- [Troubleshooting Installation](#troubleshooting-installation)
|
||||
|
||||
There are 3 steps to follow under **Installation Instructions**.
|
||||
|
||||
- [Installation Instructions](#installation-instructions)
|
||||
- [Choosing your server name](#choosing-your-server-name)
|
||||
- [Installing Synapse](#installing-synapse)
|
||||
- [Installing from source](#installing-from-source)
|
||||
- [Platform-Specific Instructions](#platform-specific-instructions)
|
||||
- [Debian/Ubuntu/Raspbian](#debianubunturaspbian)
|
||||
- [ArchLinux](#archlinux)
|
||||
- [CentOS/Fedora](#centosfedora)
|
||||
- [macOS](#macos)
|
||||
- [OpenSUSE](#opensuse)
|
||||
- [OpenBSD](#openbsd)
|
||||
- [Windows](#windows)
|
||||
- [Prebuilt packages](#prebuilt-packages)
|
||||
- [Docker images and Ansible playbooks](#docker-images-and-ansible-playbooks)
|
||||
- [Debian/Ubuntu](#debianubuntu)
|
||||
- [Matrix.org packages](#matrixorg-packages)
|
||||
- [Downstream Debian packages](#downstream-debian-packages)
|
||||
- [Downstream Ubuntu packages](#downstream-ubuntu-packages)
|
||||
- [Fedora](#fedora)
|
||||
- [OpenSUSE](#opensuse-1)
|
||||
- [SUSE Linux Enterprise Server](#suse-linux-enterprise-server)
|
||||
- [ArchLinux](#archlinux-1)
|
||||
- [Void Linux](#void-linux)
|
||||
- [FreeBSD](#freebsd)
|
||||
- [OpenBSD](#openbsd-1)
|
||||
- [NixOS](#nixos)
|
||||
- [Setting up Synapse](#setting-up-synapse)
|
||||
- [Using PostgreSQL](#using-postgresql)
|
||||
- [TLS certificates](#tls-certificates)
|
||||
- [Client Well-Known URI](#client-well-known-uri)
|
||||
- [Email](#email)
|
||||
- [Registering a user](#registering-a-user)
|
||||
- [Setting up a TURN server](#setting-up-a-turn-server)
|
||||
- [URL previews](#url-previews)
|
||||
- [Troubleshooting Installation](#troubleshooting-installation)
|
||||
|
||||
## Choosing your server name
|
||||
# Choosing your server name
|
||||
|
||||
It is important to choose the name for your server before you install Synapse,
|
||||
because it cannot be changed later.
|
||||
@@ -54,16 +29,35 @@ that your email address is probably `user@example.com` rather than
|
||||
`user@email.example.com`) - but doing so may require more advanced setup: see
|
||||
[Setting up Federation](docs/federate.md).
|
||||
|
||||
## Installing Synapse
|
||||
# Picking a database engine
|
||||
|
||||
### Installing from source
|
||||
Synapse offers two database engines:
|
||||
* [PostgreSQL](https://www.postgresql.org)
|
||||
* [SQLite](https://sqlite.org/)
|
||||
|
||||
Almost all installations should opt to use PostgreSQL. Advantages include:
|
||||
|
||||
* significant performance improvements due to the superior threading and
|
||||
caching model, smarter query optimiser
|
||||
* allowing the DB to be run on separate hardware
|
||||
|
||||
For information on how to install and use PostgreSQL, please see
|
||||
[docs/postgres.md](docs/postgres.md)
|
||||
|
||||
By default Synapse uses SQLite and in doing so trades performance for convenience.
|
||||
SQLite is only recommended in Synapse for testing purposes or for servers with
|
||||
light workloads.
|
||||
|
||||
# Installing Synapse
|
||||
|
||||
## Installing from source
|
||||
|
||||
(Prebuilt packages are available for some platforms - see [Prebuilt packages](#prebuilt-packages).)
|
||||
|
||||
System requirements:
|
||||
|
||||
- POSIX-compliant system (tested on Linux & OS X)
|
||||
- Python 3.5.2 or later, up to Python 3.9.
|
||||
- Python 3.5.2 or later, up to Python 3.8.
|
||||
- At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org
|
||||
|
||||
Synapse is written in Python but some of the libraries it uses are written in
|
||||
@@ -74,7 +68,7 @@ these on various platforms.
|
||||
|
||||
To install the Synapse homeserver run:
|
||||
|
||||
```sh
|
||||
```
|
||||
mkdir -p ~/synapse
|
||||
virtualenv -p python3 ~/synapse/env
|
||||
source ~/synapse/env/bin/activate
|
||||
@@ -91,7 +85,7 @@ prefer.
|
||||
This Synapse installation can then be later upgraded by using pip again with the
|
||||
update flag:
|
||||
|
||||
```sh
|
||||
```
|
||||
source ~/synapse/env/bin/activate
|
||||
pip install -U matrix-synapse
|
||||
```
|
||||
@@ -99,7 +93,7 @@ pip install -U matrix-synapse
|
||||
Before you can start Synapse, you will need to generate a configuration
|
||||
file. To do this, run (in your virtualenv, as before):
|
||||
|
||||
```sh
|
||||
```
|
||||
cd ~/synapse
|
||||
python -m synapse.app.homeserver \
|
||||
--server-name my.domain.name \
|
||||
@@ -117,54 +111,70 @@ wise to back them up somewhere safe. (If, for whatever reason, you do need to
|
||||
change your homeserver's keys, you may find that other homeservers have the
|
||||
old key cached. If you update the signing key, you should change the name of the
|
||||
key in the `<server name>.signing.key` file (the second word) to something
|
||||
different. See the [spec](https://matrix.org/docs/spec/server_server/latest.html#retrieving-server-keys) for more information on key management).
|
||||
different. See the
|
||||
[spec](https://matrix.org/docs/spec/server_server/latest.html#retrieving-server-keys)
|
||||
for more information on key management).
|
||||
|
||||
To actually run your new homeserver, pick a working directory for Synapse to
|
||||
run (e.g. `~/synapse`), and:
|
||||
|
||||
```sh
|
||||
```
|
||||
cd ~/synapse
|
||||
source env/bin/activate
|
||||
synctl start
|
||||
```
|
||||
|
||||
#### Platform-Specific Instructions
|
||||
### Platform-Specific Instructions
|
||||
|
||||
##### Debian/Ubuntu/Raspbian
|
||||
#### Debian/Ubuntu/Raspbian
|
||||
|
||||
Installing prerequisites on Ubuntu or Debian:
|
||||
|
||||
```sh
|
||||
sudo apt install build-essential python3-dev libffi-dev \
|
||||
```
|
||||
sudo apt-get install build-essential python3-dev libffi-dev \
|
||||
python3-pip python3-setuptools sqlite3 \
|
||||
libssl-dev virtualenv libjpeg-dev libxslt1-dev
|
||||
```
|
||||
|
||||
##### ArchLinux
|
||||
#### ArchLinux
|
||||
|
||||
Installing prerequisites on ArchLinux:
|
||||
|
||||
```sh
|
||||
```
|
||||
sudo pacman -S base-devel python python-pip \
|
||||
python-setuptools python-virtualenv sqlite3
|
||||
```
|
||||
|
||||
##### CentOS/Fedora
|
||||
#### CentOS/Fedora
|
||||
|
||||
Installing prerequisites on CentOS or Fedora Linux:
|
||||
Installing prerequisites on CentOS 8 or Fedora>26:
|
||||
|
||||
```sh
|
||||
```
|
||||
sudo dnf install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
|
||||
libwebp-devel libxml2-devel libxslt-devel libpq-devel \
|
||||
python3-virtualenv libffi-devel openssl-devel python3-devel
|
||||
libwebp-devel tk-devel redhat-rpm-config \
|
||||
python3-virtualenv libffi-devel openssl-devel
|
||||
sudo dnf groupinstall "Development Tools"
|
||||
```
|
||||
|
||||
##### macOS
|
||||
Installing prerequisites on CentOS 7 or Fedora<=25:
|
||||
|
||||
```
|
||||
sudo yum install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
|
||||
lcms2-devel libwebp-devel tcl-devel tk-devel redhat-rpm-config \
|
||||
python3-virtualenv libffi-devel openssl-devel
|
||||
sudo yum groupinstall "Development Tools"
|
||||
```
|
||||
|
||||
Note that Synapse does not support versions of SQLite before 3.11, and CentOS 7
|
||||
uses SQLite 3.7. You may be able to work around this by installing a more
|
||||
recent SQLite version, but it is recommended that you instead use a Postgres
|
||||
database: see [docs/postgres.md](docs/postgres.md).
|
||||
|
||||
#### macOS
|
||||
|
||||
Installing prerequisites on macOS:
|
||||
|
||||
```sh
|
||||
```
|
||||
xcode-select --install
|
||||
sudo easy_install pip
|
||||
sudo pip install virtualenv
|
||||
@@ -174,23 +184,22 @@ brew install pkg-config libffi
|
||||
On macOS Catalina (10.15) you may need to explicitly install OpenSSL
|
||||
via brew and inform `pip` about it so that `psycopg2` builds:
|
||||
|
||||
```sh
|
||||
```
|
||||
brew install openssl@1.1
|
||||
export LDFLAGS="-L/usr/local/opt/openssl/lib"
|
||||
export CPPFLAGS="-I/usr/local/opt/openssl/include"
|
||||
export LDFLAGS=-L/usr/local/Cellar/openssl\@1.1/1.1.1d/lib/
|
||||
```
|
||||
|
||||
##### OpenSUSE
|
||||
#### OpenSUSE
|
||||
|
||||
Installing prerequisites on openSUSE:
|
||||
|
||||
```sh
|
||||
```
|
||||
sudo zypper in -t pattern devel_basis
|
||||
sudo zypper in python-pip python-setuptools sqlite3 python-virtualenv \
|
||||
python-devel libffi-devel libopenssl-devel libjpeg62-devel
|
||||
```
|
||||
|
||||
##### OpenBSD
|
||||
#### OpenBSD
|
||||
|
||||
A port of Synapse is available under `net/synapse`. The filesystem
|
||||
underlying the homeserver directory (defaults to `/var/synapse`) has to be
|
||||
@@ -204,72 +213,73 @@ mounted with `wxallowed` (cf. `mount(8)`).
|
||||
Creating a `WRKOBJDIR` for building python under `/usr/local` (which on a
|
||||
default OpenBSD installation is mounted with `wxallowed`):
|
||||
|
||||
```sh
|
||||
```
|
||||
doas mkdir /usr/local/pobj_wxallowed
|
||||
```
|
||||
|
||||
Assuming `PORTS_PRIVSEP=Yes` (cf. `bsd.port.mk(5)`) and `SUDO=doas` are
|
||||
configured in `/etc/mk.conf`:
|
||||
|
||||
```sh
|
||||
```
|
||||
doas chown _pbuild:_pbuild /usr/local/pobj_wxallowed
|
||||
```
|
||||
|
||||
Setting the `WRKOBJDIR` for building python:
|
||||
|
||||
```sh
|
||||
```
|
||||
echo WRKOBJDIR_lang/python/3.7=/usr/local/pobj_wxallowed \\nWRKOBJDIR_lang/python/2.7=/usr/local/pobj_wxallowed >> /etc/mk.conf
|
||||
```
|
||||
|
||||
Building Synapse:
|
||||
|
||||
```sh
|
||||
```
|
||||
cd /usr/ports/net/synapse
|
||||
make install
|
||||
```
|
||||
|
||||
##### Windows
|
||||
#### Windows
|
||||
|
||||
If you wish to run or develop Synapse on Windows, the Windows Subsystem For
|
||||
Linux provides a Linux environment on Windows 10 which is capable of using the
|
||||
Debian, Fedora, or source installation methods. More information about WSL can
|
||||
be found at <https://docs.microsoft.com/en-us/windows/wsl/install-win10> for
|
||||
Windows 10 and <https://docs.microsoft.com/en-us/windows/wsl/install-on-server>
|
||||
be found at https://docs.microsoft.com/en-us/windows/wsl/install-win10 for
|
||||
Windows 10 and https://docs.microsoft.com/en-us/windows/wsl/install-on-server
|
||||
for Windows Server.
|
||||
|
||||
### Prebuilt packages
|
||||
## Prebuilt packages
|
||||
|
||||
As an alternative to installing from source, prebuilt packages are available
|
||||
for a number of platforms.
|
||||
|
||||
#### Docker images and Ansible playbooks
|
||||
### Docker images and Ansible playbooks
|
||||
|
||||
There is an official synapse image available at
|
||||
<https://hub.docker.com/r/matrixdotorg/synapse> which can be used with
|
||||
There is an offical synapse image available at
|
||||
https://hub.docker.com/r/matrixdotorg/synapse which can be used with
|
||||
the docker-compose file available at [contrib/docker](contrib/docker). Further
|
||||
information on this including configuration options is available in the README
|
||||
on hub.docker.com.
|
||||
|
||||
Alternatively, Andreas Peters (previously Silvio Fricke) has contributed a
|
||||
Dockerfile to automate a synapse server in a single Docker image, at
|
||||
<https://hub.docker.com/r/avhost/docker-matrix/tags/>
|
||||
https://hub.docker.com/r/avhost/docker-matrix/tags/
|
||||
|
||||
Slavi Pantaleev has created an Ansible playbook,
|
||||
which installs the offical Docker image of Matrix Synapse
|
||||
along with many other Matrix-related services (Postgres database, Element, coturn,
|
||||
ma1sd, SSL support, etc.).
|
||||
For more details, see
|
||||
<https://github.com/spantaleev/matrix-docker-ansible-deploy>
|
||||
https://github.com/spantaleev/matrix-docker-ansible-deploy
|
||||
|
||||
#### Debian/Ubuntu
|
||||
|
||||
##### Matrix.org packages
|
||||
### Debian/Ubuntu
|
||||
|
||||
#### Matrix.org packages
|
||||
|
||||
Matrix.org provides Debian/Ubuntu packages of the latest stable version of
|
||||
Synapse via <https://packages.matrix.org/debian/>. They are available for Debian
|
||||
Synapse via https://packages.matrix.org/debian/. They are available for Debian
|
||||
9 (Stretch), Ubuntu 16.04 (Xenial), and later. To use them:
|
||||
|
||||
```sh
|
||||
```
|
||||
sudo apt install -y lsb-release wget apt-transport-https
|
||||
sudo wget -O /usr/share/keyrings/matrix-org-archive-keyring.gpg https://packages.matrix.org/debian/matrix-org-archive-keyring.gpg
|
||||
echo "deb [signed-by=/usr/share/keyrings/matrix-org-archive-keyring.gpg] https://packages.matrix.org/debian/ $(lsb_release -cs) main" |
|
||||
@@ -289,7 +299,7 @@ The fingerprint of the repository signing key (as shown by `gpg
|
||||
/usr/share/keyrings/matrix-org-archive-keyring.gpg`) is
|
||||
`AAF9AE843A7584B5A3E4CD2BCF45A512DE2DA058`.
|
||||
|
||||
##### Downstream Debian packages
|
||||
#### Downstream Debian packages
|
||||
|
||||
We do not recommend using the packages from the default Debian `buster`
|
||||
repository at this time, as they are old and suffer from known security
|
||||
@@ -301,49 +311,49 @@ for information on how to use backports.
|
||||
If you are using Debian `sid` or testing, Synapse is available in the default
|
||||
repositories and it should be possible to install it simply with:
|
||||
|
||||
```sh
|
||||
```
|
||||
sudo apt install matrix-synapse
|
||||
```
|
||||
|
||||
##### Downstream Ubuntu packages
|
||||
#### Downstream Ubuntu packages
|
||||
|
||||
We do not recommend using the packages in the default Ubuntu repository
|
||||
at this time, as they are old and suffer from known security vulnerabilities.
|
||||
The latest version of Synapse can be installed from [our repository](#matrixorg-packages).
|
||||
|
||||
#### Fedora
|
||||
### Fedora
|
||||
|
||||
Synapse is in the Fedora repositories as `matrix-synapse`:
|
||||
|
||||
```sh
|
||||
```
|
||||
sudo dnf install matrix-synapse
|
||||
```
|
||||
|
||||
Oleg Girko provides Fedora RPMs at
|
||||
<https://obs.infoserver.lv/project/monitor/matrix-synapse>
|
||||
https://obs.infoserver.lv/project/monitor/matrix-synapse
|
||||
|
||||
#### OpenSUSE
|
||||
### OpenSUSE
|
||||
|
||||
Synapse is in the OpenSUSE repositories as `matrix-synapse`:
|
||||
|
||||
```sh
|
||||
```
|
||||
sudo zypper install matrix-synapse
|
||||
```
|
||||
|
||||
#### SUSE Linux Enterprise Server
|
||||
### SUSE Linux Enterprise Server
|
||||
|
||||
Unofficial packages are built for SLES 15 in the openSUSE:Backports:SLE-15 repository at
|
||||
<https://download.opensuse.org/repositories/openSUSE:/Backports:/SLE-15/standard/>
|
||||
https://download.opensuse.org/repositories/openSUSE:/Backports:/SLE-15/standard/
|
||||
|
||||
#### ArchLinux
|
||||
### ArchLinux
|
||||
|
||||
The quickest way to get up and running with ArchLinux is probably with the community package
|
||||
<https://www.archlinux.org/packages/community/any/matrix-synapse/>, which should pull in most of
|
||||
https://www.archlinux.org/packages/community/any/matrix-synapse/, which should pull in most of
|
||||
the necessary dependencies.
|
||||
|
||||
pip may be outdated (6.0.7-1 and needs to be upgraded to 6.0.8-1 ):
|
||||
|
||||
```sh
|
||||
```
|
||||
sudo pip install --upgrade pip
|
||||
```
|
||||
|
||||
@@ -352,28 +362,28 @@ ELFCLASS32 (x64 Systems), you may need to reinstall py-bcrypt to correctly
|
||||
compile it under the right architecture. (This should not be needed if
|
||||
installing under virtualenv):
|
||||
|
||||
```sh
|
||||
```
|
||||
sudo pip uninstall py-bcrypt
|
||||
sudo pip install py-bcrypt
|
||||
```
|
||||
|
||||
#### Void Linux
|
||||
### Void Linux
|
||||
|
||||
Synapse can be found in the void repositories as 'synapse':
|
||||
|
||||
```sh
|
||||
```
|
||||
xbps-install -Su
|
||||
xbps-install -S synapse
|
||||
```
|
||||
|
||||
#### FreeBSD
|
||||
### FreeBSD
|
||||
|
||||
Synapse can be installed via FreeBSD Ports or Packages contributed by Brendan Molloy from:
|
||||
|
||||
- Ports: `cd /usr/ports/net-im/py-matrix-synapse && make install clean`
|
||||
- Packages: `pkg install py37-matrix-synapse`
|
||||
- Ports: `cd /usr/ports/net-im/py-matrix-synapse && make install clean`
|
||||
- Packages: `pkg install py37-matrix-synapse`
|
||||
|
||||
#### OpenBSD
|
||||
### OpenBSD
|
||||
|
||||
As of OpenBSD 6.7 Synapse is available as a pre-compiled binary. The filesystem
|
||||
underlying the homeserver directory (defaults to `/var/synapse`) has to be
|
||||
@@ -382,35 +392,20 @@ and mounting it to `/var/synapse` should be taken into consideration.
|
||||
|
||||
Installing Synapse:
|
||||
|
||||
```sh
|
||||
```
|
||||
doas pkg_add synapse
|
||||
```
|
||||
|
||||
#### NixOS
|
||||
### NixOS
|
||||
|
||||
Robin Lambertz has packaged Synapse for NixOS at:
|
||||
<https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/services/misc/matrix-synapse.nix>
|
||||
https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/services/misc/matrix-synapse.nix
|
||||
|
||||
## Setting up Synapse
|
||||
# Setting up Synapse
|
||||
|
||||
Once you have installed synapse as above, you will need to configure it.
|
||||
|
||||
### Using PostgreSQL
|
||||
|
||||
By default Synapse uses [SQLite](https://sqlite.org/) and in doing so trades performance for convenience.
|
||||
SQLite is only recommended in Synapse for testing purposes or for servers with
|
||||
very light workloads.
|
||||
|
||||
Almost all installations should opt to use [PostgreSQL](https://www.postgresql.org). Advantages include:
|
||||
|
||||
- significant performance improvements due to the superior threading and
|
||||
caching model, smarter query optimiser
|
||||
- allowing the DB to be run on separate hardware
|
||||
|
||||
For information on how to install and use PostgreSQL in Synapse, please see
|
||||
[docs/postgres.md](docs/postgres.md)
|
||||
|
||||
### TLS certificates
|
||||
## TLS certificates
|
||||
|
||||
The default configuration exposes a single HTTP port on the local
|
||||
interface: `http://localhost:8008`. It is suitable for local testing,
|
||||
@@ -424,19 +419,19 @@ The recommended way to do so is to set up a reverse proxy on port
|
||||
Alternatively, you can configure Synapse to expose an HTTPS port. To do
|
||||
so, you will need to edit `homeserver.yaml`, as follows:
|
||||
|
||||
- First, under the `listeners` section, uncomment the configuration for the
|
||||
* First, under the `listeners` section, uncomment the configuration for the
|
||||
TLS-enabled listener. (Remove the hash sign (`#`) at the start of
|
||||
each line). The relevant lines are like this:
|
||||
|
||||
```yaml
|
||||
- port: 8448
|
||||
type: http
|
||||
tls: true
|
||||
resources:
|
||||
- names: [client, federation]
|
||||
```
|
||||
- port: 8448
|
||||
type: http
|
||||
tls: true
|
||||
resources:
|
||||
- names: [client, federation]
|
||||
```
|
||||
|
||||
- You will also need to uncomment the `tls_certificate_path` and
|
||||
* You will also need to uncomment the `tls_certificate_path` and
|
||||
`tls_private_key_path` lines under the `TLS` section. You will need to manage
|
||||
provisioning of these certificates yourself — Synapse had built-in ACME
|
||||
support, but the ACMEv1 protocol Synapse implements is deprecated, not
|
||||
@@ -451,7 +446,7 @@ so, you will need to edit `homeserver.yaml`, as follows:
|
||||
For a more detailed guide to configuring your server for federation, see
|
||||
[federate.md](docs/federate.md).
|
||||
|
||||
### Client Well-Known URI
|
||||
## Client Well-Known URI
|
||||
|
||||
Setting up the client Well-Known URI is optional but if you set it up, it will
|
||||
allow users to enter their full username (e.g. `@user:<server_name>`) into clients
|
||||
@@ -462,7 +457,7 @@ about the actual homeserver URL you are using.
|
||||
The URL `https://<server_name>/.well-known/matrix/client` should return JSON in
|
||||
the following format.
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"m.homeserver": {
|
||||
"base_url": "https://<matrix.example.com>"
|
||||
@@ -472,7 +467,7 @@ the following format.
|
||||
|
||||
It can optionally contain identity server information as well.
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"m.homeserver": {
|
||||
"base_url": "https://<matrix.example.com>"
|
||||
@@ -489,11 +484,10 @@ Cross-Origin Resource Sharing (CORS) headers. A recommended value would be
|
||||
view it.
|
||||
|
||||
In nginx this would be something like:
|
||||
|
||||
```nginx
|
||||
```
|
||||
location /.well-known/matrix/client {
|
||||
return 200 '{"m.homeserver": {"base_url": "https://<matrix.example.com>"}}';
|
||||
default_type application/json;
|
||||
add_header Content-Type application/json;
|
||||
add_header Access-Control-Allow-Origin *;
|
||||
}
|
||||
```
|
||||
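
A quick way to check the result, whichever web server serves it, is to fetch the document directly; the hostname below is a placeholder.

```sh
# Should return the JSON shown above, with the CORS header set.
curl -i https://example.com/.well-known/matrix/client
```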
@@ -503,11 +497,11 @@ correctly. `public_baseurl` should be set to the URL that clients will use to
|
||||
connect to your server. This is the same URL you put for the `m.homeserver`
|
||||
`base_url` above.
|
||||
|
||||
```yaml
|
||||
```
|
||||
public_baseurl: "https://<matrix.example.com>"
|
||||
```
|
||||
|
||||
### Email
|
||||
## Email
|
||||
|
||||
It is desirable for Synapse to have the capability to send email. This allows
|
||||
Synapse to send password reset emails, send verifications when an email address
|
||||
@@ -522,7 +516,7 @@ and `notif_from` fields filled out. You may also need to set `smtp_user`,
|
||||
If email is not configured, password reset, registration and notifications via
|
||||
email will be disabled.
|
||||
|
||||
### Registering a user
|
||||
## Registering a user
|
||||
|
||||
The easiest way to create a new user is to do so from a client like [Element](https://element.io/).
|
||||
|
||||
@@ -530,7 +524,7 @@ Alternatively you can do so from the command line if you have installed via pip.
|
||||
|
||||
This can be done as follows:
|
||||
|
||||
```sh
|
||||
```
|
||||
$ source ~/synapse/env/bin/activate
|
||||
$ synctl start # if not already running
|
||||
$ register_new_matrix_user -c homeserver.yaml http://localhost:8008
|
||||
@@ -548,12 +542,12 @@ value is generated by `--generate-config`), but it should be kept secret, as
|
||||
anyone with knowledge of it can register users, including admin accounts,
|
||||
on your server even if `enable_registration` is `false`.
|
||||
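
For scripted use, the same tool can be driven non-interactively with that shared secret. The flags below are believed to match `register_new_matrix_user --help`, but treat this as a sketch and confirm against your installed version; the username and password are placeholders.

```sh
# Non-interactive registration using the registration shared secret from
# homeserver.yaml; confirm flag names with `register_new_matrix_user --help`.
register_new_matrix_user -u alice -p 'correct-horse-battery-staple' \
    --no-admin -k "$REGISTRATION_SHARED_SECRET" http://localhost:8008
```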
|
||||
### Setting up a TURN server
|
||||
## Setting up a TURN server
|
||||
|
||||
For reliable VoIP calls to be routed via this homeserver, you MUST configure
|
||||
a TURN server. See [docs/turn-howto.md](docs/turn-howto.md) for details.
|
||||
|
||||
### URL previews
|
||||
## URL previews
|
||||
|
||||
Synapse includes support for previewing URLs, which is disabled by default. To
|
||||
turn it on you must enable the `url_preview_enabled: True` config parameter
|
||||
@@ -563,18 +557,19 @@ This is critical from a security perspective to stop arbitrary Matrix users
|
||||
spidering 'internal' URLs on your network. At the very least we recommend that
|
||||
your loopback and RFC1918 IP addresses are blacklisted.
|
||||
|
||||
This also requires the optional `lxml` python dependency to be installed. This
|
||||
in turn requires the `libxml2` library to be available - on Debian/Ubuntu this
|
||||
means `apt-get install libxml2-dev`, or equivalent for your OS.
|
||||
This also requires the optional `lxml` and `netaddr` python dependencies to be
|
||||
installed. This in turn requires the `libxml2` library to be available - on
|
||||
Debian/Ubuntu this means `apt-get install libxml2-dev`, or equivalent for
|
||||
your OS.
|
||||
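
In practice that means installing the system library and then the optional Python packages into the same virtualenv as Synapse, for example on Debian/Ubuntu:

```sh
# System library first, then the optional Python dependencies named above.
sudo apt-get install libxml2-dev
pip install lxml netaddr
```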
|
||||
### Troubleshooting Installation
|
||||
# Troubleshooting Installation
|
||||
|
||||
`pip` seems to leak *lots* of memory during installation. For instance, a Linux
|
||||
host with 512MB of RAM may run out of memory whilst installing Twisted. If this
|
||||
happens, you will have to individually install the dependencies which are
|
||||
failing, e.g.:
|
||||
|
||||
```sh
|
||||
```
|
||||
pip install twisted
|
||||
```
|
||||
|
||||
|
||||
88 README.rst
@@ -1,6 +1,10 @@
|
||||
=========================================================
|
||||
Synapse |support| |development| |license| |pypi| |python|
|
||||
=========================================================
|
||||
================
|
||||
Synapse |shield|
|
||||
================
|
||||
|
||||
.. |shield| image:: https://img.shields.io/matrix/synapse:matrix.org?label=support&logo=matrix
|
||||
:alt: (get support on #synapse:matrix.org)
|
||||
:target: https://matrix.to/#/#synapse:matrix.org
|
||||
|
||||
.. contents::
|
||||
|
||||
@@ -243,8 +247,6 @@ Then update the ``users`` table in the database::
|
||||
Synapse Development
|
||||
===================
|
||||
|
||||
Join our developer community on Matrix: `#synapse-dev:matrix.org <https://matrix.to/#/#synapse-dev:matrix.org>`_
|
||||
|
||||
Before setting up a development environment for synapse, make sure you have the
|
||||
system dependencies (such as the python header files) installed - see
|
||||
`Installing from source <INSTALL.md#installing-from-source>`_.
|
||||
@@ -258,48 +260,23 @@ directory of your choice::
|
||||
Synapse has a number of external dependencies, that are easiest
|
||||
to install using pip and a virtualenv::
|
||||
|
||||
python3 -m venv ./env
|
||||
source ./env/bin/activate
|
||||
pip install -e ".[all,test]"
|
||||
virtualenv -p python3 env
|
||||
source env/bin/activate
|
||||
python -m pip install --no-use-pep517 -e ".[all]"
|
||||
|
||||
This will run a process of downloading and installing all the needed
|
||||
dependencies into a virtual env. If any dependencies fail to install,
|
||||
try installing the failing modules individually::
|
||||
dependencies into a virtual env.
|
||||
|
||||
pip install -e "module-name"
|
||||
|
||||
Once this is done, you may wish to run Synapse's unit tests to
|
||||
check that everything is installed correctly::
|
||||
Once this is done, you may wish to run Synapse's unit tests, to
|
||||
check that everything is installed as it should be::
|
||||
|
||||
python -m twisted.trial tests
|
||||
|
||||
This should end with a 'PASSED' result (note that exact numbers will
|
||||
differ)::
|
||||
|
||||
Ran 1337 tests in 716.064s
|
||||
|
||||
PASSED (skips=15, successes=1322)
|
||||
|
||||
We recommend using the demo which starts 3 federated instances running on ports `8080` - `8082`
|
||||
|
||||
./demo/start.sh
|
||||
|
||||
(to stop, you can use `./demo/stop.sh`)
|
||||
|
||||
If you just want to start a single instance of the app and run it directly::
|
||||
|
||||
# Create the homeserver.yaml config once
|
||||
python -m synapse.app.homeserver \
|
||||
--server-name my.domain.name \
|
||||
--config-path homeserver.yaml \
|
||||
--generate-config \
|
||||
--report-stats=[yes|no]
|
||||
|
||||
# Start the app
|
||||
python -m synapse.app.homeserver --config-path homeserver.yaml
|
||||
|
||||
This should end with a 'PASSED' result::
|
||||
|
||||
Ran 143 tests in 0.601s
|
||||
|
||||
PASSED (successes=143)
|
||||
|
||||
Running the Integration Tests
|
||||
=============================
|
||||
@@ -313,6 +290,19 @@ Testing with SyTest is recommended for verifying that changes related to the
|
||||
Client-Server API are functioning correctly. See the `installation instructions
|
||||
<https://github.com/matrix-org/sytest#installing>`_ for details.
|
||||
|
||||
Building Internal API Documentation
|
||||
===================================
|
||||
|
||||
Before building internal API documentation install sphinx and
|
||||
sphinxcontrib-napoleon::
|
||||
|
||||
pip install sphinx
|
||||
pip install sphinxcontrib-napoleon
|
||||
|
||||
Building internal API documentation::
|
||||
|
||||
python setup.py build_sphinx
|
||||
|
||||
Troubleshooting
|
||||
===============
|
||||
|
||||
@@ -397,23 +387,3 @@ something like the following in their logs::
|
||||
|
||||
This is normally caused by a misconfiguration in your reverse-proxy. See
|
||||
`<docs/reverse_proxy.md>`_ and double-check that your settings are correct.
|
||||
|
||||
.. |support| image:: https://img.shields.io/matrix/synapse:matrix.org?label=support&logo=matrix
|
||||
:alt: (get support on #synapse:matrix.org)
|
||||
:target: https://matrix.to/#/#synapse:matrix.org
|
||||
|
||||
.. |development| image:: https://img.shields.io/matrix/synapse-dev:matrix.org?label=development&logo=matrix
|
||||
:alt: (discuss development on #synapse-dev:matrix.org)
|
||||
:target: https://matrix.to/#/#synapse-dev:matrix.org
|
||||
|
||||
.. |license| image:: https://img.shields.io/github/license/matrix-org/synapse
|
||||
:alt: (check license in LICENSE file)
|
||||
:target: LICENSE
|
||||
|
||||
.. |pypi| image:: https://img.shields.io/pypi/v/matrix-synapse
|
||||
:alt: (latest version released on PyPi)
|
||||
:target: https://pypi.org/project/matrix-synapse
|
||||
|
||||
.. |python| image:: https://img.shields.io/pypi/pyversions/matrix-synapse
|
||||
:alt: (supported python versions)
|
||||
:target: https://pypi.org/project/matrix-synapse
|
||||
|
||||
304 UPGRADE.rst
@@ -5,16 +5,6 @@ Before upgrading check if any special steps are required to upgrade from the
|
||||
version you currently have installed to the current version of Synapse. The extra
|
||||
instructions that may be required are listed later in this document.
|
||||
|
||||
* Check that your versions of Python and PostgreSQL are still supported.
|
||||
|
||||
Synapse follows upstream lifecycles for `Python`_ and `PostgreSQL`_, and
|
||||
removes support for versions which are no longer maintained.
|
||||
|
||||
The website https://endoflife.date also offers convenient summaries.
|
||||
|
||||
.. _Python: https://devguide.python.org/devcycle/#end-of-life-branches
|
||||
.. _PostgreSQL: https://www.postgresql.org/support/versioning/
|
||||
|
||||
* If Synapse was installed using `prebuilt packages
|
||||
<INSTALL.md#prebuilt-packages>`_, you will need to follow the normal process
|
||||
for upgrading those packages.
|
||||
@@ -85,300 +75,6 @@ for example:
|
||||
wget https://packages.matrix.org/debian/pool/main/m/matrix-synapse-py3/matrix-synapse-py3_1.3.0+stretch1_amd64.deb
|
||||
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
|
||||
|
||||
Upgrading to v1.29.0
|
||||
====================
|
||||
|
||||
Requirement for X-Forwarded-Proto header
|
||||
----------------------------------------
|
||||
|
||||
When using Synapse with a reverse proxy (in particular, when using the
|
||||
`x_forwarded` option on an HTTP listener), Synapse now expects to receive an
|
||||
`X-Forwarded-Proto` header on incoming HTTP requests. If it is not set, Synapse
|
||||
will log a warning on each received request.
|
||||
|
||||
To avoid the warning, administrators using a reverse proxy should ensure that
|
||||
the reverse proxy sets `X-Forwarded-Proto` header to `https` or `http` to
|
||||
indicate the protocol used by the client. See the [reverse proxy
|
||||
documentation](docs/reverse_proxy.md), where the example configurations have
|
||||
been updated to show how to set this header.
|
||||
|
||||
(Users of `Caddy <https://caddyserver.com/>`_ are unaffected, since we believe it
|
||||
sets `X-Forwarded-Proto` by default.)
|
||||
|
||||
Upgrading to v1.27.0
|
||||
====================
|
||||
|
||||
Changes to callback URI for OAuth2 / OpenID Connect and SAML2
|
||||
-------------------------------------------------------------
|
||||
|
||||
This version changes the URI used for callbacks from OAuth2 and SAML2 identity providers:
|
||||
|
||||
* If your server is configured for single sign-on via an OpenID Connect or OAuth2 identity
|
||||
provider, you will need to add ``[synapse public baseurl]/_synapse/client/oidc/callback``
|
||||
to the list of permitted "redirect URIs" at the identity provider.
|
||||
|
||||
See `docs/openid.md <docs/openid.md>`_ for more information on setting up OpenID
|
||||
Connect.
|
||||
|
||||
* If your server is configured for single sign-on via a SAML2 identity provider, you will
|
||||
need to add ``[synapse public baseurl]/_synapse/client/saml2/authn_response`` as a permitted
|
||||
"ACS location" (also known as "allowed callback URLs") at the identity provider.
|
||||
|
||||
Changes to HTML templates
|
||||
-------------------------
|
||||
|
||||
The HTML templates for SSO and email notifications now have `Jinja2's autoescape <https://jinja.palletsprojects.com/en/2.11.x/api/#autoescaping>`_
|
||||
enabled for files ending in ``.html``, ``.htm``, and ``.xml``. If you have customised
|
||||
these templates and see issues when viewing them you might need to update them.
|
||||
It is expected that most configurations will need no changes.
|
||||
|
||||
If you have customised the templates *names* for these templates, it is recommended
|
||||
to verify they end in ``.html`` to ensure autoescape is enabled.
|
||||
|
||||
The above applies to the following templates:
|
||||
|
||||
* ``add_threepid.html``
|
||||
* ``add_threepid_failure.html``
|
||||
* ``add_threepid_success.html``
|
||||
* ``notice_expiry.html``
|
||||
* ``notice_expiry.html``
|
||||
* ``notif_mail.html`` (which, by default, includes ``room.html`` and ``notif.html``)
|
||||
* ``password_reset.html``
|
||||
* ``password_reset_confirmation.html``
|
||||
* ``password_reset_failure.html``
|
||||
* ``password_reset_success.html``
|
||||
* ``registration.html``
|
||||
* ``registration_failure.html``
|
||||
* ``registration_success.html``
|
||||
* ``sso_account_deactivated.html``
|
||||
* ``sso_auth_bad_user.html``
|
||||
* ``sso_auth_confirm.html``
|
||||
* ``sso_auth_success.html``
|
||||
* ``sso_error.html``
|
||||
* ``sso_login_idp_picker.html``
|
||||
* ``sso_redirect_confirm.html``
|
||||
|
||||
Upgrading to v1.26.0
====================

Rolling back to v1.25.0 after a failed upgrade
-----------------------------------------------

v1.26.0 includes a lot of large changes. If something problematic occurs, you
may want to roll back to a previous version of Synapse. Because v1.26.0 also
includes a new database schema version, reverting that version is also required
alongside the generic rollback instructions mentioned above. In short, to roll
back to v1.25.0 you need to:

1. Stop the server

2. Decrease the schema version in the database:

   .. code:: sql

      UPDATE schema_version SET version = 58;

3. Delete the ignored users & chain cover data:

   .. code:: sql

      DROP TABLE IF EXISTS ignored_users;
      UPDATE rooms SET has_auth_chain_index = false;

   For PostgreSQL run:

   .. code:: sql

      TRUNCATE event_auth_chain_links;
      TRUNCATE event_auth_chains;

   For SQLite run:

   .. code:: sql

      DELETE FROM event_auth_chain_links;
      DELETE FROM event_auth_chains;

4. Mark the deltas as not run (so they will re-run on upgrade):

   .. code:: sql

      DELETE FROM applied_schema_deltas WHERE version = 59 AND file = '59/01ignored_user.py';
      DELETE FROM applied_schema_deltas WHERE version = 59 AND file = '59/06chain_cover_index.sql';

5. Downgrade Synapse by following the instructions for your installation method
   in the "Rolling back to older versions" section above.

Upgrading to v1.25.0
====================

Last release supporting Python 3.5
-----------------------------------

This is the last release of Synapse which guarantees support for Python 3.5,
which passed its upstream End of Life date several months ago.

We will attempt to maintain support through March 2021, but without guarantees.

In the future, Synapse will follow upstream schedules for ending support of
older versions of Python and PostgreSQL. Please upgrade to at least Python 3.6
and PostgreSQL 9.6 as soon as possible.

Blacklisting IP ranges
-----------------------

Synapse v1.25.0 includes new settings, ``ip_range_blacklist`` and
``ip_range_whitelist``, for controlling outgoing requests from Synapse for federation,
identity servers, push, and for checking key validity for third-party invite events.
The previous setting, ``federation_ip_range_blacklist``, is deprecated. The new
``ip_range_blacklist`` defaults to private IP ranges if it is not defined.

If you have never customised ``federation_ip_range_blacklist`` it is recommended
that you remove that setting.

If you have customised ``federation_ip_range_blacklist`` you should update the
setting name to ``ip_range_blacklist``.

If you have a custom push server that is reached via private IP space you may
need to customise ``ip_range_blacklist`` or ``ip_range_whitelist``.

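As a rough sketch of the rename (the ranges shown are examples, not Synapse's built-in
default list):

.. code-block:: yaml

    # Before (deprecated as of v1.25.0):
    #federation_ip_range_blacklist:
    #  - '10.0.0.0/8'
    #  - '192.168.0.0/16'

    # After: the same ranges under the new name, which now also covers identity
    # servers, push and third-party invite key validity checks.
    ip_range_blacklist:
      - '10.0.0.0/8'
      - '192.168.0.0/16'

    # Hypothetical carve-out for an internal push gateway that must stay reachable.
    ip_range_whitelist:
      - '192.168.1.5/32'
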
Upgrading to v1.24.0
====================

Custom OpenID Connect mapping provider breaking change
--------------------------------------------------------

This release allows the OpenID Connect mapping provider to perform normalisation
of the localpart of the Matrix ID. This allows for the mapping provider to
specify different algorithms, instead of the `default way
<https://matrix.org/docs/spec/appendices#mapping-from-other-character-sets>`_.

If your Synapse configuration uses a custom mapping provider
(``oidc_config.user_mapping_provider.module`` is specified and not equal to
``synapse.handlers.oidc_handler.JinjaOidcMappingProvider``) then you *must* ensure
that ``map_user_attributes`` of the mapping provider performs some normalisation
of the ``localpart`` returned. To match previous behaviour you can use the
``map_username_to_mxid_localpart`` function provided by Synapse. An example is
shown below:

.. code-block:: python

    from synapse.types import map_username_to_mxid_localpart


    class MyMappingProvider:
        def map_user_attributes(self, userinfo, token):
            # ... your custom logic ...
            sso_user_id = ...
            localpart = map_username_to_mxid_localpart(sso_user_id)

            return {"localpart": localpart}

Removal of historical Synapse Admin API
----------------------------------------

Historically, the Synapse Admin API has been accessible under:

* ``/_matrix/client/api/v1/admin``
* ``/_matrix/client/unstable/admin``
* ``/_matrix/client/r0/admin``
* ``/_synapse/admin/v1``

The endpoints with ``/_matrix/client/*`` prefixes have been removed as of v1.24.0.
The Admin API is now only accessible under:

* ``/_synapse/admin/v1``

The only exception is the ``/admin/whois`` endpoint, which is
`also available via the client-server API <https://matrix.org/docs/spec/client_server/r0.6.1#get-matrix-client-r0-admin-whois-userid>`_.

The deprecation of the old endpoints was announced with Synapse 1.20.0 (released
on 2020-09-22) and makes it easier for homeserver admins to lock down external
access to the Admin API endpoints.

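For example, the ``whois`` Admin API is reached under the ``/_synapse/admin`` prefix
as follows (the hostname, user ID and access token are placeholders):

.. code-block:: sh

    curl --header "Authorization: Bearer <admin_access_token>" \
        "https://matrix.example.com/_synapse/admin/v1/whois/@alice:example.com"
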
Upgrading to v1.23.0
====================

Structured logging configuration breaking changes
---------------------------------------------------

This release deprecates use of the ``structured: true`` logging configuration for
structured logging. If your logging configuration contains ``structured: true``
then it should be modified based on the `structured logging documentation
<https://github.com/matrix-org/synapse/blob/master/docs/structured_logging.md>`_.

The ``structured`` and ``drains`` logging options are now deprecated and should
be replaced by standard logging configuration of ``handlers`` and ``formatters``.

A future release of Synapse will make using ``structured: true`` an error.

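As a rough sketch of the shape of the replacement (the file path is a placeholder, and
the formatter class should be taken from the structured logging documentation linked
above):

.. code-block:: yaml

    # Deprecated style, to be removed in a future release:
    #structured: true
    #drains:
    #  ...

    # Replacement style, using standard Python logging configuration:
    version: 1
    formatters:
      structured:
        class: synapse.logging.TerseJsonFormatter
    handlers:
      file:
        class: logging.handlers.TimedRotatingFileHandler
        formatter: structured
        filename: /var/log/matrix-synapse/homeserver.log
        when: midnight
        backupCount: 3
    loggers:
      synapse:
        level: INFO
    root:
      level: INFO
      handlers: [file]
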
Upgrading to v1.22.0
====================

ThirdPartyEventRules breaking changes
--------------------------------------

This release introduces a backwards-incompatible change to modules making use of
``ThirdPartyEventRules`` in Synapse. If you make use of a module defined under the
``third_party_event_rules`` config option, please make sure it is updated to handle
the change described below.

The ``http_client`` argument is no longer passed to modules as they are initialised. Instead,
modules are expected to make use of the ``http_client`` property on the ``ModuleApi`` class.
Modules are now passed a ``module_api`` argument during initialisation, which is an instance of
``ModuleApi``. ``ModuleApi`` instances have a ``http_client`` property which acts the same as
the ``http_client`` argument previously passed to ``ThirdPartyEventRules`` modules.

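A minimal sketch of a module adapted to the new calling convention (the module name,
config handling and webhook URL are illustrative only):

.. code-block:: python

    class ExampleEventRulesModule:
        def __init__(self, config, module_api):
            # Previously this constructor received ``http_client`` directly; it is
            # now obtained from the ModuleApi instance instead.
            self._config = config
            self._api = module_api
            self._http_client = module_api.http_client

        async def check_event_allowed(self, event, state_events):
            # Hypothetical use of the HTTP client: notify an external service
            # about the event before letting it through.
            await self._http_client.post_json_get_json(
                "https://rules.example.com/check", {"event_id": event.event_id}
            )
            return True
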
Upgrading to v1.21.0
====================

Forwarding ``/_synapse/client`` through your reverse proxy
------------------------------------------------------------

The `reverse proxy documentation
<https://github.com/matrix-org/synapse/blob/develop/docs/reverse_proxy.md>`_ has been updated
to include reverse proxy directives for ``/_synapse/client/*`` endpoints. As the user password
reset flow now uses endpoints under this prefix, **you must update your reverse proxy
configurations for user password reset to work**.

Additionally, note that the `Synapse worker documentation
<https://github.com/matrix-org/synapse/blob/develop/docs/workers.md>`_ has been updated to
state that the ``/_synapse/client/password_reset/email/submit_token`` endpoint can be handled
by all workers. If you make use of Synapse's worker feature, please update your reverse proxy
configuration to reflect this change.

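With nginx, for instance, this typically means widening the existing ``/_matrix``
location so that it also matches ``/_synapse/client``. A sketch only; follow the
reverse proxy documentation linked above for your exact setup:

.. code-block:: nginx

    location ~ ^(/_matrix|/_synapse/client) {
        proxy_pass http://localhost:8008;
        proxy_set_header X-Forwarded-For $remote_addr;
        proxy_set_header Host $host;
    }
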
New HTML templates
------------------

A new HTML template,
`password_reset_confirmation.html <https://github.com/matrix-org/synapse/blob/develop/synapse/res/templates/password_reset_confirmation.html>`_,
has been added to the ``synapse/res/templates`` directory. If you are using a
custom template directory, you may want to copy the template over and modify it.

Note that as of v1.20.0, templates do not need to be included in custom template
directories for Synapse to start. The default templates will be used if a custom
template cannot be found.

This page will appear to the user after clicking a password reset link that has
been emailed to them.

To complete password reset, the page must include a way to make a ``POST``
request to
``/_synapse/client/password_reset/{medium}/submit_token``
with the query parameters from the original link, presented as a URL-encoded form. See the file
itself for more details.

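A skeleton of such a form might look like the following. The hidden field names mirror
the query parameters of the emailed link; treat them as illustrative and check the
default template linked above for the exact values Synapse passes in:

.. code-block:: html

    <form method="post"
          action="/_synapse/client/password_reset/email/submit_token">
        <!-- Re-submit the parameters that arrived in the link's query string. -->
        <input type="hidden" name="token" value="{{ token }}">
        <input type="hidden" name="client_secret" value="{{ client_secret }}">
        <input type="hidden" name="sid" value="{{ sid }}">
        <input type="submit" value="Confirm changing my password">
    </form>
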
Updated Single Sign-on HTML Templates
--------------------------------------

The ``saml_error.html`` template was removed from Synapse and replaced with the
``sso_error.html`` template. If your Synapse is configured to use SAML and a
custom ``sso_redirect_confirm_template_dir`` configuration then any customisations
of the ``saml_error.html`` template will need to be merged into the ``sso_error.html``
template. These templates are similar, but the parameters are slightly different:

* The ``msg`` parameter should be renamed to ``error_description``.
* There is no longer a ``code`` parameter for the response code.
* A string ``error`` parameter is available that includes a short hint of why a
  user is seeing the error page.

Upgrading to v1.18.0
====================

1  changelog.d/7864.bugfix  Normal file
@@ -0,0 +1 @@
Fix a memory leak by limiting the length of time that messages will be queued for a remote server that has been unreachable.

1  changelog.d/8013.feature  Normal file
@@ -0,0 +1 @@
Iteratively encode JSON to avoid blocking the reactor.

1  changelog.d/8037.feature  Normal file
@@ -0,0 +1 @@
Use the default template file when its equivalent is not found in a custom template directory.

1  changelog.d/8072.misc  Normal file
@@ -0,0 +1 @@
Convert various parts of the codebase to async/await.

1  changelog.d/8074.misc  Normal file
@@ -0,0 +1 @@
Convert various parts of the codebase to async/await.

1  changelog.d/8075.misc  Normal file
@@ -0,0 +1 @@
Convert various parts of the codebase to async/await.

1  changelog.d/8076.misc  Normal file
@@ -0,0 +1 @@
Convert various parts of the codebase to async/await.

1  changelog.d/8081.bugfix  Normal file
@@ -0,0 +1 @@
Fix `Re-starting finished log context PUT-nnnn` warning when event persistence failed.

1  changelog.d/8085.misc  Normal file
@@ -0,0 +1 @@
Remove some unused database functions.

1  changelog.d/8087.misc  Normal file
@@ -0,0 +1 @@
Convert various parts of the codebase to async/await.

1  changelog.d/8090.misc  Normal file
@@ -0,0 +1 @@
Add type hints to `synapse.handlers.room`.

1  changelog.d/8092.feature  Normal file
@@ -0,0 +1 @@
Add support for shadow-banning users (ignoring any message send requests).

1  changelog.d/8093.misc  Normal file
@@ -0,0 +1 @@
Return the previous stream token if a non-member event is a duplicate.

1  changelog.d/8100.misc  Normal file
@@ -0,0 +1 @@
Convert various parts of the codebase to async/await.

1  changelog.d/8101.bugfix  Normal file
@@ -0,0 +1 @@
Synapse now correctly enforces the valid characters in the `client_secret` parameter used in various endpoints.

1  changelog.d/8107.feature  Normal file
@@ -0,0 +1 @@
Use the default template file when its equivalent is not found in a custom template directory.

1  changelog.d/8111.doc  Normal file
@@ -0,0 +1 @@
Link to matrix-synapse-rest-password-provider in the password provider documentation.

1  changelog.d/8112.misc  Normal file
@@ -0,0 +1 @@
Return the previous stream token if a non-member event is a duplicate.

@@ -1 +0,0 @@
Temporarily drop cross-user m.room_key_request to_device messages over performance concerns.

@@ -1 +0,0 @@
Add rate limiters to cross-user key sharing requests.

@@ -1 +0,0 @@
Add `order_by` to the admin API `GET /_synapse/admin/v1/users/<user_id>/media`. Contributed by @dklimpel.

@@ -1 +0,0 @@
Add some configuration settings to make users' profile data more private.

@@ -1 +0,0 @@
Fix a bug where users' pushers were not all deleted when they deactivated their account.

@@ -1 +0,0 @@
Added a fix that invalidates cache for empty timed-out sync responses.

@@ -1 +0,0 @@
Add a configuration option, `user_directory.prefer_local_users`, which when enabled will make it more likely for users on the same server as you to appear above other users.

@@ -1 +0,0 @@
Add a configuration option, `user_directory.prefer_local_users`, which when enabled will make it more likely for users on the same server as you to appear above other users.

@@ -1 +0,0 @@
Fix a bug where a lot of unnecessary presence updates were sent when joining a room.

@@ -1 +0,0 @@
Fix a bug that caused multiple calls to the experimental `shared_rooms` endpoint to return stale results.

@@ -1 +0,0 @@
Add documentation and type hints to `parse_duration`.

@@ -1 +0,0 @@
Fix a bug in single sign-on which could cause a "No session cookie found" error.

@@ -1 +0,0 @@
Add support for regenerating thumbnails if they have been deleted but the original image is still stored.

@@ -1 +0,0 @@
Fix bug introduced in v1.27.0 where allowing a user to choose their own username when logging in via single sign-on did not work unless an `idp_icon` was defined.

@@ -1 +0,0 @@
Fix a bug introduced in v1.26.0 where some sequences were not properly configured when running `synapse_port_db`.

@@ -1 +0,0 @@
Remove vestiges of `uploads_path` configuration setting.

@@ -1 +0,0 @@
Update the example systemd config to propagate reloads to individual units.

@@ -1 +0,0 @@
Add a comment about systemd-python.

@@ -1 +0,0 @@
Fix deleting pushers when using sharded pushers.

@@ -1 +0,0 @@
Fix deleting pushers when using sharded pushers.

@@ -1 +0,0 @@
Fix missing startup checks for the consistency of certain PostgreSQL sequences.

@@ -1 +0,0 @@
Add support for `X-Forwarded-Proto` header when using a reverse proxy.

@@ -1 +0,0 @@
Fix deleting pushers when using sharded pushers.

@@ -1 +0,0 @@
Test that we require validated email for email pushers.

@@ -1 +0,0 @@
Add support for `X-Forwarded-Proto` header when using a reverse proxy.

@@ -15,6 +15,8 @@
|
||||
# limitations under the License.
|
||||
|
||||
""" Starts a synapse client console. """
|
||||
from __future__ import print_function
|
||||
|
||||
import argparse
|
||||
import cmd
|
||||
import getpass
|
||||
@@ -92,7 +94,7 @@ class SynapseCmd(cmd.Cmd):
|
||||
return self.config["user"].split(":")[1]
|
||||
|
||||
def do_config(self, line):
|
||||
"""Show the config for this client: "config"
|
||||
""" Show the config for this client: "config"
|
||||
Edit a key value mapping: "config key value" e.g. "config token 1234"
|
||||
Config variables:
|
||||
user: The username to auth with.
|
||||
@@ -360,7 +362,7 @@ class SynapseCmd(cmd.Cmd):
|
||||
print(e)
|
||||
|
||||
def do_topic(self, line):
|
||||
""" "topic [set|get] <roomid> [<newtopic>]"
|
||||
""""topic [set|get] <roomid> [<newtopic>]"
|
||||
Set the topic for a room: topic set <roomid> <newtopic>
|
||||
Get the topic for a room: topic get <roomid>
|
||||
"""
|
||||
@@ -690,7 +692,7 @@ class SynapseCmd(cmd.Cmd):
|
||||
self._do_presence_state(2, line)
|
||||
|
||||
def _parse(self, line, keys, force_keys=False):
|
||||
"""Parses the given line.
|
||||
""" Parses the given line.
|
||||
|
||||
Args:
|
||||
line : The line to parse
|
||||
@@ -721,7 +723,7 @@ class SynapseCmd(cmd.Cmd):
|
||||
query_params={"access_token": None},
|
||||
alt_text=None,
|
||||
):
|
||||
"""Runs an HTTP request and pretty prints the output.
|
||||
""" Runs an HTTP request and pretty prints the output.
|
||||
|
||||
Args:
|
||||
method: HTTP method
|
||||
|
||||
@@ -13,6 +13,8 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import json
|
||||
import urllib
|
||||
from pprint import pformat
|
||||
@@ -22,11 +24,12 @@ from twisted.web.client import Agent, readBody
|
||||
from twisted.web.http_headers import Headers
|
||||
|
||||
|
||||
class HttpClient:
|
||||
"""Interface for talking json over http"""
|
||||
class HttpClient(object):
|
||||
""" Interface for talking json over http
|
||||
"""
|
||||
|
||||
def put_json(self, url, data):
|
||||
"""Sends the specifed json data using PUT
|
||||
""" Sends the specifed json data using PUT
|
||||
|
||||
Args:
|
||||
url (str): The URL to PUT data to.
|
||||
@@ -40,7 +43,7 @@ class HttpClient:
|
||||
pass
|
||||
|
||||
def get_json(self, url, args=None):
|
||||
"""Gets some json from the given host homeserver and path
|
||||
""" Gets some json from the given host homeserver and path
|
||||
|
||||
Args:
|
||||
url (str): The URL to GET data from.
|
||||
@@ -57,7 +60,7 @@ class HttpClient:
|
||||
|
||||
|
||||
class TwistedHttpClient(HttpClient):
|
||||
"""Wrapper around the twisted HTTP client api.
|
||||
""" Wrapper around the twisted HTTP client api.
|
||||
|
||||
Attributes:
|
||||
agent (twisted.web.client.Agent): The twisted Agent used to send the
|
||||
@@ -86,7 +89,8 @@ class TwistedHttpClient(HttpClient):
|
||||
defer.returnValue(json.loads(body))
|
||||
|
||||
def _create_put_request(self, url, json_data, headers_dict={}):
|
||||
"""Wrapper of _create_request to issue a PUT request"""
|
||||
""" Wrapper of _create_request to issue a PUT request
|
||||
"""
|
||||
|
||||
if "Content-Type" not in headers_dict:
|
||||
raise defer.error(RuntimeError("Must include Content-Type header for PUTs"))
|
||||
@@ -96,7 +100,8 @@ class TwistedHttpClient(HttpClient):
|
||||
)
|
||||
|
||||
def _create_get_request(self, url, headers_dict={}):
|
||||
"""Wrapper of _create_request to issue a GET request"""
|
||||
""" Wrapper of _create_request to issue a GET request
|
||||
"""
|
||||
return self._create_request("GET", url, headers_dict=headers_dict)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
@@ -124,7 +129,8 @@ class TwistedHttpClient(HttpClient):
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def _create_request(self, method, url, producer=None, headers_dict={}):
|
||||
"""Creates and sends a request to the given url"""
|
||||
""" Creates and sends a request to the given url
|
||||
"""
|
||||
headers_dict["User-Agent"] = ["Synapse Cmd Client"]
|
||||
|
||||
retries_left = 5
|
||||
@@ -163,7 +169,7 @@ class TwistedHttpClient(HttpClient):
|
||||
return d
|
||||
|
||||
|
||||
class _RawProducer:
|
||||
class _RawProducer(object):
|
||||
def __init__(self, data):
|
||||
self.data = data
|
||||
self.body = data
|
||||
@@ -180,8 +186,9 @@ class _RawProducer:
|
||||
pass
|
||||
|
||||
|
||||
class _JsonProducer:
|
||||
"""Used by the twisted http client to create the HTTP body from json"""
|
||||
class _JsonProducer(object):
|
||||
""" Used by the twisted http client to create the HTTP body from json
|
||||
"""
|
||||
|
||||
def __init__(self, jsn):
|
||||
self.data = jsn
|
||||
|
||||
@@ -63,7 +63,8 @@ class CursesStdIO:
|
||||
self.redraw()
|
||||
|
||||
def redraw(self):
|
||||
"""method for redisplaying lines based on internal list of lines"""
|
||||
""" method for redisplaying lines
|
||||
based on internal list of lines """
|
||||
|
||||
self.stdscr.clear()
|
||||
self.paintStatus(self.statusText)
|
||||
@@ -140,7 +141,7 @@ class CursesStdIO:
|
||||
curses.endwin()
|
||||
|
||||
|
||||
class Callback:
|
||||
class Callback(object):
|
||||
def __init__(self, stdio):
|
||||
self.stdio = stdio
|
||||
|
||||
|
||||
@@ -55,8 +55,8 @@ def excpetion_errback(failure):
|
||||
logging.exception(failure)
|
||||
|
||||
|
||||
class InputOutput:
|
||||
"""This is responsible for basic I/O so that a user can interact with
|
||||
class InputOutput(object):
|
||||
""" This is responsible for basic I/O so that a user can interact with
|
||||
the example app.
|
||||
"""
|
||||
|
||||
@@ -68,7 +68,8 @@ class InputOutput:
|
||||
self.server = server
|
||||
|
||||
def on_line(self, line):
|
||||
"""This is where we process commands."""
|
||||
""" This is where we process commands.
|
||||
"""
|
||||
|
||||
try:
|
||||
m = re.match(r"^join (\S+)$", line)
|
||||
@@ -131,8 +132,8 @@ class IOLoggerHandler(logging.Handler):
|
||||
self.io.print_log(msg)
|
||||
|
||||
|
||||
class Room:
|
||||
"""Used to store (in memory) the current membership state of a room, and
|
||||
class Room(object):
|
||||
""" Used to store (in memory) the current membership state of a room, and
|
||||
which home servers we should send PDUs associated with the room to.
|
||||
"""
|
||||
|
||||
@@ -147,7 +148,8 @@ class Room:
|
||||
self.have_got_metadata = False
|
||||
|
||||
def add_participant(self, participant):
|
||||
"""Someone has joined the room"""
|
||||
""" Someone has joined the room
|
||||
"""
|
||||
self.participants.add(participant)
|
||||
self.invited.discard(participant)
|
||||
|
||||
@@ -158,13 +160,14 @@ class Room:
|
||||
self.oldest_server = server
|
||||
|
||||
def add_invited(self, invitee):
|
||||
"""Someone has been invited to the room"""
|
||||
""" Someone has been invited to the room
|
||||
"""
|
||||
self.invited.add(invitee)
|
||||
self.servers.add(origin_from_ucid(invitee))
|
||||
|
||||
|
||||
class HomeServer(ReplicationHandler):
|
||||
"""A very basic home server implentation that allows people to join a
|
||||
""" A very basic home server implentation that allows people to join a
|
||||
room and then invite other people.
|
||||
"""
|
||||
|
||||
@@ -178,7 +181,8 @@ class HomeServer(ReplicationHandler):
|
||||
self.output = output
|
||||
|
||||
def on_receive_pdu(self, pdu):
|
||||
"""We just received a PDU"""
|
||||
""" We just received a PDU
|
||||
"""
|
||||
pdu_type = pdu.pdu_type
|
||||
|
||||
if pdu_type == "sy.room.message":
|
||||
@@ -195,20 +199,23 @@ class HomeServer(ReplicationHandler):
|
||||
)
|
||||
|
||||
def _on_message(self, pdu):
|
||||
"""We received a message"""
|
||||
""" We received a message
|
||||
"""
|
||||
self.output.print_line(
|
||||
"#%s %s %s" % (pdu.context, pdu.content["sender"], pdu.content["body"])
|
||||
)
|
||||
|
||||
def _on_join(self, context, joinee):
|
||||
"""Someone has joined a room, either a remote user or a local user"""
|
||||
""" Someone has joined a room, either a remote user or a local user
|
||||
"""
|
||||
room = self._get_or_create_room(context)
|
||||
room.add_participant(joinee)
|
||||
|
||||
self.output.print_line("#%s %s %s" % (context, joinee, "*** JOINED"))
|
||||
|
||||
def _on_invite(self, origin, context, invitee):
|
||||
"""Someone has been invited"""
|
||||
""" Someone has been invited
|
||||
"""
|
||||
room = self._get_or_create_room(context)
|
||||
room.add_invited(invitee)
|
||||
|
||||
@@ -221,7 +228,8 @@ class HomeServer(ReplicationHandler):
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def send_message(self, room_name, sender, body):
|
||||
"""Send a message to a room!"""
|
||||
""" Send a message to a room!
|
||||
"""
|
||||
destinations = yield self.get_servers_for_context(room_name)
|
||||
|
||||
try:
|
||||
@@ -239,7 +247,8 @@ class HomeServer(ReplicationHandler):
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def join_room(self, room_name, sender, joinee):
|
||||
"""Join a room!"""
|
||||
""" Join a room!
|
||||
"""
|
||||
self._on_join(room_name, joinee)
|
||||
|
||||
destinations = yield self.get_servers_for_context(room_name)
|
||||
@@ -260,7 +269,8 @@ class HomeServer(ReplicationHandler):
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def invite_to_room(self, room_name, sender, invitee):
|
||||
"""Invite someone to a room!"""
|
||||
""" Invite someone to a room!
|
||||
"""
|
||||
self._on_invite(self.server_name, room_name, invitee)
|
||||
|
||||
destinations = yield self.get_servers_for_context(room_name)
|
||||
|
||||
@@ -3,4 +3,4 @@
|
||||
0. Set up Prometheus and Grafana. Out of scope for this readme. Useful documentation about using Grafana with Prometheus: http://docs.grafana.org/features/datasources/prometheus/
|
||||
1. Have your Prometheus scrape your Synapse. https://github.com/matrix-org/synapse/blob/master/docs/metrics-howto.md
|
||||
2. Import dashboard into Grafana. Download `synapse.json`. Import it to Grafana and select the correct Prometheus datasource. http://docs.grafana.org/reference/export_import/
|
||||
3. Set up required recording rules. https://github.com/matrix-org/synapse/tree/master/contrib/prometheus
|
||||
3. Set up additional recording rules
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
from __future__ import print_function
|
||||
|
||||
import argparse
|
||||
import cgi
|
||||
import datetime
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
from __future__ import print_function
|
||||
|
||||
import argparse
|
||||
import cgi
|
||||
import datetime
|
||||
|
||||
@@ -10,6 +10,8 @@ the bridge.
|
||||
Requires:
|
||||
npm install jquery jsdom
|
||||
"""
|
||||
from __future__ import print_function
|
||||
|
||||
import json
|
||||
import subprocess
|
||||
import time
|
||||
@@ -193,12 +195,15 @@ class TrivialXmppClient:
|
||||
time.sleep(7)
|
||||
print("SSRC spammer started")
|
||||
while self.running:
|
||||
ssrcMsg = "<presence to='%(tojid)s' xmlns='jabber:client'><x xmlns='http://jabber.org/protocol/muc'/><c xmlns='http://jabber.org/protocol/caps' hash='sha-1' node='http://jitsi.org/jitsimeet' ver='0WkSdhFnAUxrz4ImQQLdB80GFlE='/><nick xmlns='http://jabber.org/protocol/nick'>%(nick)s</nick><stats xmlns='http://jitsi.org/jitmeet/stats'><stat name='bitrate_download' value='175'/><stat name='bitrate_upload' value='176'/><stat name='packetLoss_total' value='0'/><stat name='packetLoss_download' value='0'/><stat name='packetLoss_upload' value='0'/></stats><media xmlns='http://estos.de/ns/mjs'><source type='audio' ssrc='%(assrc)s' direction='sendre'/><source type='video' ssrc='%(vssrc)s' direction='sendre'/></media></presence>" % {
|
||||
"tojid": "%s@%s/%s" % (ROOMNAME, ROOMDOMAIN, self.shortJid),
|
||||
"nick": self.userId,
|
||||
"assrc": self.ssrcs["audio"],
|
||||
"vssrc": self.ssrcs["video"],
|
||||
}
|
||||
ssrcMsg = (
|
||||
"<presence to='%(tojid)s' xmlns='jabber:client'><x xmlns='http://jabber.org/protocol/muc'/><c xmlns='http://jabber.org/protocol/caps' hash='sha-1' node='http://jitsi.org/jitsimeet' ver='0WkSdhFnAUxrz4ImQQLdB80GFlE='/><nick xmlns='http://jabber.org/protocol/nick'>%(nick)s</nick><stats xmlns='http://jitsi.org/jitmeet/stats'><stat name='bitrate_download' value='175'/><stat name='bitrate_upload' value='176'/><stat name='packetLoss_total' value='0'/><stat name='packetLoss_download' value='0'/><stat name='packetLoss_upload' value='0'/></stats><media xmlns='http://estos.de/ns/mjs'><source type='audio' ssrc='%(assrc)s' direction='sendre'/><source type='video' ssrc='%(vssrc)s' direction='sendre'/></media></presence>"
|
||||
% {
|
||||
"tojid": "%s@%s/%s" % (ROOMNAME, ROOMDOMAIN, self.shortJid),
|
||||
"nick": self.userId,
|
||||
"assrc": self.ssrcs["audio"],
|
||||
"vssrc": self.ssrcs["video"],
|
||||
}
|
||||
)
|
||||
res = self.sendIq(ssrcMsg)
|
||||
print("reply from ssrc announce: ", res)
|
||||
time.sleep(10)
|
||||
|
||||
@@ -20,7 +20,6 @@ Add a new job to the main prometheus.conf file:
|
||||
```
|
||||
|
||||
### for Prometheus v2
|
||||
|
||||
Add a new job to the main prometheus.yml file:
|
||||
|
||||
```yaml
|
||||
@@ -30,17 +29,14 @@ Add a new job to the main prometheus.yml file:
|
||||
scheme: "https"
|
||||
|
||||
static_configs:
|
||||
- targets: ["my.server.here:port"]
|
||||
- targets: ['SERVER.LOCATION:PORT']
|
||||
```
|
||||
|
||||
An example of a Prometheus configuration with workers can be found in
|
||||
[metrics-howto.md](https://github.com/matrix-org/synapse/blob/master/docs/metrics-howto.md).
|
||||
|
||||
To use `synapse.rules` add
|
||||
|
||||
```yaml
|
||||
rule_files:
|
||||
- "/PATH/TO/synapse-v2.rules"
|
||||
rule_files:
|
||||
- "/PATH/TO/synapse-v2.rules"
|
||||
```
|
||||
|
||||
Metrics are disabled by default when running synapse; they must be enabled
|
||||
|
||||
@@ -9,7 +9,7 @@
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#process_resource_utime"),
|
||||
expr: "rate(process_cpu_seconds_total[2m]) * 100",
|
||||
name: "[[job]]-[[index]]",
|
||||
name: "[[job]]",
|
||||
min: 0,
|
||||
max: 100,
|
||||
renderer: "line",
|
||||
@@ -22,12 +22,12 @@ new PromConsole.Graph({
|
||||
</script>
|
||||
|
||||
<h3>Memory</h3>
|
||||
<div id="process_resident_memory_bytes"></div>
|
||||
<div id="process_resource_maxrss"></div>
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#process_resident_memory_bytes"),
|
||||
expr: "process_resident_memory_bytes",
|
||||
name: "[[job]]-[[index]]",
|
||||
node: document.querySelector("#process_resource_maxrss"),
|
||||
expr: "process_psutil_rss:max",
|
||||
name: "Maxrss",
|
||||
min: 0,
|
||||
renderer: "line",
|
||||
height: 150,
|
||||
@@ -43,8 +43,8 @@ new PromConsole.Graph({
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#process_fds"),
|
||||
expr: "process_open_fds",
|
||||
name: "[[job]]-[[index]]",
|
||||
expr: "process_open_fds{job='synapse'}",
|
||||
name: "FDs",
|
||||
min: 0,
|
||||
renderer: "line",
|
||||
height: 150,
|
||||
@@ -62,8 +62,8 @@ new PromConsole.Graph({
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#reactor_total_time"),
|
||||
expr: "rate(python_twisted_reactor_tick_time_sum[2m])",
|
||||
name: "[[job]]-[[index]]",
|
||||
expr: "rate(python_twisted_reactor_tick_time:total[2m]) / 1000",
|
||||
name: "time",
|
||||
max: 1,
|
||||
min: 0,
|
||||
renderer: "area",
|
||||
@@ -80,8 +80,8 @@ new PromConsole.Graph({
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#reactor_average_time"),
|
||||
expr: "rate(python_twisted_reactor_tick_time_sum[2m]) / rate(python_twisted_reactor_tick_time_count[2m])",
|
||||
name: "[[job]]-[[index]]",
|
||||
expr: "rate(python_twisted_reactor_tick_time:total[2m]) / rate(python_twisted_reactor_tick_time:count[2m]) / 1000",
|
||||
name: "time",
|
||||
min: 0,
|
||||
renderer: "line",
|
||||
height: 150,
|
||||
@@ -97,14 +97,14 @@ new PromConsole.Graph({
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#reactor_pending_calls"),
|
||||
expr: "rate(python_twisted_reactor_pending_calls_sum[30s]) / rate(python_twisted_reactor_pending_calls_count[30s])",
|
||||
name: "[[job]]-[[index]]",
|
||||
expr: "rate(python_twisted_reactor_pending_calls:total[30s])/rate(python_twisted_reactor_pending_calls:count[30s])",
|
||||
name: "calls",
|
||||
min: 0,
|
||||
renderer: "line",
|
||||
height: 150,
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yTitle: "Pending Calls"
|
||||
yTitle: "Pending Cals"
|
||||
})
|
||||
</script>
|
||||
|
||||
@@ -115,7 +115,7 @@ new PromConsole.Graph({
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#synapse_storage_query_time"),
|
||||
expr: "sum(rate(synapse_storage_query_time_count[2m])) by (verb)",
|
||||
expr: "rate(synapse_storage_query_time:count[2m])",
|
||||
name: "[[verb]]",
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||
@@ -129,8 +129,8 @@ new PromConsole.Graph({
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#synapse_storage_transaction_time"),
|
||||
expr: "topk(10, rate(synapse_storage_transaction_time_count[2m]))",
|
||||
name: "[[job]]-[[index]] [[desc]]",
|
||||
expr: "rate(synapse_storage_transaction_time:count[2m])",
|
||||
name: "[[desc]]",
|
||||
min: 0,
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||
@@ -140,12 +140,12 @@ new PromConsole.Graph({
|
||||
</script>
|
||||
|
||||
<h3>Transaction execution time</h3>
|
||||
<div id="synapse_storage_transactions_time_sec"></div>
|
||||
<div id="synapse_storage_transactions_time_msec"></div>
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#synapse_storage_transactions_time_sec"),
|
||||
expr: "rate(synapse_storage_transaction_time_sum[2m])",
|
||||
name: "[[job]]-[[index]] [[desc]]",
|
||||
node: document.querySelector("#synapse_storage_transactions_time_msec"),
|
||||
expr: "rate(synapse_storage_transaction_time:total[2m]) / 1000",
|
||||
name: "[[desc]]",
|
||||
min: 0,
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||
@@ -154,33 +154,34 @@ new PromConsole.Graph({
|
||||
})
|
||||
</script>
|
||||
|
||||
<h3>Average time waiting for database connection</h3>
|
||||
<div id="synapse_storage_avg_waiting_time"></div>
|
||||
<h3>Database scheduling latency</h3>
|
||||
<div id="synapse_storage_schedule_time"></div>
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#synapse_storage_avg_waiting_time"),
|
||||
expr: "rate(synapse_storage_schedule_time_sum[2m]) / rate(synapse_storage_schedule_time_count[2m])",
|
||||
name: "[[job]]-[[index]]",
|
||||
node: document.querySelector("#synapse_storage_schedule_time"),
|
||||
expr: "rate(synapse_storage_schedule_time:total[2m]) / 1000",
|
||||
name: "Total latency",
|
||||
min: 0,
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yUnits: "s",
|
||||
yTitle: "Time"
|
||||
yUnits: "s/s",
|
||||
yTitle: "Usage"
|
||||
})
|
||||
</script>
|
||||
|
||||
<h3>Cache request rate</h3>
|
||||
<div id="synapse_cache_request_rate"></div>
|
||||
<h3>Cache hit ratio</h3>
|
||||
<div id="synapse_cache_ratio"></div>
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#synapse_cache_request_rate"),
|
||||
expr: "rate(synapse_util_caches_cache:total[2m])",
|
||||
name: "[[job]]-[[index]] [[name]]",
|
||||
node: document.querySelector("#synapse_cache_ratio"),
|
||||
expr: "rate(synapse_util_caches_cache:total[2m]) * 100",
|
||||
name: "[[name]]",
|
||||
min: 0,
|
||||
max: 100,
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||
yUnits: "rps",
|
||||
yTitle: "Cache request rate"
|
||||
yUnits: "%",
|
||||
yTitle: "Percentage"
|
||||
})
|
||||
</script>
|
||||
|
||||
@@ -190,7 +191,7 @@ new PromConsole.Graph({
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#synapse_cache_size"),
|
||||
expr: "synapse_util_caches_cache:size",
|
||||
name: "[[job]]-[[index]] [[name]]",
|
||||
name: "[[name]]",
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||
yUnits: "",
|
||||
@@ -205,8 +206,8 @@ new PromConsole.Graph({
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#synapse_http_server_request_count_servlet"),
|
||||
expr: "rate(synapse_http_server_in_flight_requests_count[2m])",
|
||||
name: "[[job]]-[[index]] [[method]] [[servlet]]",
|
||||
expr: "rate(synapse_http_server_request_count:servlet[2m])",
|
||||
name: "[[servlet]]",
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yUnits: "req/s",
|
||||
@@ -218,8 +219,8 @@ new PromConsole.Graph({
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#synapse_http_server_request_count_servlet_minus_events"),
|
||||
expr: "rate(synapse_http_server_in_flight_requests_count{servlet!=\"EventStreamRestServlet\", servlet!=\"SyncRestServlet\"}[2m])",
|
||||
name: "[[job]]-[[index]] [[method]] [[servlet]]",
|
||||
expr: "rate(synapse_http_server_request_count:servlet{servlet!=\"EventStreamRestServlet\", servlet!=\"SyncRestServlet\"}[2m])",
|
||||
name: "[[servlet]]",
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yUnits: "req/s",
|
||||
@@ -232,8 +233,8 @@ new PromConsole.Graph({
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#synapse_http_server_response_time_avg"),
|
||||
expr: "rate(synapse_http_server_response_time_seconds_sum[2m]) / rate(synapse_http_server_response_count[2m])",
|
||||
name: "[[job]]-[[index]] [[servlet]]",
|
||||
expr: "rate(synapse_http_server_response_time_seconds[2m]) / rate(synapse_http_server_response_count[2m]) / 1000",
|
||||
name: "[[servlet]]",
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yUnits: "s/req",
|
||||
@@ -276,7 +277,7 @@ new PromConsole.Graph({
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#synapse_http_server_response_ru_utime"),
|
||||
expr: "rate(synapse_http_server_response_ru_utime_seconds[2m])",
|
||||
name: "[[job]]-[[index]] [[servlet]]",
|
||||
name: "[[servlet]]",
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yUnits: "s/s",
|
||||
@@ -291,7 +292,7 @@ new PromConsole.Graph({
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#synapse_http_server_response_db_txn_duration"),
|
||||
expr: "rate(synapse_http_server_response_db_txn_duration_seconds[2m])",
|
||||
name: "[[job]]-[[index]] [[servlet]]",
|
||||
name: "[[servlet]]",
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yUnits: "s/s",
|
||||
@@ -305,8 +306,8 @@ new PromConsole.Graph({
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#synapse_http_server_send_time_avg"),
|
||||
expr: "rate(synapse_http_server_response_time_seconds_sum{servlet='RoomSendEventRestServlet'}[2m]) / rate(synapse_http_server_response_count{servlet='RoomSendEventRestServlet'}[2m])",
|
||||
name: "[[job]]-[[index]] [[servlet]]",
|
||||
expr: "rate(synapse_http_server_response_time_second{servlet='RoomSendEventRestServlet'}[2m]) / rate(synapse_http_server_response_count{servlet='RoomSendEventRestServlet'}[2m]) / 1000",
|
||||
name: "[[servlet]]",
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yUnits: "s/req",
|
||||
@@ -322,7 +323,7 @@ new PromConsole.Graph({
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#synapse_federation_client_sent"),
|
||||
expr: "rate(synapse_federation_client_sent[2m])",
|
||||
name: "[[job]]-[[index]] [[type]]",
|
||||
name: "[[type]]",
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yUnits: "req/s",
|
||||
@@ -336,7 +337,7 @@ new PromConsole.Graph({
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#synapse_federation_server_received"),
|
||||
expr: "rate(synapse_federation_server_received[2m])",
|
||||
name: "[[job]]-[[index]] [[type]]",
|
||||
name: "[[type]]",
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yUnits: "req/s",
|
||||
@@ -366,7 +367,7 @@ new PromConsole.Graph({
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#synapse_notifier_listeners"),
|
||||
expr: "synapse_notifier_listeners",
|
||||
name: "[[job]]-[[index]]",
|
||||
name: "listeners",
|
||||
min: 0,
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||
@@ -381,7 +382,7 @@ new PromConsole.Graph({
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#synapse_notifier_notified_events"),
|
||||
expr: "rate(synapse_notifier_notified_events[2m])",
|
||||
name: "[[job]]-[[index]]",
|
||||
name: "events",
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yUnits: "events/s",
|
||||
|
||||
@@ -58,21 +58,3 @@ groups:
|
||||
labels:
|
||||
type: "PDU"
|
||||
expr: 'synapse_federation_transaction_queue_pending_pdus + 0'
|
||||
|
||||
- record: synapse_storage_events_persisted_by_source_type
|
||||
expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep{origin_type="remote"})
|
||||
labels:
|
||||
type: remote
|
||||
- record: synapse_storage_events_persisted_by_source_type
|
||||
expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep{origin_entity="*client*",origin_type="local"})
|
||||
labels:
|
||||
type: local
|
||||
- record: synapse_storage_events_persisted_by_source_type
|
||||
expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep{origin_entity!="*client*",origin_type="local"})
|
||||
labels:
|
||||
type: bridges
|
||||
- record: synapse_storage_events_persisted_by_event_type
|
||||
expr: sum without(origin_entity, origin_type) (synapse_storage_events_persisted_events_sep)
|
||||
- record: synapse_storage_events_persisted_by_origin
|
||||
expr: sum without(type) (synapse_storage_events_persisted_events_sep)
|
||||
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
#!/usr/bin/env python
|
||||
from __future__ import print_function
|
||||
|
||||
import json
|
||||
import sys
|
||||
@@ -7,6 +8,11 @@ from argparse import ArgumentParser
|
||||
|
||||
import requests
|
||||
|
||||
try:
|
||||
raw_input
|
||||
except NameError: # Python 3
|
||||
raw_input = input
|
||||
|
||||
|
||||
def _mkurl(template, kws):
|
||||
for key in kws:
|
||||
@@ -52,7 +58,7 @@ def main(hs, room_id, access_token, user_id_prefix, why):
|
||||
print("The following user IDs will be kicked from %s" % room_name)
|
||||
for uid in kick_list:
|
||||
print(uid)
|
||||
doit = input("Continue? [Y]es\n")
|
||||
doit = raw_input("Continue? [Y]es\n")
|
||||
if len(doit) > 0 and doit.lower() == "y":
|
||||
print("Kicking members...")
|
||||
# encode them all
|
||||
|
||||
6
debian/build_virtualenv
vendored
@@ -33,18 +33,16 @@ esac
|
||||
# Use --builtin-venv to use the better `venv` module from CPython 3.4+ rather
|
||||
# than the 2/3 compatible `virtualenv`.
|
||||
|
||||
# Pin pip to 20.3.4 to fix breakage in 21.0 on py3.5 (xenial)
|
||||
|
||||
dh_virtualenv \
|
||||
--install-suffix "matrix-synapse" \
|
||||
--builtin-venv \
|
||||
--python "$SNAKE" \
|
||||
--upgrade-pip-to="20.3.4" \
|
||||
--upgrade-pip \
|
||||
--preinstall="lxml" \
|
||||
--preinstall="mock" \
|
||||
--extra-pip-arg="--no-cache-dir" \
|
||||
--extra-pip-arg="--compile" \
|
||||
--extras="all,systemd,test"
|
||||
--extras="all,systemd"
|
||||
|
||||
PACKAGE_BUILD_DIR="debian/matrix-synapse-py3"
|
||||
VIRTUALENV_DIR="${PACKAGE_BUILD_DIR}${DH_VIRTUALENV_INSTALL_ROOT}/matrix-synapse"
|
||||
|
||||
124
debian/changelog
vendored
@@ -1,127 +1,3 @@
|
||||
matrix-synapse-py3 (1.28.0) stable; urgency=medium
|
||||
|
||||
* New synapse release 1.28.0.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Thu, 25 Feb 2021 10:21:57 +0000
|
||||
|
||||
matrix-synapse-py3 (1.27.0) stable; urgency=medium
|
||||
|
||||
[ Dan Callahan ]
|
||||
* Fix build on Ubuntu 16.04 LTS (Xenial).
|
||||
|
||||
[ Synapse Packaging team ]
|
||||
* New synapse release 1.27.0.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Tue, 16 Feb 2021 13:11:28 +0000
|
||||
|
||||
matrix-synapse-py3 (1.26.0) stable; urgency=medium
|
||||
|
||||
[ Richard van der Hoff ]
|
||||
* Remove dependency on `python3-distutils`.
|
||||
|
||||
[ Synapse Packaging team ]
|
||||
* New synapse release 1.26.0.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Wed, 27 Jan 2021 12:43:35 -0500
|
||||
|
||||
matrix-synapse-py3 (1.25.0) stable; urgency=medium
|
||||
|
||||
[ Dan Callahan ]
|
||||
* Update dependencies to account for the removal of the transitional
|
||||
dh-systemd package from Debian Bullseye.
|
||||
|
||||
[ Synapse Packaging team ]
|
||||
* New synapse release 1.25.0.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Wed, 13 Jan 2021 10:14:55 +0000
|
||||
|
||||
matrix-synapse-py3 (1.24.0) stable; urgency=medium
|
||||
|
||||
* New synapse release 1.24.0.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Wed, 09 Dec 2020 10:14:30 +0000
|
||||
|
||||
matrix-synapse-py3 (1.23.1) stable; urgency=medium
|
||||
|
||||
* New synapse release 1.23.1.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Wed, 09 Dec 2020 10:40:39 +0000
|
||||
|
||||
matrix-synapse-py3 (1.23.0) stable; urgency=medium
|
||||
|
||||
* New synapse release 1.23.0.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Wed, 18 Nov 2020 11:41:28 +0000
|
||||
|
||||
matrix-synapse-py3 (1.22.1) stable; urgency=medium
|
||||
|
||||
* New synapse release 1.22.1.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Fri, 30 Oct 2020 15:25:37 +0000
|
||||
|
||||
matrix-synapse-py3 (1.22.0) stable; urgency=medium
|
||||
|
||||
* New synapse release 1.22.0.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Tue, 27 Oct 2020 12:07:12 +0000
|
||||
|
||||
matrix-synapse-py3 (1.21.2) stable; urgency=medium
|
||||
|
||||
[ Synapse Packaging team ]
|
||||
* New synapse release 1.21.2.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Thu, 15 Oct 2020 09:23:27 -0400
|
||||
|
||||
matrix-synapse-py3 (1.21.1) stable; urgency=medium
|
||||
|
||||
[ Synapse Packaging team ]
|
||||
* New synapse release 1.21.1.
|
||||
|
||||
[ Andrew Morgan ]
|
||||
* Explicitly install "test" python dependencies.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Tue, 13 Oct 2020 10:24:13 +0100
|
||||
|
||||
matrix-synapse-py3 (1.21.0) stable; urgency=medium
|
||||
|
||||
* New synapse release 1.21.0.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Mon, 12 Oct 2020 15:47:44 +0100
|
||||
|
||||
matrix-synapse-py3 (1.20.1) stable; urgency=medium
|
||||
|
||||
* New synapse release 1.20.1.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Thu, 24 Sep 2020 16:25:22 +0100
|
||||
|
||||
matrix-synapse-py3 (1.20.0) stable; urgency=medium
|
||||
|
||||
[ Synapse Packaging team ]
|
||||
* New synapse release 1.20.0.
|
||||
|
||||
[ Dexter Chua ]
|
||||
* Use Type=notify in systemd service
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Tue, 22 Sep 2020 15:19:32 +0100
|
||||
|
||||
matrix-synapse-py3 (1.19.3) stable; urgency=medium
|
||||
|
||||
* New synapse release 1.19.3.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Fri, 18 Sep 2020 14:59:30 +0100
|
||||
|
||||
matrix-synapse-py3 (1.19.2) stable; urgency=medium
|
||||
|
||||
* New synapse release 1.19.2.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Wed, 16 Sep 2020 12:50:30 +0100
|
||||
|
||||
matrix-synapse-py3 (1.19.1) stable; urgency=medium
|
||||
|
||||
* New synapse release 1.19.1.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Thu, 27 Aug 2020 10:50:19 +0100
|
||||
|
||||
matrix-synapse-py3 (1.19.0) stable; urgency=medium
|
||||
|
||||
[ Synapse Packaging team ]
|
||||
|
||||
7
debian/control
vendored
@@ -3,11 +3,9 @@ Section: contrib/python
|
||||
Priority: extra
|
||||
Maintainer: Synapse Packaging team <packages@matrix.org>
|
||||
# keep this list in sync with the build dependencies in docker/Dockerfile-dhvirtualenv.
|
||||
# TODO: Remove the dependency on dh-systemd after dropping support for Ubuntu xenial
|
||||
# On all other supported releases, it's merely a transitional package which
|
||||
# does nothing but depends on debhelper (> 9.20160709)
|
||||
Build-Depends:
|
||||
debhelper (>= 9.20160709) | dh-systemd,
|
||||
debhelper (>= 9),
|
||||
dh-systemd,
|
||||
dh-virtualenv (>= 1.1),
|
||||
libsystemd-dev,
|
||||
libpq-dev,
|
||||
@@ -31,6 +29,7 @@ Pre-Depends: dpkg (>= 1.16.1)
|
||||
Depends:
|
||||
adduser,
|
||||
debconf,
|
||||
python3-distutils|libpython3-stdlib (<< 3.6),
|
||||
${misc:Depends},
|
||||
${shlibs:Depends},
|
||||
${synapse:pydepends},
|
||||
|
||||
2
debian/matrix-synapse.service
vendored
@@ -2,7 +2,7 @@
|
||||
Description=Synapse Matrix homeserver
|
||||
|
||||
[Service]
|
||||
Type=notify
|
||||
Type=simple
|
||||
User=matrix-synapse
|
||||
WorkingDirectory=/var/lib/matrix-synapse
|
||||
EnvironmentFile=/etc/default/matrix-synapse
|
||||
|
||||
@@ -30,8 +30,6 @@ for port in 8080 8081 8082; do
|
||||
if ! grep -F "Customisation made by demo/start.sh" -q $DIR/etc/$port.config; then
|
||||
printf '\n\n# Customisation made by demo/start.sh\n' >> $DIR/etc/$port.config
|
||||
|
||||
echo "public_baseurl: http://localhost:$port/" >> $DIR/etc/$port.config
|
||||
|
||||
echo 'enable_registration: true' >> $DIR/etc/$port.config
|
||||
|
||||
# Warning, this heredoc depends on the interaction of tabs and spaces. Please don't
|
||||
|
||||
59
demo/webserver.py
Normal file
@@ -0,0 +1,59 @@
|
||||
import argparse
|
||||
import BaseHTTPServer
|
||||
import os
|
||||
import SimpleHTTPServer
|
||||
import cgi, logging
|
||||
|
||||
from daemonize import Daemonize
|
||||
|
||||
|
||||
class SimpleHTTPRequestHandlerWithPOST(SimpleHTTPServer.SimpleHTTPRequestHandler):
|
||||
UPLOAD_PATH = "upload"
|
||||
|
||||
"""
|
||||
Accept all post request as file upload
|
||||
"""
|
||||
|
||||
def do_POST(self):
|
||||
|
||||
path = os.path.join(self.UPLOAD_PATH, os.path.basename(self.path))
|
||||
length = self.headers["content-length"]
|
||||
data = self.rfile.read(int(length))
|
||||
|
||||
with open(path, "wb") as fh:
|
||||
fh.write(data)
|
||||
|
||||
self.send_response(200)
|
||||
self.send_header("Content-Type", "application/json")
|
||||
self.end_headers()
|
||||
|
||||
# Return the absolute path of the uploaded file
|
||||
self.wfile.write('{"url":"/%s"}' % path)
|
||||
|
||||
|
||||
def setup():
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("directory")
|
||||
parser.add_argument("-p", "--port", dest="port", type=int, default=8080)
|
||||
parser.add_argument("-P", "--pid-file", dest="pid", default="web.pid")
|
||||
args = parser.parse_args()
|
||||
|
||||
# Get absolute path to directory to serve, as daemonize changes to '/'
|
||||
os.chdir(args.directory)
|
||||
dr = os.getcwd()
|
||||
|
||||
httpd = BaseHTTPServer.HTTPServer(("", args.port), SimpleHTTPRequestHandlerWithPOST)
|
||||
|
||||
def run():
|
||||
os.chdir(dr)
|
||||
httpd.serve_forever()
|
||||
|
||||
daemon = Daemonize(
|
||||
app="synapse-webclient", pid=args.pid, action=run, auto_close_fds=False
|
||||
)
|
||||
|
||||
daemon.start()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
setup()
|
||||
@@ -11,7 +11,7 @@
|
||||
# docker build -f docker/Dockerfile --build-arg PYTHON_VERSION=3.6 .
|
||||
#
|
||||
|
||||
ARG PYTHON_VERSION=3.8
|
||||
ARG PYTHON_VERSION=3.7
|
||||
|
||||
###
|
||||
### Stage 0: builder
|
||||
@@ -19,27 +19,19 @@ ARG PYTHON_VERSION=3.8
|
||||
FROM docker.io/python:${PYTHON_VERSION}-slim as builder
|
||||
|
||||
# install the OS build deps
|
||||
|
||||
|
||||
RUN apt-get update && apt-get install -y \
|
||||
build-essential \
|
||||
libffi-dev \
|
||||
libjpeg-dev \
|
||||
libpq-dev \
|
||||
libssl-dev \
|
||||
libwebp-dev \
|
||||
libxml++2.6-dev \
|
||||
libxslt1-dev \
|
||||
rustc \
|
||||
zlib1g-dev \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Build dependencies that are not available as wheels, to speed up rebuilds
|
||||
RUN pip install --prefix="/install" --no-warn-script-location \
|
||||
cryptography \
|
||||
frozendict \
|
||||
jaeger-client \
|
||||
opentracing \
|
||||
# Match the version constraints of Synapse
|
||||
"prometheus_client>=0.4.0" \
|
||||
prometheus-client \
|
||||
psycopg2 \
|
||||
pycparser \
|
||||
pyrsistent \
|
||||
@@ -63,12 +55,9 @@ RUN pip install --prefix="/install" --no-warn-script-location \
|
||||
FROM docker.io/python:${PYTHON_VERSION}-slim
|
||||
|
||||
RUN apt-get update && apt-get install -y \
|
||||
curl \
|
||||
gosu \
|
||||
libjpeg62-turbo \
|
||||
libpq5 \
|
||||
libwebp6 \
|
||||
xmlsec1 \
|
||||
gosu \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
COPY --from=builder /install /usr/local
|
||||
@@ -80,6 +69,3 @@ VOLUME ["/data"]
|
||||
EXPOSE 8008/tcp 8009/tcp 8448/tcp
|
||||
|
||||
ENTRYPOINT ["/start.py"]
|
||||
|
||||
HEALTHCHECK --interval=1m --timeout=5s \
|
||||
CMD curl -fSs http://localhost:8008/health || exit 1
|
||||
|
||||
@@ -27,7 +27,6 @@ RUN env DEBIAN_FRONTEND=noninteractive apt-get install \
|
||||
wget
|
||||
|
||||
# fetch and unpack the package
|
||||
# TODO: Upgrade to 1.2.2 once xenial is dropped
|
||||
RUN mkdir /dh-virtualenv
|
||||
RUN wget -q -O /dh-virtualenv.tar.gz https://github.com/spotify/dh-virtualenv/archive/ac6e1b1.tar.gz
|
||||
RUN tar -xv --strip-components=1 -C /dh-virtualenv -f /dh-virtualenv.tar.gz
|
||||
@@ -51,22 +50,17 @@ FROM ${distro}
|
||||
ARG distro=""
|
||||
ENV distro ${distro}
|
||||
|
||||
# Python < 3.7 assumes LANG="C" means ASCII-only and throws on printing unicode
|
||||
# http://bugs.python.org/issue19846
|
||||
ENV LANG C.UTF-8
|
||||
|
||||
# Install the build dependencies
|
||||
#
|
||||
# NB: keep this list in sync with the list of build-deps in debian/control
|
||||
# TODO: it would be nice to do that automatically.
|
||||
# TODO: Remove the dh-systemd stanza after dropping support for Ubuntu xenial
|
||||
# it's a transitional package on all other, more recent releases
|
||||
RUN apt-get update -qq -o Acquire::Languages=none \
|
||||
&& env DEBIAN_FRONTEND=noninteractive apt-get install \
|
||||
-yqq --no-install-recommends -o Dpkg::Options::=--force-unsafe-io \
|
||||
build-essential \
|
||||
debhelper \
|
||||
devscripts \
|
||||
dh-systemd \
|
||||
libsystemd-dev \
|
||||
lsb-release \
|
||||
pkg-config \
|
||||
@@ -75,11 +69,7 @@ RUN apt-get update -qq -o Acquire::Languages=none \
|
||||
python3-setuptools \
|
||||
python3-venv \
|
||||
sqlite3 \
|
||||
libpq-dev \
|
||||
xmlsec1 \
|
||||
&& ( env DEBIAN_FRONTEND=noninteractive apt-get install \
|
||||
-yqq --no-install-recommends -o Dpkg::Options::=--force-unsafe-io \
|
||||
dh-systemd || true )
|
||||
libpq-dev
|
||||
|
||||
COPY --from=builder /dh-virtualenv_1.2~dev-1_all.deb /
|
||||
|
||||
|
||||
@@ -11,6 +11,7 @@ The image also does *not* provide a TURN server.
|
||||
By default, the image expects a single volume, located at ``/data``, that will hold:
|
||||
|
||||
* configuration files;
|
||||
* temporary files during uploads;
|
||||
* uploaded media and thumbnails;
|
||||
* the SQLite database if you do not configure postgres;
|
||||
* the appservices configuration.
|
||||
@@ -82,7 +83,7 @@ docker logs synapse
|
||||
If all is well, you should now be able to connect to http://localhost:8008 and
|
||||
see a confirmation message.
|
||||
|
||||
The following environment variables are supported in `run` mode:
|
||||
The following environment variables are supported in run mode:
|
||||
|
||||
* `SYNAPSE_CONFIG_DIR`: where additional config files are stored. Defaults to
|
||||
`/data`.
|
||||
@@ -93,20 +94,6 @@ The following environment variables are supported in `run` mode:
|
||||
* `UID`, `GID`: the user and group id to run Synapse as. Defaults to `991`, `991`.
|
||||
* `TZ`: the [timezone](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones) the container will run with. Defaults to `UTC`.
|
||||
|
||||
For more complex setups (e.g. for workers) you can also pass your args directly to synapse using `run` mode. For example like this:
|
||||
|
||||
```
|
||||
docker run -d --name synapse \
|
||||
--mount type=volume,src=synapse-data,dst=/data \
|
||||
-p 8008:8008 \
|
||||
matrixdotorg/synapse:latest run \
|
||||
-m synapse.app.generic_worker \
|
||||
--config-path=/data/homeserver.yaml \
|
||||
--config-path=/data/generic_worker.yaml
|
||||
```
|
||||
|
||||
If you do not provide `-m`, the value of the `SYNAPSE_WORKER` environment variable is used. If you do not provide at least one `--config-path` or `-c`, the value of the `SYNAPSE_CONFIG_PATH` environment variable is used instead.
|
||||
|
||||
## Generating an (admin) user
|
||||
|
||||
After synapse is running, you may wish to create a user via `register_new_matrix_user`.
|
||||
@@ -175,32 +162,3 @@ docker build -t matrixdotorg/synapse -f docker/Dockerfile .
|
||||
|
||||
You can choose to build a different docker image by changing the value of the `-f` flag to
|
||||
point to another Dockerfile.
|
||||
|
||||
## Disabling the healthcheck
|
||||
|
||||
If you are using a non-standard port or TLS inside docker, you can disable the healthcheck
|
||||
whilst running the above `docker run` commands.
|
||||
|
||||
```
|
||||
--no-healthcheck
|
||||
```
|
||||
## Setting custom healthcheck on docker run
|
||||
|
||||
If you wish to point the healthcheck at a different port with docker command, add the following
|
||||
|
||||
```
|
||||
--health-cmd 'curl -fSs http://localhost:1234/health'
|
||||
```
|
||||
|
||||
## Setting the healthcheck in docker-compose file
|
||||
|
||||
You can add the following to set a custom healthcheck in a docker compose file.
|
||||
You will need version >2.1 for this to work.
|
||||
|
||||
```
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-fSs", "http://localhost:8008/health"]
|
||||
interval: 1m
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
```
|
||||
|
||||
@@ -89,7 +89,8 @@ federation_rc_concurrent: 3
|
||||
## Files ##
|
||||
|
||||
media_store_path: "/data/media"
|
||||
max_upload_size: "{{ SYNAPSE_MAX_UPLOAD_SIZE or "50M" }}"
|
||||
uploads_path: "/data/uploads"
|
||||
max_upload_size: "{{ SYNAPSE_MAX_UPLOAD_SIZE or "10M" }}"
|
||||
max_image_pixels: "32M"
|
||||
dynamic_thumbnails: false
|
||||
|
||||
@@ -197,10 +198,12 @@ old_signing_keys: {}
|
||||
key_refresh_interval: "1d" # 1 Day.
|
||||
|
||||
# The trusted servers to download signing keys from.
|
||||
trusted_key_servers:
|
||||
- server_name: matrix.org
|
||||
verify_keys:
|
||||
"ed25519:auto": "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"
|
||||
perspectives:
|
||||
servers:
|
||||
"matrix.org":
|
||||
verify_keys:
|
||||
"ed25519:auto":
|
||||
key: "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"
|
||||
|
||||
password_config:
|
||||
enabled: true
|
||||
|
||||
@@ -179,7 +179,7 @@ def run_generate_config(environ, ownership):
|
||||
|
||||
|
||||
def main(args, environ):
|
||||
mode = args[1] if len(args) > 1 else "run"
|
||||
mode = args[1] if len(args) > 1 else None
|
||||
desired_uid = int(environ.get("UID", "991"))
|
||||
desired_gid = int(environ.get("GID", "991"))
|
||||
synapse_worker = environ.get("SYNAPSE_WORKER", "synapse.app.homeserver")
|
||||
@@ -205,47 +205,36 @@ def main(args, environ):
|
||||
config_dir, config_path, environ, ownership
|
||||
)
|
||||
|
||||
if mode != "run":
|
||||
if mode is not None:
|
||||
error("Unknown execution mode '%s'" % (mode,))
|
||||
|
||||
args = args[2:]
|
||||
config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data")
|
||||
config_path = environ.get("SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml")
|
||||
|
||||
if "-m" not in args:
|
||||
args = ["-m", synapse_worker] + args
|
||||
|
||||
# if there are no config files passed to synapse, try adding the default file
|
||||
if not any(p.startswith("--config-path") or p.startswith("-c") for p in args):
|
||||
config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data")
|
||||
config_path = environ.get(
|
||||
"SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml"
|
||||
)
|
||||
|
||||
if not os.path.exists(config_path):
|
||||
if "SYNAPSE_SERVER_NAME" in environ:
|
||||
error(
|
||||
"""\
|
||||
if not os.path.exists(config_path):
|
||||
if "SYNAPSE_SERVER_NAME" in environ:
|
||||
error(
|
||||
"""\
|
||||
Config file '%s' does not exist.
|
||||
|
||||
The synapse docker image no longer supports generating a config file on-the-fly
|
||||
based on environment variables. You can migrate to a static config file by
|
||||
running with 'migrate_config'. See the README for more details.
|
||||
"""
|
||||
% (config_path,)
|
||||
)
|
||||
|
||||
error(
|
||||
"Config file '%s' does not exist. You should either create a new "
|
||||
"config file by running with the `generate` argument (and then edit "
|
||||
"the resulting file before restarting) or specify the path to an "
|
||||
"existing config file with the SYNAPSE_CONFIG_PATH variable."
|
||||
% (config_path,)
|
||||
)
|
||||
|
||||
args += ["--config-path", config_path]
|
||||
error(
|
||||
"Config file '%s' does not exist. You should either create a new "
|
||||
"config file by running with the `generate` argument (and then edit "
|
||||
"the resulting file before restarting) or specify the path to an "
|
||||
"existing config file with the SYNAPSE_CONFIG_PATH variable."
|
||||
% (config_path,)
|
||||
)
|
||||
|
||||
log("Starting synapse with args " + " ".join(args))
|
||||
log("Starting synapse with config file " + config_path)
|
||||
|
||||
args = ["python"] + args
|
||||
args = ["python", "-m", synapse_worker, "--config-path", config_path]
|
||||
if ownership is not None:
|
||||
args = ["gosu", ownership] + args
|
||||
os.execv("/usr/sbin/gosu", args)
|
||||
|
||||
@@ -1,172 +0,0 @@
|
||||
# Show reported events
|
||||
|
||||
This API returns information about reported events.
|
||||
|
||||
The api is:
|
||||
```
|
||||
GET /_synapse/admin/v1/event_reports?from=0&limit=10
|
||||
```
|
||||
To use it, you will need to authenticate by providing an `access_token` for a
|
||||
server admin: see [README.rst](README.rst).
|
||||
|
||||
It returns a JSON body like the following:
|
||||
|
||||
```json
|
||||
{
|
||||
"event_reports": [
|
||||
{
|
||||
"event_id": "$bNUFCwGzWca1meCGkjp-zwslF-GfVcXukvRLI1_FaVY",
|
||||
"id": 2,
|
||||
"reason": "foo",
|
||||
"score": -100,
|
||||
"received_ts": 1570897107409,
|
||||
"canonical_alias": "#alias1:matrix.org",
|
||||
"room_id": "!ERAgBpSOcCCuTJqQPk:matrix.org",
|
||||
"name": "Matrix HQ",
|
||||
"sender": "@foobar:matrix.org",
|
||||
"user_id": "@foo:matrix.org"
|
||||
},
|
||||
{
|
||||
"event_id": "$3IcdZsDaN_En-S1DF4EMCy3v4gNRKeOJs8W5qTOKj4I",
|
||||
"id": 3,
|
||||
"reason": "bar",
|
||||
"score": -100,
|
||||
"received_ts": 1598889612059,
|
||||
"canonical_alias": "#alias2:matrix.org",
|
||||
"room_id": "!eGvUQuTCkHGVwNMOjv:matrix.org",
|
||||
"name": "Your room name here",
|
||||
"sender": "@foobar:matrix.org",
|
||||
"user_id": "@bar:matrix.org"
|
||||
}
|
||||
],
|
||||
"next_token": 2,
|
||||
"total": 4
|
||||
}
|
||||
```
|
||||
|
||||
To paginate, check for `next_token` and if present, call the endpoint again with `from`
|
||||
set to the value of `next_token`. This will return a new page.
|
||||
|
||||
If the endpoint does not return a `next_token` then there are no more reports to
|
||||
paginate through.
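A minimal Python sketch of the pagination loop described above, using the `requests` library; the homeserver URL and admin access token are placeholders:

```python
import requests

BASE_URL = "https://matrix.example.com"   # placeholder homeserver URL
ADMIN_TOKEN = "<admin_access_token>"      # placeholder admin access token


def fetch_all_event_reports():
    """Follow next_token until the server stops returning one."""
    headers = {"Authorization": f"Bearer {ADMIN_TOKEN}"}
    params = {"from": 0, "limit": 100, "dir": "b"}
    reports = []
    while True:
        resp = requests.get(
            f"{BASE_URL}/_synapse/admin/v1/event_reports",
            headers=headers,
            params=params,
        )
        resp.raise_for_status()
        body = resp.json()
        reports.extend(body["event_reports"])
        if "next_token" not in body:
            break  # no more pages
        params["from"] = body["next_token"]
    return reports


if __name__ == "__main__":
    for report in fetch_all_event_reports():
        print(report["id"], report["room_id"], report["reason"])
```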
|
||||
|
||||
**URL parameters:**
|
||||
|
||||
* `limit`: integer - Is optional but is used for pagination, denoting the maximum number
|
||||
of items to return in this call. Defaults to `100`.
|
||||
* `from`: integer - Is optional but used for pagination, denoting the offset in the
|
||||
returned results. This should be treated as an opaque value and not explicitly set to
|
||||
anything other than the return value of `next_token` from a previous call. Defaults to `0`.
|
||||
* `dir`: string - Direction of event report order. Whether to fetch the most recent
|
||||
first (`b`) or the oldest first (`f`). Defaults to `b`.
|
||||
* `user_id`: string - Is optional and filters to only return users with user IDs that
|
||||
contain this value. This is the user who reported the event and wrote the reason.
|
||||
* `room_id`: string - Is optional and filters to only return rooms with room IDs that
|
||||
contain this value.
|
||||
|
||||
**Response**
|
||||
|
||||
The following fields are returned in the JSON response body:
|
||||
|
||||
* `id`: integer - ID of event report.
|
||||
* `received_ts`: integer - The timestamp (in milliseconds since the unix epoch) when this
|
||||
report was sent.
|
||||
* `room_id`: string - The ID of the room in which the event being reported is located.
|
||||
* `name`: string - The name of the room.
|
||||
* `event_id`: string - The ID of the reported event.
|
||||
* `user_id`: string - This is the user who reported the event and wrote the reason.
|
||||
* `reason`: string - Comment made by the `user_id` in this report. May be blank.
|
||||
* `score`: integer - Content is reported based upon a negative score, where -100 is
|
||||
"most offensive" and 0 is "inoffensive".
|
||||
* `sender`: string - This is the ID of the user who sent the original message/event that
|
||||
was reported.
|
||||
* `canonical_alias`: string - The canonical alias of the room. `null` if the room does not
|
||||
have a canonical alias set.
|
||||
* `next_token`: integer - Indication for pagination. See above.
|
||||
* `total`: integer - Total number of event reports related to the query
|
||||
(`user_id` and `room_id`).
|
||||
|
||||
# Show details of a specific event report
|
||||
|
||||
This API returns information about a specific event report.
|
||||
|
||||
The api is:
|
||||
```
|
||||
GET /_synapse/admin/v1/event_reports/<report_id>
|
||||
```
|
||||
To use it, you will need to authenticate by providing an `access_token` for a
|
||||
server admin: see [README.rst](README.rst).
|
||||
|
||||
It returns a JSON body like the following:
|
||||
|
||||
```jsonc
|
||||
{
|
||||
"event_id": "$bNUFCwGzWca1meCGkjp-zwslF-GfVcXukvRLI1_FaVY",
|
||||
"event_json": {
|
||||
"auth_events": [
|
||||
"$YK4arsKKcc0LRoe700pS8DSjOvUT4NDv0HfInlMFw2M",
|
||||
"$oggsNXxzPFRE3y53SUNd7nsj69-QzKv03a1RucHu-ws"
|
||||
],
|
||||
"content": {
|
||||
"body": "matrix.org: This Week in Matrix",
|
||||
"format": "org.matrix.custom.html",
|
||||
"formatted_body": "<strong>matrix.org</strong>:<br><a href=\"https://matrix.org/blog/\"><strong>This Week in Matrix</strong></a>",
|
||||
"msgtype": "m.notice"
|
||||
},
|
||||
"depth": 546,
|
||||
"hashes": {
|
||||
"sha256": "xK1//xnmvHJIOvbgXlkI8eEqdvoMmihVDJ9J4SNlsAw"
|
||||
},
|
||||
"origin": "matrix.org",
|
||||
"origin_server_ts": 1592291711430,
|
||||
"prev_events": [
|
||||
"$YK4arsKKcc0LRoe700pS8DSjOvUT4NDv0HfInlMFw2M"
|
||||
],
|
||||
"prev_state": [],
|
||||
"room_id": "!ERAgBpSOcCCuTJqQPk:matrix.org",
|
||||
"sender": "@foobar:matrix.org",
|
||||
"signatures": {
|
||||
"matrix.org": {
|
||||
"ed25519:a_JaEG": "cs+OUKW/iHx5pEidbWxh0UiNNHwe46Ai9LwNz+Ah16aWDNszVIe2gaAcVZfvNsBhakQTew51tlKmL2kspXk/Dg"
|
||||
}
|
||||
},
|
||||
"type": "m.room.message",
|
||||
"unsigned": {
|
||||
"age_ts": 1592291711430,
|
||||
}
|
||||
},
|
||||
"id": <report_id>,
|
||||
"reason": "foo",
|
||||
"score": -100,
|
||||
"received_ts": 1570897107409,
|
||||
"canonical_alias": "#alias1:matrix.org",
|
||||
"room_id": "!ERAgBpSOcCCuTJqQPk:matrix.org",
|
||||
"name": "Matrix HQ",
|
||||
"sender": "@foobar:matrix.org",
|
||||
"user_id": "@foo:matrix.org"
|
||||
}
|
||||
```
|
||||
|
||||
**URL parameters:**
|
||||
|
||||
* `report_id`: string - The ID of the event report.
|
||||
|
||||
**Response**
|
||||
|
||||
The following fields are returned in the JSON response body:
|
||||
|
||||
* `id`: integer - ID of event report.
|
||||
* `received_ts`: integer - The timestamp (in milliseconds since the unix epoch) when this
|
||||
report was sent.
|
||||
* `room_id`: string - The ID of the room in which the event being reported is located.
|
||||
* `name`: string - The name of the room.
|
||||
* `event_id`: string - The ID of the reported event.
|
||||
* `user_id`: string - This is the user who reported the event and wrote the reason.
|
||||
* `reason`: string - Comment made by the `user_id` in this report. May be blank.
|
||||
* `score`: integer - Content is reported based upon a negative score, where -100 is
|
||||
"most offensive" and 0 is "inoffensive".
|
||||
* `sender`: string - This is the ID of the user who sent the original message/event that
|
||||
was reported.
|
||||
* `canonical_alias`: string - The canonical alias of the room. `null` if the room does not
|
||||
have a canonical alias set.
|
||||
* `event_json`: object - Details of the original event that was reported.
|
||||
@@ -1,19 +1,6 @@
|
||||
# Contents
|
||||
- [List all media in a room](#list-all-media-in-a-room)
|
||||
- [Quarantine media](#quarantine-media)
|
||||
* [Quarantining media by ID](#quarantining-media-by-id)
|
||||
* [Quarantining media in a room](#quarantining-media-in-a-room)
|
||||
* [Quarantining all media of a user](#quarantining-all-media-of-a-user)
|
||||
* [Protecting media from being quarantined](#protecting-media-from-being-quarantined)
|
||||
- [Delete local media](#delete-local-media)
|
||||
* [Delete a specific local media](#delete-a-specific-local-media)
|
||||
* [Delete local media by date or size](#delete-local-media-by-date-or-size)
|
||||
- [Purge Remote Media API](#purge-remote-media-api)
|
||||
|
||||
# List all media in a room
|
||||
|
||||
This API gets a list of known media in a room.
|
||||
However, it only shows media from unencrypted events or rooms.
|
||||
|
||||
The API is:
|
||||
```
|
||||
@@ -23,16 +10,16 @@ To use it, you will need to authenticate by providing an `access_token` for a
|
||||
server admin: see [README.rst](README.rst).
|
||||
|
||||
The API returns a JSON body like the following:
|
||||
```json
|
||||
```
|
||||
{
|
||||
"local": [
|
||||
"mxc://localhost/xwvutsrqponmlkjihgfedcba",
|
||||
"mxc://localhost/abcdefghijklmnopqrstuvwx"
|
||||
],
|
||||
"remote": [
|
||||
"mxc://matrix.org/xwvutsrqponmlkjihgfedcba",
|
||||
"mxc://matrix.org/abcdefghijklmnopqrstuvwx"
|
||||
]
|
||||
"local": [
|
||||
"mxc://localhost/xwvutsrqponmlkjihgfedcba",
|
||||
"mxc://localhost/abcdefghijklmnopqrstuvwx"
|
||||
],
|
||||
"remote": [
|
||||
"mxc://matrix.org/xwvutsrqponmlkjihgfedcba",
|
||||
"mxc://matrix.org/abcdefghijklmnopqrstuvwx"
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
@@ -60,7 +47,7 @@ form of `abcdefg12345...`.
|
||||
|
||||
Response:
|
||||
|
||||
```json
|
||||
```
|
||||
{}
|
||||
```
|
||||
|
||||
@@ -80,18 +67,14 @@ Where `room_id` is in the form of `!roomid12345:example.org`.
|
||||
|
||||
Response:
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"num_quarantined": 10
|
||||
"num_quarantined": 10 # The number of media items successfully quarantined
|
||||
}
|
||||
```
|
||||
|
||||
The following fields are returned in the JSON response body:
|
||||
|
||||
* `num_quarantined`: integer - The number of media items successfully quarantined
|
||||
|
||||
Note that there is a legacy endpoint, `POST
|
||||
/_synapse/admin/v1/quarantine_media/<room_id>`, that operates the same.
|
||||
/_synapse/admin/v1/quarantine_media/<room_id >`, that operates the same.
|
||||
However, it is deprecated and may be removed in a future release.
|
||||
|
||||
## Quarantining all media of a user
|
||||
@@ -108,155 +91,12 @@ POST /_synapse/admin/v1/user/<user_id>/media/quarantine
|
||||
{}
|
||||
```
|
||||
|
||||
URL Parameters
|
||||
|
||||
* `user_id`: string - User ID in the form of `@bob:example.org`
|
||||
Where `user_id` is in the form of `@bob:example.org`.
|
||||
|
||||
Response:
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"num_quarantined": 10
|
||||
"num_quarantined": 10 # The number of media items successfully quarantined
|
||||
}
|
||||
```
|
||||
|
||||
The following fields are returned in the JSON response body:
|
||||
|
||||
* `num_quarantined`: integer - The number of media items successfully quarantined
|
||||
|
||||
## Protecting media from being quarantined
|
||||
|
||||
This API protects a single piece of local media from being quarantined using the
|
||||
above APIs. This is useful for sticker packs and other shared media which you do
|
||||
not want to get quarantined, especially when
|
||||
[quarantining media in a room](#quarantining-media-in-a-room).
|
||||
|
||||
Request:
|
||||
|
||||
```
|
||||
POST /_synapse/admin/v1/media/protect/<media_id>
|
||||
|
||||
{}
|
||||
```
|
||||
|
||||
Where `media_id` is in the form of `abcdefg12345...`.
|
||||
|
||||
Response:
|
||||
|
||||
```json
|
||||
{}
|
||||
```
|
||||
|
||||
# Delete local media
|
||||
This API deletes the *local* media from the disk of your own server.
|
||||
This includes any local thumbnails and copies of media downloaded from
|
||||
remote homeservers.
|
||||
This API will not affect media that has been uploaded to external
|
||||
media repositories (e.g https://github.com/turt2live/matrix-media-repo/).
|
||||
See also [Purge Remote Media API](#purge-remote-media-api).
|
||||
|
||||
## Delete a specific local media
|
||||
Delete a specific `media_id`.
|
||||
|
||||
Request:
|
||||
|
||||
```
|
||||
DELETE /_synapse/admin/v1/media/<server_name>/<media_id>
|
||||
|
||||
{}
|
||||
```
|
||||
|
||||
URL Parameters
|
||||
|
||||
* `server_name`: string - The name of your local server (e.g `matrix.org`)
|
||||
* `media_id`: string - The ID of the media (e.g `abcdefghijklmnopqrstuvwx`)
|
||||
|
||||
Response:
|
||||
|
||||
```json
|
||||
{
|
||||
"deleted_media": [
|
||||
"abcdefghijklmnopqrstuvwx"
|
||||
],
|
||||
"total": 1
|
||||
}
|
||||
```
|
||||
|
||||
The following fields are returned in the JSON response body:
|
||||
|
||||
* `deleted_media`: an array of strings - List of deleted `media_id`
|
||||
* `total`: integer - Total number of deleted `media_id`
|
||||
|
||||
## Delete local media by date or size
|
||||
|
||||
Request:
|
||||
|
||||
```
|
||||
POST /_synapse/admin/v1/media/<server_name>/delete?before_ts=<before_ts>
|
||||
|
||||
{}
|
||||
```
|
||||
|
||||
URL Parameters
|
||||
|
||||
* `server_name`: string - The name of your local server (e.g `matrix.org`).
|
||||
* `before_ts`: string representing a positive integer - Unix timestamp in ms.
|
||||
Files that were last used before this timestamp will be deleted. It is the timestamp of
|
||||
last access, not the timestamp of creation.
|
||||
* `size_gt`: Optional - string representing a positive integer - Size of the media in bytes.
|
||||
Files that are larger will be deleted. Defaults to `0`.
|
||||
* `keep_profiles`: Optional - string representing a boolean - Switch to also delete files
|
||||
that are still used in image data (e.g user profile, room avatar).
|
||||
If `false` these files will be deleted. Defaults to `true`.
|
||||
|
||||
Response:
|
||||
|
||||
```json
|
||||
{
|
||||
"deleted_media": [
|
||||
"abcdefghijklmnopqrstuvwx",
|
||||
"abcdefghijklmnopqrstuvwz"
|
||||
],
|
||||
"total": 2
|
||||
}
|
||||
```
|
||||
|
||||
The following fields are returned in the JSON response body:
|
||||
|
||||
* `deleted_media`: an array of strings - List of deleted `media_id`
|
||||
* `total`: integer - Total number of deleted `media_id`
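As a rough illustration of the endpoint above, the following Python sketch deletes local media that has not been accessed in the last 30 days and is larger than 1 MiB, while keeping profile images; the homeserver URL, admin token and server name are placeholders:

```python
import time

import requests

BASE_URL = "https://matrix.example.com"   # placeholder homeserver URL
ADMIN_TOKEN = "<admin_access_token>"      # placeholder admin access token
SERVER_NAME = "example.com"               # placeholder local server name

# before_ts is a Unix timestamp in ms: here, 30 days ago
before_ts = int(time.time() * 1000) - 30 * 24 * 60 * 60 * 1000

resp = requests.post(
    f"{BASE_URL}/_synapse/admin/v1/media/{SERVER_NAME}/delete",
    headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
    params={
        "before_ts": before_ts,
        "size_gt": 1024 * 1024,      # only media larger than 1 MiB
        "keep_profiles": "true",     # keep avatars and other profile images
    },
    json={},  # the endpoint expects a body; an empty JSON object suffices
)
resp.raise_for_status()
body = resp.json()
print(f"deleted {body['total']} media items: {body['deleted_media']}")
```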
|
||||
|
||||
# Purge Remote Media API
|
||||
|
||||
The purge remote media API allows server admins to purge old cached remote media.
|
||||
|
||||
The API is:
|
||||
|
||||
```
|
||||
POST /_synapse/admin/v1/purge_media_cache?before_ts=<unix_timestamp_in_ms>
|
||||
|
||||
{}
|
||||
```
|
||||
|
||||
URL Parameters
|
||||
|
||||
* `unix_timestamp_in_ms`: string representing a positive integer - Unix timestamp in ms.
|
||||
All cached media that was last accessed before this timestamp will be removed.
|
||||
|
||||
Response:
|
||||
|
||||
```json
|
||||
{
|
||||
"deleted": 10
|
||||
}
|
||||
```
|
||||
|
||||
The following fields are returned in the JSON response body:
|
||||
|
||||
* `deleted`: integer - The number of media items successfully deleted
|
||||
|
||||
To use it, you will need to authenticate by providing an `access_token` for a
|
||||
server admin: see [README.rst](README.rst).
|
||||
|
||||
If the user re-requests purged remote media, synapse will re-request the media
|
||||
from the originating server.
|
||||
|
||||
20
docs/admin_api/purge_remote_media.rst
Normal file
@@ -0,0 +1,20 @@
|
||||
Purge Remote Media API
|
||||
======================
|
||||
|
||||
The purge remote media API allows server admins to purge old cached remote
|
||||
media.
|
||||
|
||||
The API is::
|
||||
|
||||
POST /_synapse/admin/v1/purge_media_cache?before_ts=<unix_timestamp_in_ms>
|
||||
|
||||
{}
|
||||
|
||||
... which will remove all cached media that was last accessed before
|
||||
``<unix_timestamp_in_ms>``.
|
||||
|
||||
To use it, you will need to authenticate by providing an ``access_token`` for a
|
||||
server admin: see `README.rst <README.rst>`_.
|
||||
|
||||
If the user re-requests purged remote media, synapse will re-request the media
|
||||
from the originating server.
|
||||
@@ -1,13 +1,12 @@
|
||||
Deprecated: Purge room API
|
||||
==========================
|
||||
|
||||
**The old Purge room API is deprecated and will be removed in a future release.
|
||||
See the new [Delete Room API](rooms.md#delete-room-api) for more details.**
|
||||
Purge room API
|
||||
==============
|
||||
|
||||
This API will remove all trace of a room from your database.
|
||||
|
||||
All local users must have left the room before it can be removed.
|
||||
|
||||
See also: [Delete Room API](rooms.md#delete-room-api)
|
||||
|
||||
The API is:
|
||||
|
||||
```
|
||||
|
||||
@@ -18,8 +18,7 @@ To fetch the nonce, you need to request one from the API::
|
||||
|
||||
Once you have the nonce, you can make a ``POST`` to the same URL with a JSON
|
||||
body containing the nonce, username, password, whether they are an admin
|
||||
(optional, False by default), and a HMAC digest of the content. Also you can
|
||||
set the displayname (optional, ``username`` by default).
|
||||
(optional, False by default), and a HMAC digest of the content.
|
||||
|
||||
As an example::
|
||||
|
||||
@@ -27,7 +26,6 @@ As an example::
|
||||
> {
|
||||
"nonce": "thisisanonce",
|
||||
"username": "pepper_roni",
|
||||
"displayname": "Pepper Roni",
|
||||
"password": "pizza",
|
||||
"admin": true,
|
||||
"mac": "mac_digest_here"
|
||||
|
||||
@@ -1,17 +1,3 @@
|
||||
# Contents
|
||||
- [List Room API](#list-room-api)
|
||||
* [Parameters](#parameters)
|
||||
* [Usage](#usage)
|
||||
- [Room Details API](#room-details-api)
|
||||
- [Room Members API](#room-members-api)
|
||||
- [Delete Room API](#delete-room-api)
|
||||
* [Parameters](#parameters-1)
|
||||
* [Response](#response)
|
||||
* [Undoing room shutdowns](#undoing-room-shutdowns)
|
||||
- [Make Room Admin API](#make-room-admin-api)
|
||||
- [Forward Extremities Admin API](#forward-extremities-admin-api)
|
||||
- [Event Context API](#event-context-api)
|
||||
|
||||
# List Room API
|
||||
|
||||
The List Room admin API allows server admins to get a list of rooms on their
|
||||
@@ -90,7 +76,7 @@ GET /_synapse/admin/v1/rooms
|
||||
|
||||
Response:
|
||||
|
||||
```jsonc
|
||||
```
|
||||
{
|
||||
"rooms": [
|
||||
{
|
||||
@@ -142,7 +128,7 @@ GET /_synapse/admin/v1/rooms?search_term=TWIM
|
||||
|
||||
Response:
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"rooms": [
|
||||
{
|
||||
@@ -177,7 +163,7 @@ GET /_synapse/admin/v1/rooms?order_by=size
|
||||
|
||||
Response:
|
||||
|
||||
```jsonc
|
||||
```
|
||||
{
|
||||
"rooms": [
|
||||
{
|
||||
@@ -233,14 +219,14 @@ GET /_synapse/admin/v1/rooms?order_by=size&from=100
|
||||
|
||||
Response:
|
||||
|
||||
```jsonc
|
||||
```
|
||||
{
|
||||
"rooms": [
|
||||
{
|
||||
"room_id": "!mscvqgqpHYjBGDxNym:matrix.org",
|
||||
"name": "Music Theory",
|
||||
"canonical_alias": "#musictheory:matrix.org",
|
||||
"joined_members": 127,
|
||||
"joined_members": 127
|
||||
"joined_local_members": 2,
|
||||
"version": "1",
|
||||
"creator": "@foo:matrix.org",
|
||||
@@ -257,7 +243,7 @@ Response:
|
||||
"room_id": "!twcBhHVdZlQWuuxBhN:termina.org.uk",
|
||||
"name": "weechat-matrix",
|
||||
"canonical_alias": "#weechat-matrix:termina.org.uk",
|
||||
"joined_members": 137,
|
||||
"joined_members": 137
|
||||
"joined_local_members": 20,
|
||||
"version": "4",
|
||||
"creator": "@foo:termina.org.uk",
|
||||
@@ -279,20 +265,19 @@ Response:
|
||||
Once the `next_token` parameter is no longer present, we know we've reached the
|
||||
end of the list.
|
||||
|
||||
# Room Details API
|
||||
# DRAFT: Room Details API
|
||||
|
||||
The Room Details admin API allows server admins to get all details of a room.
|
||||
|
||||
This API is still a draft and details might change!
|
||||
|
||||
The following fields are possible in the JSON response body:
|
||||
|
||||
* `room_id` - The ID of the room.
|
||||
* `name` - The name of the room.
|
||||
* `topic` - The topic of the room.
|
||||
* `avatar` - The `mxc` URI to the avatar of the room.
|
||||
* `canonical_alias` - The canonical (main) alias address of the room.
|
||||
* `joined_members` - How many users are currently in the room.
|
||||
* `joined_local_members` - How many local users are currently in the room.
|
||||
* `joined_local_devices` - How many local devices are currently in the room.
|
||||
* `version` - The version of the room as a string.
|
||||
* `creator` - The `user_id` of the room creator.
|
||||
* `encryption` - Algorithm of end-to-end encryption of messages. Is `null` if encryption is not active.
|
||||
@@ -315,16 +300,13 @@ GET /_synapse/admin/v1/rooms/<room_id>
|
||||
|
||||
Response:
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"room_id": "!mscvqgqpHYjBGDxNym:matrix.org",
|
||||
"name": "Music Theory",
|
||||
"avatar": "mxc://matrix.org/AQDaVFlbkQoErdOgqWRgiGSV",
|
||||
"topic": "Theory, Composition, Notation, Analysis",
|
||||
"canonical_alias": "#musictheory:matrix.org",
|
||||
"joined_members": 127,
|
||||
"joined_members": 127
|
||||
"joined_local_members": 2,
|
||||
"joined_local_devices": 2,
|
||||
"version": "1",
|
||||
"creator": "@foo:matrix.org",
|
||||
"encryption": null,
|
||||
@@ -358,51 +340,23 @@ GET /_synapse/admin/v1/rooms/<room_id>/members
|
||||
|
||||
Response:
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"members": [
|
||||
"@foo:matrix.org",
|
||||
"@bar:matrix.org",
|
||||
"@foobar:matrix.org"
|
||||
],
|
||||
"@foobar:matrix.org
|
||||
],
|
||||
"total": 3
|
||||
}
|
||||
```
|
||||
|
||||
# Room State API
|
||||
|
||||
The Room State admin API allows server admins to get a list of all state events in a room.
|
||||
|
||||
The response includes the following fields:
|
||||
|
||||
* `state` - The current state of the room at the time of request.
|
||||
|
||||
## Usage
|
||||
|
||||
A standard request:
|
||||
|
||||
```
|
||||
GET /_synapse/admin/v1/rooms/<room_id>/state
|
||||
|
||||
{}
|
||||
```
|
||||
|
||||
Response:
|
||||
|
||||
```json
|
||||
{
|
||||
"state": [
|
||||
{"type": "m.room.create", "state_key": "", "etc": true},
|
||||
{"type": "m.room.power_levels", "state_key": "", "etc": true},
|
||||
{"type": "m.room.name", "state_key": "", "etc": true}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
# Delete Room API
|
||||
|
||||
The Delete Room admin API allows server admins to remove rooms from server
|
||||
and block these rooms.
|
||||
It is a combination and improvement of "[Shutdown room](shutdown_room.md)"
|
||||
and "[Purge room](purge_room.md)" API.
|
||||
|
||||
Shuts down a room. Moves all local users and room aliases automatically to a
|
||||
new room if `new_room_user_id` is set. Otherwise local users only
|
||||
@@ -426,7 +380,7 @@ the new room. Users on other servers will be unaffected.
|
||||
|
||||
The API is:
|
||||
|
||||
```
|
||||
```json
|
||||
POST /_synapse/admin/v1/rooms/<room_id>/delete
|
||||
```
|
||||
|
||||
@@ -483,10 +437,6 @@ The following JSON body parameters are available:
|
||||
future attempts to join the room. Defaults to `false`.
|
||||
* `purge` - Optional. If set to `true`, it will remove all traces of the room from your database.
|
||||
Defaults to `true`.
|
||||
* `force_purge` - Optional, and ignored unless `purge` is `true`. If set to `true`, it
|
||||
will force a purge to go ahead even if there are local users still in the room. Do not
|
||||
use this unless a regular `purge` operation fails, as it could leave those users'
|
||||
clients in a confused state.
|
||||
|
||||
The JSON body must not be empty. The body must be at least `{}`.
|
||||
|
||||
@@ -499,217 +449,3 @@ The following fields are returned in the JSON response body:
|
||||
* `local_aliases` - An array of strings representing the local aliases that were migrated from
|
||||
the old room to the new.
|
||||
* `new_room_id` - A string representing the room ID of the new room.
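A minimal Python sketch of calling the Delete Room endpoint described above, moving local users into a new room and purging the old one; the homeserver URL, admin token, room ID and admin user ID are placeholders:

```python
import requests

BASE_URL = "https://matrix.example.com"   # placeholder homeserver URL
ADMIN_TOKEN = "<admin_access_token>"      # placeholder admin access token
ROOM_ID = "!badroom:example.com"          # placeholder room to shut down

resp = requests.post(
    f"{BASE_URL}/_synapse/admin/v1/rooms/{ROOM_ID}/delete",
    headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
    json={
        # move local users and aliases into a room owned by this user
        "new_room_user_id": "@admin:example.com",
        # remove all traces of the old room from the database afterwards
        "purge": True,
    },
)
resp.raise_for_status()
body = resp.json()
print("migrated aliases:", body["local_aliases"])
print("new room:", body["new_room_id"])
```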
|
||||
|
||||
|
||||
## Undoing room shutdowns
|
||||
|
||||
*Note*: This guide may be outdated by the time you read it. By nature of room shutdowns being performed at the database level,
|
||||
the structure can and does change without notice.
|
||||
|
||||
First, it's important to understand that a room shutdown is very destructive. Undoing a shutdown is not as simple as pretending it
|
||||
never happened - work has to be done to move forward instead of resetting the past. In fact, in some cases it might not be possible
|
||||
to recover at all:
|
||||
|
||||
* If the room was invite-only, your users will need to be re-invited.
|
||||
* If the room no longer has any members at all, it'll be impossible to rejoin.
|
||||
* The first user to rejoin will have to do so via an alias on a different server.
|
||||
|
||||
With all that being said, if you still want to try and recover the room:
|
||||
|
||||
1. For safety reasons, shut down Synapse.
|
||||
2. In the database, run `DELETE FROM blocked_rooms WHERE room_id = '!example:example.org';`
|
||||
* For caution: it's recommended to run this in a transaction: `BEGIN; DELETE ...;`, verify you got 1 result, then `COMMIT;`.
|
||||
* The room ID is the same one supplied to the shutdown room API, not the Content Violation room.
|
||||
3. Restart Synapse.
|
||||
|
||||
You will have to manually handle, if you so choose, the following:
|
||||
|
||||
* Aliases that would have been redirected to the Content Violation room.
|
||||
* Users that would have been booted from the room (and will have been force-joined to the Content Violation room).
|
||||
* Removal of the Content Violation room if desired.
|
||||
|
||||
|
||||
# Make Room Admin API
|
||||
|
||||
Grants another user the highest power available to a local user who is in the room.
|
||||
If the user is not in the room, and it is not publicly joinable, then invite the user.
|
||||
|
||||
By default the server admin (the caller) is granted power, but another user can
|
||||
optionally be specified, e.g.:
|
||||
|
||||
```
|
||||
POST /_synapse/admin/v1/rooms/<room_id_or_alias>/make_room_admin
|
||||
{
|
||||
"user_id": "@foo:example.com"
|
||||
}
|
||||
```
|
||||
|
||||
# Forward Extremities Admin API
|
||||
|
||||
Enables querying and deleting forward extremities from rooms. When a lot of forward
|
||||
extremities accumulate in a room, performance can become degraded. For details, see
|
||||
[#1760](https://github.com/matrix-org/synapse/issues/1760).
|
||||
|
||||
## Check for forward extremities
|
||||
|
||||
To check the status of forward extremities for a room:
|
||||
|
||||
```
|
||||
GET /_synapse/admin/v1/rooms/<room_id_or_alias>/forward_extremities
|
||||
```
|
||||
|
||||
A response as follows will be returned:
|
||||
|
||||
```json
|
||||
{
|
||||
"count": 1,
|
||||
"results": [
|
||||
{
|
||||
"event_id": "$M5SP266vsnxctfwFgFLNceaCo3ujhRtg_NiiHabcdefgh",
|
||||
"state_group": 439,
|
||||
"depth": 123,
|
||||
"received_ts": 1611263016761
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
## Deleting forward extremities
|
||||
|
||||
**WARNING**: Please ensure you know what you're doing and have read
|
||||
the related issue [#1760](https://github.com/matrix-org/synapse/issues/1760).
|
||||
Under no situations should this API be executed as an automated maintenance task!
|
||||
|
||||
If a room has lots of forward extremities, the extra can be
|
||||
deleted as follows:
|
||||
|
||||
```
|
||||
DELETE /_synapse/admin/v1/rooms/<room_id_or_alias>/forward_extremities
|
||||
```
|
||||
|
||||
A response as follows will be returned, indicating the amount of forward extremities
|
||||
that were deleted.
|
||||
|
||||
```json
|
||||
{
|
||||
"deleted": 1
|
||||
}
|
||||
```
|
||||
|
||||
# Event Context API
|
||||
|
||||
This API lets a client find the context of an event. This is designed primarily to investigate abuse reports.
|
||||
|
||||
```
|
||||
GET /_synapse/admin/v1/rooms/<room_id>/context/<event_id>
|
||||
```
|
||||
|
||||
This API mimics [GET /_matrix/client/r0/rooms/{roomId}/context/{eventId}](https://matrix.org/docs/spec/client_server/r0.6.1#get-matrix-client-r0-rooms-roomid-context-eventid). Please refer to the link for all details on parameters and response.
|
||||
|
||||
Example response:
|
||||
|
||||
```json
|
||||
{
|
||||
"end": "t29-57_2_0_2",
|
||||
"events_after": [
|
||||
{
|
||||
"content": {
|
||||
"body": "This is an example text message",
|
||||
"msgtype": "m.text",
|
||||
"format": "org.matrix.custom.html",
|
||||
"formatted_body": "<b>This is an example text message</b>"
|
||||
},
|
||||
"type": "m.room.message",
|
||||
"event_id": "$143273582443PhrSn:example.org",
|
||||
"room_id": "!636q39766251:example.com",
|
||||
"sender": "@example:example.org",
|
||||
"origin_server_ts": 1432735824653,
|
||||
"unsigned": {
|
||||
"age": 1234
|
||||
}
|
||||
}
|
||||
],
|
||||
"event": {
|
||||
"content": {
|
||||
"body": "filename.jpg",
|
||||
"info": {
|
||||
"h": 398,
|
||||
"w": 394,
|
||||
"mimetype": "image/jpeg",
|
||||
"size": 31037
|
||||
},
|
||||
"url": "mxc://example.org/JWEIFJgwEIhweiWJE",
|
||||
"msgtype": "m.image"
|
||||
},
|
||||
"type": "m.room.message",
|
||||
"event_id": "$f3h4d129462ha:example.com",
|
||||
"room_id": "!636q39766251:example.com",
|
||||
"sender": "@example:example.org",
|
||||
"origin_server_ts": 1432735824653,
|
||||
"unsigned": {
|
||||
"age": 1234
|
||||
}
|
||||
},
|
||||
"events_before": [
|
||||
{
|
||||
"content": {
|
||||
"body": "something-important.doc",
|
||||
"filename": "something-important.doc",
|
||||
"info": {
|
||||
"mimetype": "application/msword",
|
||||
"size": 46144
|
||||
},
|
||||
"msgtype": "m.file",
|
||||
"url": "mxc://example.org/FHyPlCeYUSFFxlgbQYZmoEoe"
|
||||
},
|
||||
"type": "m.room.message",
|
||||
"event_id": "$143273582443PhrSn:example.org",
|
||||
"room_id": "!636q39766251:example.com",
|
||||
"sender": "@example:example.org",
|
||||
"origin_server_ts": 1432735824653,
|
||||
"unsigned": {
|
||||
"age": 1234
|
||||
}
|
||||
}
|
||||
],
|
||||
"start": "t27-54_2_0_2",
|
||||
"state": [
|
||||
{
|
||||
"content": {
|
||||
"creator": "@example:example.org",
|
||||
"room_version": "1",
|
||||
"m.federate": true,
|
||||
"predecessor": {
|
||||
"event_id": "$something:example.org",
|
||||
"room_id": "!oldroom:example.org"
|
||||
}
|
||||
},
|
||||
"type": "m.room.create",
|
||||
"event_id": "$143273582443PhrSn:example.org",
|
||||
"room_id": "!636q39766251:example.com",
|
||||
"sender": "@example:example.org",
|
||||
"origin_server_ts": 1432735824653,
|
||||
"unsigned": {
|
||||
"age": 1234
|
||||
},
|
||||
"state_key": ""
|
||||
},
|
||||
{
|
||||
"content": {
|
||||
"membership": "join",
|
||||
"avatar_url": "mxc://example.org/SEsfnsuifSDFSSEF",
|
||||
"displayname": "Alice Margatroid"
|
||||
},
|
||||
"type": "m.room.member",
|
||||
"event_id": "$143273582443PhrSn:example.org",
|
||||
"room_id": "!636q39766251:example.com",
|
||||
"sender": "@example:example.org",
|
||||
"origin_server_ts": 1432735824653,
|
||||
"unsigned": {
|
||||
"age": 1234
|
||||
},
|
||||
"state_key": "@alice:example.org"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
@@ -1,7 +1,4 @@
|
||||
# Deprecated: Shutdown room API
|
||||
|
||||
**The old Shutdown room API is deprecated and will be removed in a future release.
|
||||
See the new [Delete Room API](rooms.md#delete-room-api) for more details.**
|
||||
# Shutdown room API
|
||||
|
||||
Shuts down a room, preventing new joins and moves local users and room aliases automatically
|
||||
to a new room. The new room will be created with the user specified by the
|
||||
@@ -13,6 +10,8 @@ disallow any further invites or joins.
|
||||
The local server will only have the power to move local user and room aliases to
|
||||
the new room. Users on other servers will be unaffected.
|
||||
|
||||
See also: [Delete Room API](rooms.md#delete-room-api)
|
||||
|
||||
## API
|
||||
|
||||
You will need to authenticate with an access token for an admin user.
|
||||
|
||||
@@ -1,83 +0,0 @@
|
||||
# Users' media usage statistics
|
||||
|
||||
Returns information about all local media usage of users. Gives the
|
||||
possibility to filter them by time and user.
|
||||
|
||||
The API is:
|
||||
|
||||
```
|
||||
GET /_synapse/admin/v1/statistics/users/media
|
||||
```
|
||||
|
||||
To use it, you will need to authenticate by providing an `access_token`
|
||||
for a server admin: see [README.rst](README.rst).
|
||||
|
||||
A response body like the following is returned:
|
||||
|
||||
```json
|
||||
{
|
||||
"users": [
|
||||
{
|
||||
"displayname": "foo_user_0",
|
||||
"media_count": 2,
|
||||
"media_length": 134,
|
||||
"user_id": "@foo_user_0:test"
|
||||
},
|
||||
{
|
||||
"displayname": "foo_user_1",
|
||||
"media_count": 2,
|
||||
"media_length": 134,
|
||||
"user_id": "@foo_user_1:test"
|
||||
}
|
||||
],
|
||||
"next_token": 3,
|
||||
"total": 10
|
||||
}
|
||||
```
|
||||
|
||||
To paginate, check for `next_token` and if present, call the endpoint
|
||||
again with `from` set to the value of `next_token`. This will return a new page.
|
||||
|
||||
If the endpoint does not return a `next_token` then there are no more
|
||||
reports to paginate through.
|
||||
|
||||
**Parameters**
|
||||
|
||||
The following parameters should be set in the URL:
|
||||
|
||||
* `limit`: string representing a positive integer - Is optional but is
|
||||
used for pagination, denoting the maximum number of items to return
|
||||
in this call. Defaults to `100`.
|
||||
* `from`: string representing a positive integer - Is optional but used for pagination,
|
||||
denoting the offset in the returned results. This should be treated as an opaque value
|
||||
and not explicitly set to anything other than the return value of `next_token` from a
|
||||
previous call. Defaults to `0`.
|
||||
* `order_by` - string - The method in which to sort the returned list of users. Valid values are:
|
||||
- `user_id` - Users are ordered alphabetically by `user_id`. This is the default.
|
||||
- `displayname` - Users are ordered alphabetically by `displayname`.
|
||||
- `media_length` - Users are ordered by the total size of uploaded media in bytes.
|
||||
Smallest to largest.
|
||||
- `media_count` - Users are ordered by number of uploaded media. Smallest to largest.
|
||||
* `from_ts` - string representing a positive integer - Considers only
|
||||
files created at this timestamp or later. Unix timestamp in ms.
|
||||
* `until_ts` - string representing a positive integer - Considers only
|
||||
files created at this timestamp or earlier. Unix timestamp in ms.
|
||||
* `search_term` - string - Filter users by their user ID localpart **or** displayname.
|
||||
The search term can be found in any part of the string.
|
||||
Defaults to no filtering.
|
||||
* `dir` - string - Direction of order. Either `f` for forwards or `b` for backwards.
|
||||
Setting this value to `b` will reverse the above sort order. Defaults to `f`.
|
||||
|
||||
|
||||
**Response**
|
||||
|
||||
The following fields are returned in the JSON response body:
|
||||
|
||||
* `users` - An array of objects, each containing information
|
||||
about the user and their local media. Objects contain the following fields:
|
||||
- `displayname` - string - Displayname of this user.
|
||||
- `media_count` - integer - Number of uploaded media by this user.
|
||||
- `media_length` - integer - Size of uploaded media in bytes by this user.
|
||||
- `user_id` - string - Fully-qualified user ID (ex. `@user:server.com`).
|
||||
* `next_token` - integer - Opaque value used for pagination. See above.
|
||||
* `total` - integer - Total number of users after filtering.
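A short Python sketch of querying the statistics endpoint above, listing the ten users with the largest total upload size; the homeserver URL and admin token are placeholders:

```python
import requests

BASE_URL = "https://matrix.example.com"   # placeholder homeserver URL
ADMIN_TOKEN = "<admin_access_token>"      # placeholder admin access token

resp = requests.get(
    f"{BASE_URL}/_synapse/admin/v1/statistics/users/media",
    headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
    # order by total uploaded bytes, reversed (dir=b): heaviest uploaders first
    params={"order_by": "media_length", "dir": "b", "limit": 10},
)
resp.raise_for_status()
for user in resp.json()["users"]:
    print(user["user_id"], user["media_count"], "files,",
          user["media_length"], "bytes")
```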
|
||||
@@ -29,14 +29,8 @@ It returns a JSON body like the following:
|
||||
}
|
||||
],
|
||||
"avatar_url": "<avatar_url>",
|
||||
"admin": 0,
|
||||
"deactivated": 0,
|
||||
"shadow_banned": 0,
|
||||
"password_hash": "$2b$12$p9B4GkqYdRTPGD",
|
||||
"creation_ts": 1560432506,
|
||||
"appservice_id": null,
|
||||
"consent_server_notice_sent": null,
|
||||
"consent_version": null
|
||||
"admin": false,
|
||||
"deactivated": false
|
||||
}
|
||||
|
||||
URL parameters:
|
||||
@@ -99,8 +93,6 @@ Body parameters:
|
||||
|
||||
- ``deactivated``, optional. If unspecified, deactivation state will be left
|
||||
unchanged on existing accounts and set to ``false`` for new accounts.
|
||||
A user cannot be erased by deactivating with this API. For details on deactivating users see
|
||||
`Deactivate Account <#deactivate-account>`_.
|
||||
|
||||
If the user already exists then optional parameters default to the current value.
|
||||
|
||||
@@ -116,7 +108,7 @@ The api is::
|
||||
|
||||
GET /_synapse/admin/v2/users?from=0&limit=10&guests=false
|
||||
|
||||
To use it, you will need to authenticate by providing an ``access_token`` for a
|
||||
To use it, you will need to authenticate by providing an `access_token` for a
|
||||
server admin: see `README.rst <README.rst>`_.
|
||||
|
||||
The parameter ``from`` is optional but used for pagination, denoting the
|
||||
@@ -127,11 +119,8 @@ from a previous call.
|
||||
The parameter ``limit`` is optional but is used for pagination, denoting the
|
||||
maximum number of items to return in this call. Defaults to ``100``.
|
||||
|
||||
The parameter ``user_id`` is optional and filters to only return users with user IDs
|
||||
that contain this value. This parameter is ignored when using the ``name`` parameter.
|
||||
|
||||
The parameter ``name`` is optional and filters to only return users with user ID localparts
|
||||
**or** displaynames that contain this value.
|
||||
The parameter ``user_id`` is optional and filters to only users with user IDs
|
||||
that contain this value.
|
||||
|
||||
The parameter ``guests`` is optional and if ``false`` will **exclude** guest users.
|
||||
Defaults to ``true`` to include guest users.
|
||||
@@ -147,20 +136,20 @@ A JSON body is returned with the following shape:
|
||||
"users": [
|
||||
{
|
||||
"name": "<user_id1>",
|
||||
"password_hash": "<password_hash1>",
|
||||
"is_guest": 0,
|
||||
"admin": 0,
|
||||
"user_type": null,
|
||||
"deactivated": 0,
|
||||
"shadow_banned": 0,
|
||||
"displayname": "<User One>",
|
||||
"avatar_url": null
|
||||
}, {
|
||||
"name": "<user_id2>",
|
||||
"password_hash": "<password_hash2>",
|
||||
"is_guest": 0,
|
||||
"admin": 1,
|
||||
"user_type": null,
|
||||
"deactivated": 0,
|
||||
"shadow_banned": 0,
|
||||
"displayname": "<User Two>",
|
||||
"avatar_url": "<avatar_url>"
|
||||
}
|
||||
@@ -184,13 +173,6 @@ The api is::
|
||||
|
||||
GET /_synapse/admin/v1/whois/<user_id>
|
||||
|
||||
and::
|
||||
|
||||
GET /_matrix/client/r0/admin/whois/<userId>
|
||||
|
||||
See also: `Client Server API Whois
|
||||
<https://matrix.org/docs/spec/client_server/r0.6.1#get-matrix-client-r0-admin-whois-userid>`_
|
||||
|
||||
To use it, you will need to authenticate by providing an ``access_token`` for a
|
||||
server admin: see `README.rst <README.rst>`_.
|
||||
|
||||
@@ -229,11 +211,9 @@ Deactivate Account
|
||||
|
||||
This API deactivates an account. It removes active access tokens, resets the
|
||||
password, and deletes third-party IDs (to prevent the user requesting a
|
||||
password reset).
|
||||
|
||||
It can also mark the user as GDPR-erased. This means messages sent by the
|
||||
user will still be visible by anyone that was in the room when these messages
|
||||
were sent, but hidden from users joining the room afterwards.
|
||||
password reset). It can also mark the user as GDPR-erased (stopping their data
|
||||
from being distributed further, and deleting it entirely if there are no other
|
||||
references to it).
|
||||
|
||||
The api is::
|
||||
|
||||
@@ -253,25 +233,6 @@ server admin: see `README.rst <README.rst>`_.
|
||||
The erase parameter is optional and defaults to ``false``.
|
||||
An empty body may be passed for backwards compatibility.
|
||||
|
||||
The following actions are performed when deactivating an user:
|
||||
|
||||
- Try to unbind 3PIDs from the identity server
|
||||
- Remove all 3PIDs from the homeserver
|
||||
- Delete all devices and E2EE keys
|
||||
- Delete all access tokens
|
||||
- Delete the password hash
|
||||
- Removal from all rooms the user is a member of
|
||||
- Remove the user from the user directory
|
||||
- Reject all pending invites
|
||||
- Remove all account validity information related to the user
|
||||
|
||||
The following additional actions are performed during deactivation if ``erase``
|
||||
is set to ``true``:
|
||||
|
||||
- Remove the user's display name
|
||||
- Remove the user's avatar URL
|
||||
- Mark the user as erased
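A minimal Python sketch of deactivating and erasing an account. The endpoint path below is an assumption based on the usual Synapse admin API layout, since it is not shown in this excerpt; the homeserver URL, admin token and user ID are placeholders.

.. code:: python

    import requests

    BASE_URL = "https://matrix.example.com"   # placeholder homeserver URL
    ADMIN_TOKEN = "<admin_access_token>"      # placeholder admin access token
    USER_ID = "@badactor:example.com"         # placeholder local user

    # Assumed endpoint path; check the full document for the exact URL.
    resp = requests.post(
        f"{BASE_URL}/_synapse/admin/v1/deactivate/{USER_ID}",
        headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
        json={"erase": True},   # also mark the account as GDPR-erased
    )
    resp.raise_for_status()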
|
||||
|
||||
|
||||
Reset password
|
||||
==============
|
||||
@@ -288,7 +249,7 @@ with a body of:
|
||||
|
||||
{
|
||||
"new_password": "<secret>",
|
||||
"logout_devices": true
|
||||
"logout_devices": true,
|
||||
}
|
||||
|
||||
To use it, you will need to authenticate by providing an ``access_token`` for a
|
||||
@@ -338,195 +299,6 @@ To use it, you will need to authenticate by providing an ``access_token`` for a
|
||||
server admin: see `README.rst <README.rst>`_.
|
||||
|
||||
|
||||
List room memberships of an user
|
||||
================================
|
||||
Gets a list of all ``room_id`` that a specific ``user_id`` is a member of.
|
||||
|
||||
The API is::
|
||||
|
||||
GET /_synapse/admin/v1/users/<user_id>/joined_rooms
|
||||
|
||||
To use it, you will need to authenticate by providing an ``access_token`` for a
|
||||
server admin: see `README.rst <README.rst>`_.
|
||||
|
||||
A response body like the following is returned:
|
||||
|
||||
.. code:: json
|
||||
|
||||
{
|
||||
"joined_rooms": [
|
||||
"!DuGcnbhHGaSZQoNQR:matrix.org",
|
||||
"!ZtSaPCawyWtxfWiIy:matrix.org"
|
||||
],
|
||||
"total": 2
|
||||
}
|
||||
|
||||
The server returns the list of rooms of which both the user and the server
are members. If the user is local, all the rooms of which the user is a
member are returned.
|
||||
|
||||
**Parameters**
|
||||
|
||||
The following parameters should be set in the URL:
|
||||
|
||||
- ``user_id`` - fully qualified: for example, ``@user:server.com``.
|
||||
|
||||
**Response**
|
||||
|
||||
The following fields are returned in the JSON response body:
|
||||
|
||||
- ``joined_rooms`` - An array of ``room_id``.
|
||||
- ``total`` - Number of rooms.
|
||||
|
||||
|
||||
List media of a user
|
||||
====================
|
||||
Gets a list of all local media that a specific ``user_id`` has created.
|
||||
By default, the response is ordered by descending creation date and ascending media ID.
|
||||
The newest media is on top. You can change the order with parameters
|
||||
``order_by`` and ``dir``.
|
||||
|
||||
The API is::
|
||||
|
||||
GET /_synapse/admin/v1/users/<user_id>/media
|
||||
|
||||
To use it, you will need to authenticate by providing an ``access_token`` for a
|
||||
server admin: see `README.rst <README.rst>`_.
|
||||
|
||||
A response body like the following is returned:
|
||||
|
||||
.. code:: json
|
||||
|
||||
{
|
||||
"media": [
|
||||
{
|
||||
"created_ts": 100400,
|
||||
"last_access_ts": null,
|
||||
"media_id": "qXhyRzulkwLsNHTbpHreuEgo",
|
||||
"media_length": 67,
|
||||
"media_type": "image/png",
|
||||
"quarantined_by": null,
|
||||
"safe_from_quarantine": false,
|
||||
"upload_name": "test1.png"
|
||||
},
|
||||
{
|
||||
"created_ts": 200400,
|
||||
"last_access_ts": null,
|
||||
"media_id": "FHfiSnzoINDatrXHQIXBtahw",
|
||||
"media_length": 67,
|
||||
"media_type": "image/png",
|
||||
"quarantined_by": null,
|
||||
"safe_from_quarantine": false,
|
||||
"upload_name": "test2.png"
|
||||
}
|
||||
],
|
||||
"next_token": 3,
|
||||
"total": 2
|
||||
}
|
||||
|
||||
To paginate, check for ``next_token`` and if present, call the endpoint again
|
||||
with ``from`` set to the value of ``next_token``. This will return a new page.
|
||||
|
||||
If the endpoint does not return a ``next_token`` then there are no more
|
||||
reports to paginate through.
|
||||
|
||||
**Parameters**
|
||||
|
||||
The following parameters should be set in the URL:
|
||||
|
||||
- ``user_id`` - string - fully qualified: for example, ``@user:server.com``.
|
||||
- ``limit``: string representing a positive integer - Is optional but is used for pagination,
|
||||
denoting the maximum number of items to return in this call. Defaults to ``100``.
|
||||
- ``from``: string representing a positive integer - Is optional but used for pagination,
|
||||
denoting the offset in the returned results. This should be treated as an opaque value and
|
||||
not explicitly set to anything other than the return value of ``next_token`` from a previous call.
|
||||
Defaults to ``0``.
|
||||
- ``order_by`` - The method by which to sort the returned list of media.
|
||||
If the ordered field has duplicates, the second order is always by ascending ``media_id``,
|
||||
which guarantees a stable ordering. Valid values are:
|
||||
|
||||
- ``media_id`` - Media are ordered alphabetically by ``media_id``.
|
||||
- ``upload_name`` - Media are ordered alphabetically by name the media was uploaded with.
|
||||
- ``created_ts`` - Media are ordered by when the content was uploaded in ms.
|
||||
Smallest to largest. This is the default.
|
||||
- ``last_access_ts`` - Media are ordered by when the content was last accessed in ms.
|
||||
Smallest to largest.
|
||||
- ``media_length`` - Media are ordered by length of the media in bytes.
|
||||
Smallest to largest.
|
||||
- ``media_type`` - Media are ordered alphabetically by MIME-type.
|
||||
- ``quarantined_by`` - Media are ordered alphabetically by the user ID that
|
||||
initiated the quarantine request for this media.
|
||||
- ``safe_from_quarantine`` - Media are ordered by whether the media is safe
  from quarantining.
|
||||
|
||||
- ``dir`` - Direction of media order. Either ``f`` for forwards or ``b`` for backwards.
|
||||
Setting this value to ``b`` will reverse the above sort order. Defaults to ``f``.
|
||||
|
||||
If neither ``order_by`` nor ``dir`` is set, the default order is newest media on top
|
||||
(corresponds to ``order_by`` = ``created_ts`` and ``dir`` = ``b``).
|
||||
|
||||
Caution. The database only has indexes on the columns ``media_id``,
|
||||
``user_id`` and ``created_ts``. This means that if a different sort order is used
|
||||
(``upload_name``, ``last_access_ts``, ``media_length``, ``media_type``,
|
||||
``quarantined_by`` or ``safe_from_quarantine``), this can cause a large load on the
|
||||
database, especially for large environments.
|
||||
|
||||
**Response**
|
||||
|
||||
The following fields are returned in the JSON response body:
|
||||
|
||||
- ``media`` - An array of objects, each containing information about a media.
|
||||
Media objects contain the following fields:
|
||||
|
||||
- ``created_ts`` - integer - Timestamp when the content was uploaded in ms.
|
||||
- ``last_access_ts`` - integer - Timestamp when the content was last accessed in ms.
|
||||
- ``media_id`` - string - The id used to refer to the media.
|
||||
- ``media_length`` - integer - Length of the media in bytes.
|
||||
- ``media_type`` - string - The MIME-type of the media.
|
||||
- ``quarantined_by`` - string - The user ID that initiated the quarantine request
|
||||
for this media.
|
||||
|
||||
- ``safe_from_quarantine`` - bool - Status if this media is safe from quarantining.
|
||||
- ``upload_name`` - string - The name the media was uploaded with.
|
||||
|
||||
- ``next_token``: integer - Indication for pagination. See above.
|
||||
- ``total`` - integer - Total number of media.
|
||||
|
||||
Login as a user
|
||||
===============
|
||||
|
||||
Get an access token that can be used to authenticate as that user. Useful for
|
||||
when admins wish to do actions on behalf of a user.
|
||||
|
||||
The API is::
|
||||
|
||||
POST /_synapse/admin/v1/users/<user_id>/login
|
||||
{}
|
||||
|
||||
An optional ``valid_until_ms`` field can be specified in the request body as an
|
||||
integer timestamp that specifies when the token should expire. By default tokens
|
||||
do not expire.
|
||||
|
||||
A response body like the following is returned:
|
||||
|
||||
.. code:: json
|
||||
|
||||
{
|
||||
"access_token": "<opaque_access_token_string>"
|
||||
}
|
||||
|
||||
|
||||
This API does *not* generate a new device for the user, so it will not appear
in their ``/devices`` list, and in general the target user should not be able
to tell that someone has logged in as them.
|
||||
|
||||
To expire the token call the standard ``/logout`` API with the token.
|
||||
|
||||
Note: The token will expire if the *admin* user calls ``/logout/all`` from any
|
||||
of their devices, but the token will *not* expire if the target user does the
|
||||
same.
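A short Python sketch of obtaining a time-limited token for a user via the login endpoint above and later expiring it through the standard client-server ``/logout`` API; the homeserver URL, admin token and user ID are placeholders.

.. code:: python

    import time

    import requests

    BASE_URL = "https://matrix.example.com"   # placeholder homeserver URL
    ADMIN_TOKEN = "<admin_access_token>"      # placeholder admin access token
    USER_ID = "@alice:example.com"            # placeholder local user

    # Obtain a token for USER_ID that expires in one hour
    resp = requests.post(
        f"{BASE_URL}/_synapse/admin/v1/users/{USER_ID}/login",
        headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
        json={"valid_until_ms": int(time.time() * 1000) + 60 * 60 * 1000},
    )
    resp.raise_for_status()
    user_token = resp.json()["access_token"]

    # ... act on behalf of the user with user_token ...

    # Expire the token early via the standard /logout endpoint
    requests.post(
        f"{BASE_URL}/_matrix/client/r0/logout",
        headers={"Authorization": f"Bearer {user_token}"},
    ).raise_for_status()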
|
||||
|
||||
|
||||
User devices
|
||||
============
|
||||
|
||||
@@ -561,8 +333,7 @@ A response body like the following is returned:
|
||||
"last_seen_ts": 1474491775025,
|
||||
"user_id": "<user_id>"
|
||||
}
|
||||
],
|
||||
"total": 2
|
||||
]
|
||||
}
|
||||
|
||||
**Parameters**
|
||||
@@ -587,8 +358,6 @@ The following fields are returned in the JSON response body:
|
||||
devices was last seen. (May be a few minutes out of date, for efficiency reasons).
|
||||
- ``user_id`` - Owner of device.
|
||||
|
||||
- ``total`` - Total number of user's devices.
|
||||
|
||||
Delete multiple devices
|
||||
------------------
|
||||
Deletes the given devices for a specific ``user_id``, and invalidates
|
||||
@@ -714,112 +483,3 @@ The following parameters should be set in the URL:
|
||||
|
||||
- ``user_id`` - fully qualified: for example, ``@user:server.com``.
|
||||
- ``device_id`` - The device to delete.
|
||||
|
||||
List all pushers
|
||||
================
|
||||
Gets information about all pushers for a specific ``user_id``.
|
||||
|
||||
The API is::
|
||||
|
||||
GET /_synapse/admin/v1/users/<user_id>/pushers
|
||||
|
||||
To use it, you will need to authenticate by providing an ``access_token`` for a
|
||||
server admin: see `README.rst <README.rst>`_.
|
||||
|
||||
A response body like the following is returned:
|
||||
|
||||
.. code:: json
|
||||
|
||||
{
|
||||
"pushers": [
|
||||
{
|
||||
"app_display_name":"HTTP Push Notifications",
|
||||
"app_id":"m.http",
|
||||
"data": {
|
||||
"url":"example.com"
|
||||
},
|
||||
"device_display_name":"pushy push",
|
||||
"kind":"http",
|
||||
"lang":"None",
|
||||
"profile_tag":"",
|
||||
"pushkey":"a@example.com"
|
||||
}
|
||||
],
|
||||
"total": 1
|
||||
}
|
||||
|
||||
**Parameters**
|
||||
|
||||
The following parameters should be set in the URL:
|
||||
|
||||
- ``user_id`` - fully qualified: for example, ``@user:server.com``.
|
||||
|
||||
**Response**
|
||||
|
||||
The following fields are returned in the JSON response body:
|
||||
|
||||
- ``pushers`` - An array containing the current pushers for the user
|
||||
|
||||
- ``app_display_name`` - string - A string that will allow the user to identify
|
||||
what application owns this pusher.
|
||||
|
||||
- ``app_id`` - string - This is a reverse-DNS style identifier for the application.
|
||||
Max length, 64 chars.
|
||||
|
||||
- ``data`` - A dictionary of information for the pusher implementation itself.
|
||||
|
||||
- ``url`` - string - Required if ``kind`` is ``http``. The URL to use to send
|
||||
notifications to.
|
||||
|
||||
- ``format`` - string - The format to use when sending notifications to the
|
||||
Push Gateway.
|
||||
|
||||
- ``device_display_name`` - string - A string that will allow the user to identify
|
||||
what device owns this pusher.
|
||||
|
||||
- ``profile_tag`` - string - This string determines which set of device specific rules
|
||||
this pusher executes.
|
||||
|
||||
- ``kind`` - string - The kind of pusher. "http" is a pusher that sends HTTP pokes.
|
||||
- ``lang`` - string - The preferred language for receiving notifications
|
||||
(e.g. 'en' or 'en-US')
|
||||
|
||||
- ``profile_tag`` - string - This string determines which set of device specific rules
|
||||
this pusher executes.
|
||||
|
||||
- ``pushkey`` - string - This is a unique identifier for this pusher.
|
||||
Max length, 512 bytes.
|
||||
|
||||
- ``total`` - integer - Number of pushers.
|
||||
|
||||
See also `Client-Server API Spec <https://matrix.org/docs/spec/client_server/latest#get-matrix-client-r0-pushers>`_
|
||||
|
||||
Shadow-banning users
|
||||
====================
|
||||
|
||||
Shadow-banning is a useful tool for moderating malicious or egregiously abusive users.
|
||||
A shadow-banned user receives successful responses to their client-server API requests,
|
||||
but the events are not propagated into rooms. This can be an effective tool as it
|
||||
(hopefully) takes the user longer to realise they are being moderated, delaying their
pivot to another account.
|
||||
|
||||
Shadow-banning a user should be used as a tool of last resort and may lead to confusing
|
||||
or broken behaviour for the client. A shadow-banned user will not receive any
|
||||
notifications, and it is generally more appropriate to ban or kick abusive users.
|
||||
A shadow-banned user will be unable to contact anyone on the server.
|
||||
|
||||
The API is::
|
||||
|
||||
POST /_synapse/admin/v1/users/<user_id>/shadow_ban
|
||||
|
||||
To use it, you will need to authenticate by providing an ``access_token`` for a
|
||||
server admin: see `README.rst <README.rst>`_.
|
||||
|
||||
An empty JSON dict is returned.
|
||||
|
||||
**Parameters**
|
||||
|
||||
The following parameters should be set in the URL:
|
||||
|
||||
- ``user_id`` - The fully qualified MXID: for example, ``@user:server.com``. The user must
|
||||
be local.
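As a similar sketch (again with placeholder values), shadow-banning a user from
Python is a plain ``POST`` with no request body; the empty JSON dict in the
response is the only confirmation:

.. code:: python

    from urllib.parse import quote

    import requests

    HOMESERVER = "https://matrix.example.com"  # placeholder
    ADMIN_TOKEN = "<admin access token>"       # placeholder
    USER_ID = "@user:server.com"               # must be a local user

    resp = requests.post(
        f"{HOMESERVER}/_synapse/admin/v1/users/{quote(USER_ID)}/shadow_ban",
        headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
    )
    resp.raise_for_status()
    assert resp.json() == {}  # an empty JSON dict is returned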
|
||||
|
||||
@@ -1,32 +0,0 @@
|
||||
digraph auth {
|
||||
nodesep=0.5;
|
||||
rankdir="RL";
|
||||
|
||||
C [label="Create (1,1)"];
|
||||
|
||||
BJ [label="Bob's Join (2,1)", color=red];
|
||||
BJ2 [label="Bob's Join (2,2)", color=red];
|
||||
BJ2 -> BJ [color=red, dir=none];
|
||||
|
||||
subgraph cluster_foo {
|
||||
A1 [label="Alice's invite (4,1)", color=blue];
|
||||
A2 [label="Alice's Join (4,2)", color=blue];
|
||||
A3 [label="Alice's Join (4,3)", color=blue];
|
||||
A3 -> A2 -> A1 [color=blue, dir=none];
|
||||
color=none;
|
||||
}
|
||||
|
||||
PL1 [label="Power Level (3,1)", color=darkgreen];
|
||||
PL2 [label="Power Level (3,2)", color=darkgreen];
|
||||
PL2 -> PL1 [color=darkgreen, dir=none];
|
||||
|
||||
{rank = same; C; BJ; PL1; A1;}
|
||||
|
||||
A1 -> C [color=grey];
|
||||
A1 -> BJ [color=grey];
|
||||
PL1 -> C [color=grey];
|
||||
BJ2 -> PL1 [penwidth=2];
|
||||
|
||||
A3 -> PL2 [penwidth=2];
|
||||
A1 -> PL1 -> BJ -> C [penwidth=2];
|
||||
}
|
||||
Binary file not shown.
|
@@ -1,108 +0,0 @@
|
||||
# Auth Chain Difference Algorithm
|
||||
|
||||
The auth chain difference algorithm is used by V2 state resolution, where a
|
||||
naive implementation can be a significant source of CPU and DB usage.
|
||||
|
||||
### Definitions
|
||||
|
||||
A *state set* is a set of state events; e.g. the input of a state resolution
|
||||
algorithm is a collection of state sets.
|
||||
|
||||
The *auth chain* of a set of events is all the events' auth events and *their*
|
||||
auth events, recursively (i.e. the events reachable by walking the graph induced
|
||||
by an event's auth events links).
|
||||
|
||||
The *auth chain difference* of a collection of state sets is the union minus the
|
||||
intersection of the sets of auth chains corresponding to the state sets, i.e. an
|
||||
event is in the auth chain difference if it is reachable by walking the auth
|
||||
event graph from at least one of the state sets but not from *all* of the state
|
||||
sets.
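To make the definition concrete, here is a naive Python sketch (not the
implementation Synapse uses) that computes the auth chain difference directly
from an `auth_events` map of event ID to that event's auth event IDs:

```python
from typing import Dict, Iterable, List, Set


def auth_chain(event_ids: Iterable[str], auth_events: Dict[str, List[str]]) -> Set[str]:
    """All events reachable by walking auth event links from `event_ids`
    (excluding the starting events themselves, per the definition above)."""
    chain: Set[str] = set()
    stack = list(event_ids)
    while stack:
        for parent in auth_events.get(stack.pop(), []):
            if parent not in chain:
                chain.add(parent)
                stack.append(parent)
    return chain


def auth_chain_difference(
    state_sets: List[Set[str]], auth_events: Dict[str, List[str]]
) -> Set[str]:
    # Assumes at least one state set: union minus intersection of the
    # per-state-set auth chains.
    chains = [auth_chain(state_set, auth_events) for state_set in state_sets]
    return set().union(*chains) - set.intersection(*chains)
```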
|
||||
|
||||
## Breadth First Walk Algorithm
|
||||
|
||||
A way of calculating the auth chain difference without calculating the full auth
|
||||
chains for each state set is to do a parallel breadth first walk (ordered by
|
||||
depth) of each state set's auth chain. By tracking which events are reachable
|
||||
from each state set we can finish early if every pending event is reachable from
|
||||
every state set.
|
||||
|
||||
This can work well for state sets that have a small auth chain difference, but
|
||||
can be very inefficient for larger differences. However, this algorithm is still
|
||||
used if we don't have a chain cover index for the room (e.g. because we're in
|
||||
the process of indexing it).
|
||||
|
||||
## Chain Cover Index
|
||||
|
||||
Synapse computes auth chain differences by pre-computing a "chain cover" index
|
||||
for the auth chain in a room, allowing efficient reachability queries like "is
|
||||
event A in the auth chain of event B". This is done by assigning every event a
|
||||
*chain ID* and *sequence number* (e.g. `(5,3)`), and having a map of *links*
|
||||
between chains (e.g. `(5,3) -> (2,4)`) such that A is reachable by B (i.e. `A`
|
||||
is in the auth chain of `B`) if and only if either:
|
||||
|
||||
1. A and B have the same chain ID and `A`'s sequence number is less than `B`'s
|
||||
sequence number; or
|
||||
2. there is a link `L` between `B`'s chain ID and `A`'s chain ID such that
|
||||
`L.start_seq_no` <= `B.seq_no` and `A.seq_no` <= `L.end_seq_no`.
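The reachability test above is cheap to express in code. A small Python sketch
follows; the shape of the `links` mapping is an assumption made for the example
rather than Synapse's actual schema:

```python
from typing import Dict, List, Tuple

Event = Tuple[int, int]  # (chain ID, sequence number)


def is_in_auth_chain(
    a: Event,
    b: Event,
    links: Dict[Tuple[int, int], List[Tuple[int, int]]],
) -> bool:
    """Return True if `a` is in the auth chain of `b`.

    `links[(b_chain, a_chain)]` is a list of `(start_seq_no, end_seq_no)` pairs,
    meaning the event `(b_chain, start_seq_no)` reaches `(a_chain, end_seq_no)`.
    """
    a_chain, a_seq = a
    b_chain, b_seq = b
    if a_chain == b_chain:
        return a_seq < b_seq
    return any(
        start_seq_no <= b_seq and a_seq <= end_seq_no
        for start_seq_no, end_seq_no in links.get((b_chain, a_chain), [])
    )
```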
|
||||
|
||||
There are actually two potential implementations, one where we store links from
|
||||
each chain to every other reachable chain (the transitive closure of the links
|
||||
graph), and one where we remove redundant links (the transitive reduction of the
|
||||
links graph), e.g. if we have chains `C3 -> C2 -> C1` then the link `C3 -> C1`
|
||||
would not be stored. Synapse uses the former implementation so that it doesn't
|
||||
need to recurse to test reachability between chains.
|
||||
|
||||
### Example
|
||||
|
||||
An example auth graph would look like the following, where chains have been
|
||||
formed based on type/state_key, denoted by colour, and labelled with
|
||||
`(chain ID, sequence number)`. Links are denoted by the arrows (links in grey
|
||||
are those that would be removed in the second implementation described above).
|
||||
|
||||

|
||||
|
||||
Note that we don't include all links between events and their auth events, as
|
||||
most of those links would be redundant. For example, all events point to the
|
||||
create event, but each chain only needs the one link from its base to the
|
||||
create event.
|
||||
|
||||
## Using the Index
|
||||
|
||||
This index can be used to calculate the auth chain difference of the state sets
|
||||
by looking at the chain ID and sequence numbers reachable from each state set:
|
||||
|
||||
1. For every state set lookup the chain ID/sequence numbers of each state event
|
||||
2. Use the index to find all chains and the maximum sequence number reachable
|
||||
from each state set.
|
||||
3. The auth chain difference is then, for each chain, all events whose sequence
numbers are greater than the minimum sequence number reachable by *all* state
sets (if any) and no greater than the maximum reachable from *any* state set.
|
||||
|
||||
Note that step 2 is effectively calculating the auth chain for each state set
|
||||
(in terms of chain IDs and sequence numbers), and step 3 is calculating the
|
||||
difference between the union and intersection of the auth chains.
|
||||
|
||||
### Worked Example
|
||||
|
||||
For example, given the above graph, we can calculate the difference between
|
||||
state sets consisting of:
|
||||
|
||||
1. `S1`: Alice's invite `(4,1)` and Bob's second join `(2,2)`; and
|
||||
2. `S2`: Alice's second join `(4,3)` and Bob's first join `(2,1)`.
|
||||
|
||||
Using the index we see that the following auth chains are reachable from each
|
||||
state set:
|
||||
|
||||
1. `S1`: `(1,1)`, `(2,2)`, `(3,1)` & `(4,1)`
|
||||
2. `S2`: `(1,1)`, `(2,1)`, `(3,2)` & `(4,3)`
|
||||
|
||||
And so, for each chain, the ranges that are in the auth chain difference are:
|
||||
1. Chain 1: None (since everything can reach the create event).
|
||||
2. Chain 2: The range `(1, 2]` (i.e. just `2`), as `1` is reachable by all state
|
||||
sets and the maximum reachable is `2` (corresponding to Bob's second join).
|
||||
3. Chain 3: Similarly the range `(1, 2]` (corresponding to the second power
|
||||
level).
|
||||
4. Chain 4: The range `(1, 3]` (corresponding to both of Alice's joins).
|
||||
|
||||
So the final result is: Bob's second join `(2,2)`, the second power level
|
||||
`(3,2)` and both of Alice's joins `(4,2)` & `(4,3)`.
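Written out as a few lines of Python, the same arithmetic over the per-chain
maxima (using 0 to mean "this state set cannot reach the chain at all") gives
the half-open ranges `(min_all, max_any]`:

```python
# Maximum sequence number reachable in each chain, per state set (from the
# worked example above).
s1 = {1: 1, 2: 2, 3: 1, 4: 1}
s2 = {1: 1, 2: 1, 3: 2, 4: 3}

difference = {}
for chain_id in set(s1) | set(s2):
    max_any = max(s1.get(chain_id, 0), s2.get(chain_id, 0))
    min_all = min(s1.get(chain_id, 0), s2.get(chain_id, 0))
    if max_any > min_all:
        # Sequence numbers in (min_all, max_any] are in the difference.
        difference[chain_id] = (min_all, max_any)

print(difference)  # {2: (1, 2), 3: (1, 2), 4: (1, 3)}
```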
|
||||
@@ -8,16 +8,16 @@ errors in code.
|
||||
|
||||
The necessary tools are detailed below.
|
||||
|
||||
First install them with:
|
||||
|
||||
pip install -e ".[lint,mypy]"
|
||||
|
||||
- **black**
|
||||
|
||||
The Synapse codebase uses [black](https://pypi.org/project/black/)
|
||||
as an opinionated code formatter, ensuring all committed code is
|
||||
properly formatted.
|
||||
|
||||
First install `black` with:
|
||||
|
||||
pip install --upgrade black
|
||||
|
||||
Have `black` auto-format your code (it shouldn't change any
|
||||
functionality) with:
|
||||
|
||||
@@ -28,6 +28,10 @@ First install them with:
|
||||
`flake8` is a code checking tool. We require code to pass `flake8`
|
||||
before being merged into the codebase.
|
||||
|
||||
Install `flake8` with:
|
||||
|
||||
pip install --upgrade flake8 flake8-comprehensions
|
||||
|
||||
Check all application and test code with:
|
||||
|
||||
flake8 synapse tests
|
||||
@@ -37,6 +41,10 @@ First install them with:
|
||||
`isort` ensures imports are nicely formatted, and can suggest and
|
||||
auto-fix issues such as double-importing.
|
||||
|
||||
Install `isort` with:
|
||||
|
||||
pip install --upgrade isort
|
||||
|
||||
Auto-fix imports with:
|
||||
|
||||
isort -rc synapse tests
|
||||
@@ -56,6 +64,8 @@ save as it takes a while and is very resource intensive.
|
||||
- Use underscores for functions and variables.
|
||||
- **Docstrings**: should follow the [google code
|
||||
style](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings).
|
||||
This is so that we can generate documentation with
|
||||
[sphinx](http://sphinxcontrib-napoleon.readthedocs.org/en/latest/).
|
||||
See the
|
||||
[examples](http://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html)
|
||||
in the sphinx documentation.
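For instance, a Google-style docstring on a made-up helper (purely
illustrative) looks like this:

```python
def get_room_alias(room_id: str, prefer_canonical: bool = True) -> str:
    """Return a human-readable alias for the given room.

    Args:
        room_id: The room ID to look up, e.g. "!abc:example.com".
        prefer_canonical: If True, prefer the room's canonical alias.

    Returns:
        The chosen alias, or the room ID itself if no alias is known.
    """
    ...  # illustrative only; no real implementation
```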
|
||||
|
||||
@@ -31,7 +31,7 @@ easy to run CAS implementation built on top of Django.
|
||||
You should now have a Django project configured to serve CAS authentication with
|
||||
a single user created.
|
||||
|
||||
## Configure Synapse (and Element) to use CAS
|
||||
## Configure Synapse (and Riot) to use CAS
|
||||
|
||||
1. Modify your `homeserver.yaml` to enable CAS and point it to your locally
|
||||
running Django test server:
|
||||
@@ -51,9 +51,9 @@ and that the CAS server is on port 8000, both on localhost.
|
||||
|
||||
## Testing the configuration
|
||||
|
||||
Then in Element:
|
||||
Then in Riot:
|
||||
|
||||
1. Visit the login page with an Element client pointing at your homeserver.
|
||||
1. Visit the login page with a Riot pointing at your homeserver.
|
||||
2. Click the Single Sign-On button.
|
||||
3. Login using the credentials created with `createsuperuser`.
|
||||
4. You should be logged in.
|
||||
|
||||
@@ -47,18 +47,6 @@ you invite them to. This can be caused by an incorrectly-configured reverse
|
||||
proxy: see [reverse_proxy.md](<reverse_proxy.md>) for instructions on how to correctly
|
||||
configure a reverse proxy.
|
||||
|
||||
### Known issues
|
||||
|
||||
**HTTP `308 Permanent Redirect` redirects are not followed**: Due to missing features
|
||||
in the HTTP library used by Synapse, 308 redirects are currently not followed by
|
||||
federating servers, which can cause `M_UNKNOWN` or `401 Unauthorized` errors. This
|
||||
may affect users who are redirecting apex-to-www (e.g. `example.com` -> `www.example.com`),
|
||||
and especially users of the Kubernetes *Nginx Ingress* module, which uses 308 redirect
|
||||
codes by default. For those Kubernetes users, [this Stackoverflow post](https://stackoverflow.com/a/52617528/5096871)
|
||||
might be helpful. For other users, switching to a `301 Moved Permanently` code may be
|
||||
an option. 308 redirect codes will be supported properly in a future
|
||||
release of Synapse.
|
||||
|
||||
## Running a demo federation of Synapses
|
||||
|
||||
If you want to get up and running quickly with a trio of homeservers in a
|
||||
|
||||
@@ -5,45 +5,8 @@ The "manhole" allows server administrators to access a Python shell on a running
|
||||
Synapse installation. This is a very powerful mechanism for administration and
|
||||
debugging.
|
||||
|
||||
**_Security Warning_**
|
||||
|
||||
Note that this will give administrative access to synapse to **all users** with
|
||||
shell access to the server. It should therefore **not** be enabled in
|
||||
environments where untrusted users have shell access.
|
||||
|
||||
***
|
||||
|
||||
To enable it, first uncomment the `manhole` listener configuration in
|
||||
`homeserver.yaml`. The configuration is slightly different if you're using docker.
|
||||
|
||||
#### Docker config
|
||||
|
||||
If you are using Docker, set `bind_addresses` to `['0.0.0.0']` as shown:
|
||||
|
||||
```yaml
|
||||
listeners:
|
||||
- port: 9000
|
||||
bind_addresses: ['0.0.0.0']
|
||||
type: manhole
|
||||
```
|
||||
|
||||
When using `docker run` to start the server, you will then need to change the command to the following to include the
|
||||
`manhole` port forwarding. The `-p 127.0.0.1:9000:9000` below is important: it
|
||||
ensures that access to the `manhole` is only possible for local users.
|
||||
|
||||
```bash
|
||||
docker run -d --name synapse \
|
||||
--mount type=volume,src=synapse-data,dst=/data \
|
||||
-p 8008:8008 \
|
||||
-p 127.0.0.1:9000:9000 \
|
||||
matrixdotorg/synapse:latest
|
||||
```
|
||||
|
||||
#### Native config
|
||||
|
||||
If you are not using docker, set `bind_addresses` to `['::1', '127.0.0.1']` as shown.
|
||||
The `bind_addresses` in the example below is important: it ensures that access to the
|
||||
`manhole` is only possible for local users.
|
||||
`homeserver.yaml`:
|
||||
|
||||
```yaml
|
||||
listeners:
|
||||
@@ -52,7 +15,12 @@ listeners:
|
||||
type: manhole
|
||||
```
|
||||
|
||||
#### Accessing synapse manhole
|
||||
(`bind_addresses` in the above is important: it ensures that access to the
|
||||
manhole is only possible for local users).
|
||||
|
||||
Note that this will give administrative access to synapse to **all users** with
|
||||
shell access to the server. It should therefore **not** be enabled in
|
||||
environments where untrusted users have shell access.
|
||||
|
||||
Then restart synapse, and point an ssh client at port 9000 on localhost, using
|
||||
the username `matrix`:
|
||||
@@ -67,12 +35,9 @@ This gives a Python REPL in which `hs` gives access to the
|
||||
`synapse.server.HomeServer` object - which in turn gives access to many other
|
||||
parts of the process.
|
||||
|
||||
Note that any call which returns a coroutine will need to be wrapped in `ensureDeferred`.
|
||||
|
||||
As a simple example, retrieving an event from the database:
|
||||
|
||||
```pycon
|
||||
>>> from twisted.internet import defer
|
||||
>>> defer.ensureDeferred(hs.get_datastore().get_event('$1416420717069yeQaw:matrix.org'))
|
||||
```
|
||||
>>> hs.get_datastore().get_event('$1416420717069yeQaw:matrix.org')
|
||||
<Deferred at 0x7ff253fc6998 current result: <FrozenEvent event_id='$1416420717069yeQaw:matrix.org', type='m.room.create', state_key=''>>
|
||||
```
|
||||
|
||||
@@ -136,34 +136,24 @@ the server's database.
|
||||
|
||||
### Lifetime limits
|
||||
|
||||
Server admins can set limits on the values of `max_lifetime` to use when
|
||||
purging old events in a room. These limits can be defined as such in the
|
||||
`retention` section of the configuration file:
|
||||
**Note: this feature is mainly useful within a closed federation or on
|
||||
servers that don't federate, because there currently is no way to
|
||||
enforce these limits in an open federation.**
|
||||
|
||||
Server admins can restrict the values their local users are allowed to
|
||||
use for both `min_lifetime` and `max_lifetime`. These limits can be
|
||||
defined as such in the `retention` section of the configuration file:
|
||||
|
||||
```yaml
|
||||
allowed_lifetime_min: 1d
|
||||
allowed_lifetime_max: 1y
|
||||
```
|
||||
|
||||
The limits are considered when running purge jobs. If necessary, the
|
||||
effective value of `max_lifetime` will be brought between
|
||||
`allowed_lifetime_min` and `allowed_lifetime_max` (inclusive).
|
||||
This means that, if the value of `max_lifetime` defined in the room's state
|
||||
is lower than `allowed_lifetime_min`, the value of `allowed_lifetime_min`
|
||||
will be used instead. Likewise, if the value of `max_lifetime` is higher
|
||||
than `allowed_lifetime_max`, the value of `allowed_lifetime_max` will be
|
||||
used instead.
|
||||
|
||||
In the example above, we ensure Synapse never deletes events that are less
|
||||
than one day old, and that it always deletes events that are over a year
|
||||
old.
|
||||
|
||||
If a default policy is set, and its `max_lifetime` value is lower than
|
||||
`allowed_lifetime_min` or higher than `allowed_lifetime_max`, the same
|
||||
process applies.
|
||||
|
||||
Both parameters are optional; if one is omitted Synapse won't use it to
|
||||
adjust the effective value of `max_lifetime`.
|
||||
Here, `allowed_lifetime_min` is the lowest value a local user can set
|
||||
for both `min_lifetime` and `max_lifetime`, and `allowed_lifetime_max`
|
||||
is the highest value. Both parameters are optional (e.g. setting
|
||||
`allowed_lifetime_min` but not `allowed_lifetime_max` only enforces a
|
||||
minimum and no maximum).
|
||||
|
||||
Like other settings in this section, these parameters can be expressed
|
||||
either as a duration or as a number of milliseconds.
|
||||
|
||||
@@ -13,12 +13,10 @@
|
||||
can be enabled by adding the \"metrics\" resource to the existing
|
||||
listener as such:
|
||||
|
||||
```yaml
|
||||
resources:
|
||||
- names:
|
||||
- client
|
||||
- metrics
|
||||
```
|
||||
resources:
|
||||
- names:
|
||||
- client
|
||||
- metrics
|
||||
|
||||
This provides a simple way of adding metrics to your Synapse
|
||||
installation, and serves under `/_synapse/metrics`. If you do not
|
||||
@@ -33,13 +31,11 @@
|
||||
|
||||
Add a new listener to homeserver.yaml:
|
||||
|
||||
```yaml
|
||||
listeners:
|
||||
- type: metrics
|
||||
port: 9000
|
||||
bind_addresses:
|
||||
- '0.0.0.0'
|
||||
```
|
||||
listeners:
|
||||
- type: metrics
|
||||
port: 9000
|
||||
bind_addresses:
|
||||
- '0.0.0.0'
|
||||
|
||||
For both options, you will need to ensure that `enable_metrics` is
|
||||
set to `True`.
|
||||
@@ -51,13 +47,10 @@
|
||||
It needs to set the `metrics_path` to a non-default value (under
|
||||
`scrape_configs`):
|
||||
|
||||
```yaml
|
||||
- job_name: "synapse"
|
||||
scrape_interval: 15s
|
||||
metrics_path: "/_synapse/metrics"
|
||||
static_configs:
|
||||
- targets: ["my.server.here:port"]
|
||||
```
|
||||
- job_name: "synapse"
|
||||
metrics_path: "/_synapse/metrics"
|
||||
static_configs:
|
||||
- targets: ["my.server.here:port"]
|
||||
|
||||
where `my.server.here` is the IP address of Synapse, and `port` is
|
||||
the listener port configured with the `metrics` resource.
|
||||
@@ -67,9 +60,6 @@
|
||||
|
||||
1. Restart Prometheus.
|
||||
|
||||
1. Consider using the [grafana dashboard](https://github.com/matrix-org/synapse/tree/master/contrib/grafana/)
|
||||
and required [recording rules](https://github.com/matrix-org/synapse/tree/master/contrib/prometheus/)
|
||||
|
||||
## Monitoring workers
|
||||
|
||||
To monitor a Synapse installation using
|
||||
@@ -84,9 +74,9 @@ To allow collecting metrics from a worker, you need to add a
|
||||
under `worker_listeners`:
|
||||
|
||||
```yaml
|
||||
- type: metrics
|
||||
bind_address: ''
|
||||
port: 9101
|
||||
- type: metrics
|
||||
bind_address: ''
|
||||
port: 9101
|
||||
```
|
||||
|
||||
The `bind_address` and `port` parameters should be set so that
|
||||
@@ -95,38 +85,6 @@ don't clash with an existing worker.
|
||||
With this example, the worker's metrics would then be available
|
||||
on `http://127.0.0.1:9101`.
|
||||
|
||||
Example Prometheus target for Synapse with workers:
|
||||
|
||||
```yaml
|
||||
- job_name: "synapse"
|
||||
scrape_interval: 15s
|
||||
metrics_path: "/_synapse/metrics"
|
||||
static_configs:
|
||||
- targets: ["my.server.here:port"]
|
||||
labels:
|
||||
instance: "my.server"
|
||||
job: "master"
|
||||
index: 1
|
||||
- targets: ["my.workerserver.here:port"]
|
||||
labels:
|
||||
instance: "my.server"
|
||||
job: "generic_worker"
|
||||
index: 1
|
||||
- targets: ["my.workerserver.here:port"]
|
||||
labels:
|
||||
instance: "my.server"
|
||||
job: "generic_worker"
|
||||
index: 2
|
||||
- targets: ["my.workerserver.here:port"]
|
||||
labels:
|
||||
instance: "my.server"
|
||||
job: "media_repository"
|
||||
index: 1
|
||||
```
|
||||
|
||||
Labels (`instance`, `job`, `index`) can be defined as anything.
|
||||
The labels are used to group graphs in grafana.
|
||||
|
||||
## Renaming of metrics & deprecation of old names in 1.2
|
||||
|
||||
Synapse 1.2 updates the Prometheus metrics to match the naming
|
||||
|
||||
docs/openid.md
@@ -37,55 +37,29 @@ as follows:
|
||||
provided by `matrix.org` so no further action is needed.
|
||||
|
||||
* If you installed Synapse into a virtualenv, run `/path/to/env/bin/pip
|
||||
install matrix-synapse[oidc]` to install the necessary dependencies.
|
||||
install synapse[oidc]` to install the necessary dependencies.
|
||||
|
||||
* For other installation mechanisms, see the documentation provided by the
|
||||
maintainer.
|
||||
|
||||
To enable the OpenID integration, you should then add a section to the `oidc_providers`
|
||||
setting in your configuration file (or uncomment one of the existing examples).
|
||||
See [sample_config.yaml](./sample_config.yaml) for some sample settings, as well as
|
||||
the text below for example configurations for specific providers.
|
||||
To enable the OpenID integration, you should then add an `oidc_config` section
|
||||
to your configuration file (or uncomment the `enabled: true` line in the
|
||||
existing section). See [sample_config.yaml](./sample_config.yaml) for some
|
||||
sample settings, as well as the text below for example configurations for
|
||||
specific providers.
|
||||
|
||||
## Sample configs
|
||||
|
||||
Here are a few configs for providers that should work with Synapse.
|
||||
|
||||
### Microsoft Azure Active Directory
|
||||
Azure AD can act as an OpenID Connect Provider. Register a new application under
|
||||
*App registrations* in the Azure AD management console. The RedirectURI for your
|
||||
application should point to your matrix server:
|
||||
`[synapse public baseurl]/_synapse/client/oidc/callback`
|
||||
|
||||
Go to *Certificates & secrets* and register a new client secret. Make note of your
|
||||
Directory (tenant) ID as it will be used in the Azure links.
|
||||
Edit your Synapse config file and change the `oidc_config` section:
|
||||
|
||||
```yaml
|
||||
oidc_providers:
|
||||
- idp_id: microsoft
|
||||
idp_name: Microsoft
|
||||
issuer: "https://login.microsoftonline.com/<tenant id>/v2.0"
|
||||
client_id: "<client id>"
|
||||
client_secret: "<client secret>"
|
||||
scopes: ["openid", "profile"]
|
||||
authorization_endpoint: "https://login.microsoftonline.com/<tenant id>/oauth2/v2.0/authorize"
|
||||
token_endpoint: "https://login.microsoftonline.com/<tenant id>/oauth2/v2.0/token"
|
||||
userinfo_endpoint: "https://graph.microsoft.com/oidc/userinfo"
|
||||
|
||||
user_mapping_provider:
|
||||
config:
|
||||
localpart_template: "{{ user.preferred_username.split('@')[0] }}"
|
||||
display_name_template: "{{ user.name }}"
|
||||
```
|
||||
|
||||
### [Dex][dex-idp]
|
||||
|
||||
[Dex][dex-idp] is a simple, open-source, certified OpenID Connect Provider.
|
||||
Although it is designed to help building a full-blown provider with an
|
||||
external database, it can be configured with static passwords in a config file.
|
||||
|
||||
Follow the [Getting Started guide](https://dexidp.io/docs/getting-started/)
|
||||
Follow the [Getting Started
|
||||
guide](https://github.com/dexidp/dex/blob/master/Documentation/getting-started.md)
|
||||
to install Dex.
|
||||
|
||||
Edit `examples/config-dev.yaml` config file from the Dex repo to add a client:
|
||||
@@ -95,31 +69,30 @@ staticClients:
|
||||
- id: synapse
|
||||
secret: secret
|
||||
redirectURIs:
|
||||
- '[synapse public baseurl]/_synapse/client/oidc/callback'
|
||||
- '[synapse public baseurl]/_synapse/oidc/callback'
|
||||
name: 'Synapse'
|
||||
```
|
||||
|
||||
Run with `dex serve examples/config-dev.yaml`.
|
||||
Run with `dex serve examples/config-dex.yaml`.
|
||||
|
||||
Synapse config:
|
||||
|
||||
```yaml
|
||||
oidc_providers:
|
||||
- idp_id: dex
|
||||
idp_name: "My Dex server"
|
||||
skip_verification: true # This is needed as Dex is served on an insecure endpoint
|
||||
issuer: "http://127.0.0.1:5556/dex"
|
||||
client_id: "synapse"
|
||||
client_secret: "secret"
|
||||
scopes: ["openid", "profile"]
|
||||
user_mapping_provider:
|
||||
config:
|
||||
localpart_template: "{{ user.name }}"
|
||||
display_name_template: "{{ user.name|capitalize }}"
|
||||
oidc_config:
|
||||
enabled: true
|
||||
skip_verification: true # This is needed as Dex is served on an insecure endpoint
|
||||
issuer: "http://127.0.0.1:5556/dex"
|
||||
client_id: "synapse"
|
||||
client_secret: "secret"
|
||||
scopes: ["openid", "profile"]
|
||||
user_mapping_provider:
|
||||
config:
|
||||
localpart_template: "{{ user.name }}"
|
||||
display_name_template: "{{ user.name|capitalize }}"
|
||||
```
|
||||
### [Keycloak][keycloak-idp]
|
||||
|
||||
[Keycloak][keycloak-idp] is an opensource IdP maintained by Red Hat.
|
||||
[Keycloak][keycloak-idp] is an opensource IdP maintained by Red Hat.
|
||||
|
||||
Follow the [Getting Started Guide](https://www.keycloak.org/getting-started) to install Keycloak and set up a realm.
|
||||
|
||||
@@ -141,7 +114,7 @@ Follow the [Getting Started Guide](https://www.keycloak.org/getting-started) to
|
||||
| Enabled | `On` |
|
||||
| Client Protocol | `openid-connect` |
|
||||
| Access Type | `confidential` |
|
||||
| Valid Redirect URIs | `[synapse public baseurl]/_synapse/client/oidc/callback` |
|
||||
| Valid Redirect URIs | `[synapse public baseurl]/_synapse/oidc/callback` |
|
||||
|
||||
5. Click `Save`
|
||||
6. On the Credentials tab, update the fields:
|
||||
@@ -154,22 +127,17 @@ Follow the [Getting Started Guide](https://www.keycloak.org/getting-started) to
|
||||
8. Copy Secret
|
||||
|
||||
```yaml
|
||||
oidc_providers:
|
||||
- idp_id: keycloak
|
||||
idp_name: "My KeyCloak server"
|
||||
issuer: "https://127.0.0.1:8443/auth/realms/{realm_name}"
|
||||
client_id: "synapse"
|
||||
client_secret: "copy secret generated from above"
|
||||
scopes: ["openid", "profile"]
|
||||
user_mapping_provider:
|
||||
config:
|
||||
localpart_template: "{{ user.preferred_username }}"
|
||||
display_name_template: "{{ user.name }}"
|
||||
oidc_config:
|
||||
enabled: true
|
||||
issuer: "https://127.0.0.1:8443/auth/realms/{realm_name}"
|
||||
client_id: "synapse"
|
||||
client_secret: "copy secret generated from above"
|
||||
scopes: ["openid", "profile"]
|
||||
```
|
||||
### [Auth0][auth0]
|
||||
|
||||
1. Create a regular web application for Synapse
|
||||
2. Set the Allowed Callback URLs to `[synapse public baseurl]/_synapse/client/oidc/callback`
|
||||
2. Set the Allowed Callback URLs to `[synapse public baseurl]/_synapse/oidc/callback`
|
||||
3. Add a rule to add the `preferred_username` claim.
|
||||
<details>
|
||||
<summary>Code sample</summary>
|
||||
@@ -194,17 +162,16 @@ oidc_providers:
|
||||
Synapse config:
|
||||
|
||||
```yaml
|
||||
oidc_providers:
|
||||
- idp_id: auth0
|
||||
idp_name: Auth0
|
||||
issuer: "https://your-tier.eu.auth0.com/" # TO BE FILLED
|
||||
client_id: "your-client-id" # TO BE FILLED
|
||||
client_secret: "your-client-secret" # TO BE FILLED
|
||||
scopes: ["openid", "profile"]
|
||||
user_mapping_provider:
|
||||
config:
|
||||
localpart_template: "{{ user.preferred_username }}"
|
||||
display_name_template: "{{ user.name }}"
|
||||
oidc_config:
|
||||
enabled: true
|
||||
issuer: "https://your-tier.eu.auth0.com/" # TO BE FILLED
|
||||
client_id: "your-client-id" # TO BE FILLED
|
||||
client_secret: "your-client-secret" # TO BE FILLED
|
||||
scopes: ["openid", "profile"]
|
||||
user_mapping_provider:
|
||||
config:
|
||||
localpart_template: "{{ user.preferred_username }}"
|
||||
display_name_template: "{{ user.name }}"
|
||||
```
|
||||
|
||||
### GitHub
|
||||
@@ -213,33 +180,31 @@ GitHub is a bit special as it is not an OpenID Connect compliant provider, but
|
||||
just a regular OAuth2 provider.
|
||||
|
||||
The [`/user` API endpoint](https://developer.github.com/v3/users/#get-the-authenticated-user)
|
||||
can be used to retrieve information on the authenticated user. As the Synapse
|
||||
can be used to retrieve information on the authenticated user. As the Synapse
|
||||
login mechanism needs an attribute to uniquely identify users, and that endpoint
|
||||
does not return a `sub` property, an alternative `subject_claim` has to be set.
|
||||
|
||||
1. Create a new OAuth application: https://github.com/settings/applications/new.
|
||||
2. Set the callback URL to `[synapse public baseurl]/_synapse/client/oidc/callback`.
|
||||
2. Set the callback URL to `[synapse public baseurl]/_synapse/oidc/callback`.
|
||||
|
||||
Synapse config:
|
||||
|
||||
```yaml
|
||||
oidc_providers:
|
||||
- idp_id: github
|
||||
idp_name: Github
|
||||
idp_brand: "org.matrix.github" # optional: styling hint for clients
|
||||
discover: false
|
||||
issuer: "https://github.com/"
|
||||
client_id: "your-client-id" # TO BE FILLED
|
||||
client_secret: "your-client-secret" # TO BE FILLED
|
||||
authorization_endpoint: "https://github.com/login/oauth/authorize"
|
||||
token_endpoint: "https://github.com/login/oauth/access_token"
|
||||
userinfo_endpoint: "https://api.github.com/user"
|
||||
scopes: ["read:user"]
|
||||
user_mapping_provider:
|
||||
config:
|
||||
subject_claim: "id"
|
||||
localpart_template: "{{ user.login }}"
|
||||
display_name_template: "{{ user.name }}"
|
||||
oidc_config:
|
||||
enabled: true
|
||||
discover: false
|
||||
issuer: "https://github.com/"
|
||||
client_id: "your-client-id" # TO BE FILLED
|
||||
client_secret: "your-client-secret" # TO BE FILLED
|
||||
authorization_endpoint: "https://github.com/login/oauth/authorize"
|
||||
token_endpoint: "https://github.com/login/oauth/access_token"
|
||||
userinfo_endpoint: "https://api.github.com/user"
|
||||
scopes: ["read:user"]
|
||||
user_mapping_provider:
|
||||
config:
|
||||
subject_claim: "id"
|
||||
localpart_template: "{{ user.login }}"
|
||||
display_name_template: "{{ user.name }}"
|
||||
```
|
||||
|
||||
### [Google][google-idp]
|
||||
@@ -249,164 +214,37 @@ oidc_providers:
|
||||
2. add an "OAuth Client ID" for a Web Application under "Credentials".
|
||||
3. Copy the Client ID and Client Secret, and add the following to your synapse config:
|
||||
```yaml
|
||||
oidc_providers:
|
||||
- idp_id: google
|
||||
idp_name: Google
|
||||
idp_brand: "org.matrix.google" # optional: styling hint for clients
|
||||
issuer: "https://accounts.google.com/"
|
||||
client_id: "your-client-id" # TO BE FILLED
|
||||
client_secret: "your-client-secret" # TO BE FILLED
|
||||
scopes: ["openid", "profile"]
|
||||
user_mapping_provider:
|
||||
config:
|
||||
localpart_template: "{{ user.given_name|lower }}"
|
||||
display_name_template: "{{ user.name }}"
|
||||
oidc_config:
|
||||
enabled: true
|
||||
issuer: "https://accounts.google.com/"
|
||||
client_id: "your-client-id" # TO BE FILLED
|
||||
client_secret: "your-client-secret" # TO BE FILLED
|
||||
scopes: ["openid", "profile"]
|
||||
user_mapping_provider:
|
||||
config:
|
||||
localpart_template: "{{ user.given_name|lower }}"
|
||||
display_name_template: "{{ user.name }}"
|
||||
```
|
||||
4. Back in the Google console, add this Authorized redirect URI: `[synapse
|
||||
public baseurl]/_synapse/client/oidc/callback`.
|
||||
public baseurl]/_synapse/oidc/callback`.
|
||||
|
||||
### Twitch
|
||||
|
||||
1. Setup a developer account on [Twitch](https://dev.twitch.tv/)
|
||||
2. Obtain the OAuth 2.0 credentials by [creating an app](https://dev.twitch.tv/console/apps/)
|
||||
3. Add this OAuth Redirect URL: `[synapse public baseurl]/_synapse/client/oidc/callback`
|
||||
3. Add this OAuth Redirect URL: `[synapse public baseurl]/_synapse/oidc/callback`
|
||||
|
||||
Synapse config:
|
||||
|
||||
```yaml
|
||||
oidc_providers:
|
||||
- idp_id: twitch
|
||||
idp_name: Twitch
|
||||
issuer: "https://id.twitch.tv/oauth2/"
|
||||
client_id: "your-client-id" # TO BE FILLED
|
||||
client_secret: "your-client-secret" # TO BE FILLED
|
||||
client_auth_method: "client_secret_post"
|
||||
user_mapping_provider:
|
||||
config:
|
||||
localpart_template: "{{ user.preferred_username }}"
|
||||
display_name_template: "{{ user.name }}"
|
||||
```
|
||||
|
||||
### GitLab
|
||||
|
||||
1. Create a [new application](https://gitlab.com/profile/applications).
|
||||
2. Add the `read_user` and `openid` scopes.
|
||||
3. Add this Callback URL: `[synapse public baseurl]/_synapse/client/oidc/callback`
|
||||
|
||||
Synapse config:
|
||||
|
||||
```yaml
|
||||
oidc_providers:
|
||||
- idp_id: gitlab
|
||||
idp_name: Gitlab
|
||||
idp_brand: "org.matrix.gitlab" # optional: styling hint for clients
|
||||
issuer: "https://gitlab.com/"
|
||||
client_id: "your-client-id" # TO BE FILLED
|
||||
client_secret: "your-client-secret" # TO BE FILLED
|
||||
client_auth_method: "client_secret_post"
|
||||
scopes: ["openid", "read_user"]
|
||||
user_profile_method: "userinfo_endpoint"
|
||||
user_mapping_provider:
|
||||
config:
|
||||
localpart_template: '{{ user.nickname }}'
|
||||
display_name_template: '{{ user.name }}'
|
||||
```
|
||||
|
||||
### Facebook
|
||||
|
||||
Like GitHub, Facebook provides a custom OAuth2 API rather than an OIDC-compliant
|
||||
one so requires a little more configuration.
|
||||
|
||||
0. You will need a Facebook developer account. You can register for one
|
||||
[here](https://developers.facebook.com/async/registration/).
|
||||
1. On the [apps](https://developers.facebook.com/apps/) page of the developer
|
||||
console, "Create App", and choose "Build Connected Experiences".
|
||||
2. Once the app is created, add "Facebook Login" and choose "Web". You don't
|
||||
need to go through the whole form here.
|
||||
3. In the left-hand menu, open "Products"/"Facebook Login"/"Settings".
|
||||
* Add `[synapse public baseurl]/_synapse/client/oidc/callback` as an OAuth Redirect
|
||||
URL.
|
||||
4. In the left-hand menu, open "Settings/Basic". Here you can copy the "App ID"
|
||||
and "App Secret" for use below.
|
||||
|
||||
Synapse config:
|
||||
|
||||
```yaml
|
||||
- idp_id: facebook
|
||||
idp_name: Facebook
|
||||
idp_brand: "org.matrix.facebook" # optional: styling hint for clients
|
||||
discover: false
|
||||
issuer: "https://facebook.com"
|
||||
client_id: "your-client-id" # TO BE FILLED
|
||||
client_secret: "your-client-secret" # TO BE FILLED
|
||||
scopes: ["openid", "email"]
|
||||
authorization_endpoint: https://facebook.com/dialog/oauth
|
||||
token_endpoint: https://graph.facebook.com/v9.0/oauth/access_token
|
||||
user_profile_method: "userinfo_endpoint"
|
||||
userinfo_endpoint: "https://graph.facebook.com/v9.0/me?fields=id,name,email,picture"
|
||||
user_mapping_provider:
|
||||
config:
|
||||
subject_claim: "id"
|
||||
display_name_template: "{{ user.name }}"
|
||||
```
|
||||
|
||||
Relevant documents:
|
||||
* https://developers.facebook.com/docs/facebook-login/manually-build-a-login-flow
|
||||
* Using Facebook's Graph API: https://developers.facebook.com/docs/graph-api/using-graph-api/
|
||||
* Reference to the User endpoint: https://developers.facebook.com/docs/graph-api/reference/user
|
||||
|
||||
### Gitea
|
||||
|
||||
Gitea is, like Github, not an OpenID provider, but just an OAuth2 provider.
|
||||
|
||||
The [`/user` API endpoint](https://try.gitea.io/api/swagger#/user/userGetCurrent)
|
||||
can be used to retrieve information on the authenticated user. As the Synapse
|
||||
login mechanism needs an attribute to uniquely identify users, and that endpoint
|
||||
does not return a `sub` property, an alternative `subject_claim` has to be set.
|
||||
|
||||
1. Create a new application.
|
||||
2. Add this Callback URL: `[synapse public baseurl]/_synapse/client/oidc/callback`
|
||||
|
||||
Synapse config:
|
||||
|
||||
```yaml
|
||||
oidc_providers:
|
||||
- idp_id: gitea
|
||||
idp_name: Gitea
|
||||
discover: false
|
||||
issuer: "https://your-gitea.com/"
|
||||
client_id: "your-client-id" # TO BE FILLED
|
||||
client_secret: "your-client-secret" # TO BE FILLED
|
||||
client_auth_method: client_secret_post
|
||||
scopes: [] # Gitea doesn't support Scopes
|
||||
authorization_endpoint: "https://your-gitea.com/login/oauth/authorize"
|
||||
token_endpoint: "https://your-gitea.com/login/oauth/access_token"
|
||||
userinfo_endpoint: "https://your-gitea.com/api/v1/user"
|
||||
user_mapping_provider:
|
||||
config:
|
||||
subject_claim: "id"
|
||||
localpart_template: "{{ user.login }}"
|
||||
display_name_template: "{{ user.full_name }}"
|
||||
```
|
||||
|
||||
### XWiki
|
||||
|
||||
Install [OpenID Connect Provider](https://extensions.xwiki.org/xwiki/bin/view/Extension/OpenID%20Connect/OpenID%20Connect%20Provider/) extension in your [XWiki](https://www.xwiki.org) instance.
|
||||
|
||||
Synapse config:
|
||||
|
||||
```yaml
|
||||
oidc_providers:
|
||||
- idp_id: xwiki
|
||||
idp_name: "XWiki"
|
||||
issuer: "https://myxwikihost/xwiki/oidc/"
|
||||
client_id: "your-client-id" # TO BE FILLED
|
||||
# Needed until https://github.com/matrix-org/synapse/issues/9212 is fixed
|
||||
client_secret: "dontcare"
|
||||
scopes: ["openid", "profile"]
|
||||
user_profile_method: "userinfo_endpoint"
|
||||
user_mapping_provider:
|
||||
config:
|
||||
localpart_template: "{{ user.preferred_username }}"
|
||||
display_name_template: "{{ user.name }}"
|
||||
oidc_config:
|
||||
enabled: true
|
||||
issuer: "https://id.twitch.tv/oauth2/"
|
||||
client_id: "your-client-id" # TO BE FILLED
|
||||
client_secret: "your-client-secret" # TO BE FILLED
|
||||
client_auth_method: "client_secret_post"
|
||||
user_mapping_provider:
|
||||
config:
|
||||
localpart_template: '{{ user.preferred_username }}'
|
||||
display_name_template: '{{ user.name }}'
|
||||
```
|
||||
|
||||
@@ -26,7 +26,6 @@ Password auth provider classes must provide the following methods:
|
||||
|
||||
It should perform any appropriate sanity checks on the provided
|
||||
configuration, and return an object which is then passed into
|
||||
`__init__`.
|
||||
|
||||
This method should have the `@staticmethod` decoration.
|
||||
|
||||
|
||||
@@ -18,7 +18,7 @@ connect to a postgres database.
|
||||
virtualenv](../INSTALL.md#installing-from-source), you can install
|
||||
the library with:
|
||||
|
||||
~/synapse/env/bin/pip install "matrix-synapse[postgres]"
|
||||
~/synapse/env/bin/pip install matrix-synapse[postgres]
|
||||
|
||||
(substituting the path to your virtualenv for `~/synapse/env`, if
|
||||
you used a different path). You will require the postgres
|
||||
@@ -106,17 +106,6 @@ Note that the above may fail with an error about duplicate rows if corruption
|
||||
has already occurred, and such duplicate rows will need to be manually removed.
|
||||
|
||||
|
||||
## Fixing inconsistent sequences error
|
||||
|
||||
Synapse uses Postgres sequences to generate IDs for various tables. A sequence
|
||||
and associated table can get out of sync if, for example, Synapse has been
|
||||
downgraded and then upgraded again.
|
||||
|
||||
To fix the issue shut down Synapse (including any and all workers) and run the
|
||||
SQL command included in the error message. Once done Synapse should start
|
||||
successfully.
|
||||
|
||||
|
||||
## Tuning Postgres
|
||||
|
||||
The default settings should be fine for most deployments. For larger
|
||||
|
||||
@@ -9,24 +9,20 @@ of doing so is that it means that you can expose the default https port
|
||||
(443) to Matrix clients without needing to run Synapse with root
|
||||
privileges.
|
||||
|
||||
You should configure your reverse proxy to forward requests to `/_matrix` or
|
||||
`/_synapse/client` to Synapse, and have it set the `X-Forwarded-For` and
|
||||
`X-Forwarded-Proto` request headers.
|
||||
**NOTE**: Your reverse proxy must not `canonicalise` or `normalise`
|
||||
the requested URI in any way (for example, by decoding `%xx` escapes).
|
||||
Beware that Apache *will* canonicalise URIs unless you specify
|
||||
`nocanon`.
|
||||
|
||||
You should remember that Matrix clients and other Matrix servers do not
|
||||
necessarily need to connect to your server via the same server name or
|
||||
port. Indeed, clients will use port 443 by default, whereas servers default to
|
||||
port 8448. Where these are different, we refer to the 'client port' and the
|
||||
'federation port'. See [the Matrix
|
||||
When setting up a reverse proxy, remember that Matrix clients and other
|
||||
Matrix servers do not necessarily need to connect to your server via the
|
||||
same server name or port. Indeed, clients will use port 443 by default,
|
||||
whereas servers default to port 8448. Where these are different, we
|
||||
refer to the 'client port' and the 'federation port'. See [the Matrix
|
||||
specification](https://matrix.org/docs/spec/server_server/latest#resolving-server-names)
|
||||
for more details of the algorithm used for federation connections, and
|
||||
[delegate.md](<delegate.md>) for instructions on setting up delegation.
|
||||
|
||||
**NOTE**: Your reverse proxy must not `canonicalise` or `normalise`
|
||||
the requested URI in any way (for example, by decoding `%xx` escapes).
|
||||
Beware that Apache *will* canonicalise URIs unless you specify
|
||||
`nocanon`.
|
||||
|
||||
Let's assume that we expect clients to connect to our server at
|
||||
`https://matrix.example.com`, and other servers to connect at
|
||||
`https://example.com:8448`. The following sections detail the configuration of
|
||||
@@ -40,22 +36,21 @@ the reverse proxy and the homeserver.
|
||||
|
||||
```
|
||||
server {
|
||||
listen 443 ssl http2;
|
||||
listen [::]:443 ssl http2;
|
||||
listen 443 ssl;
|
||||
listen [::]:443 ssl;
|
||||
|
||||
# For the federation port
|
||||
listen 8448 ssl http2 default_server;
|
||||
listen [::]:8448 ssl http2 default_server;
|
||||
listen 8448 ssl default_server;
|
||||
listen [::]:8448 ssl default_server;
|
||||
|
||||
server_name matrix.example.com;
|
||||
|
||||
location ~* ^(\/_matrix|\/_synapse\/client) {
|
||||
location /_matrix {
|
||||
proxy_pass http://localhost:8008;
|
||||
proxy_set_header X-Forwarded-For $remote_addr;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
# Nginx by default only allows file uploads up to 1M in size
|
||||
# Increase client_max_body_size to match max_upload_size defined in homeserver.yaml
|
||||
client_max_body_size 50M;
|
||||
client_max_body_size 10M;
|
||||
}
|
||||
}
|
||||
```
|
||||
@@ -70,10 +65,6 @@ matrix.example.com {
|
||||
proxy /_matrix http://localhost:8008 {
|
||||
transparent
|
||||
}
|
||||
|
||||
proxy /_synapse/client http://localhost:8008 {
|
||||
transparent
|
||||
}
|
||||
}
|
||||
|
||||
example.com:8448 {
|
||||
@@ -88,7 +79,6 @@ example.com:8448 {
|
||||
```
|
||||
matrix.example.com {
|
||||
reverse_proxy /_matrix/* http://localhost:8008
|
||||
reverse_proxy /_synapse/client/* http://localhost:8008
|
||||
}
|
||||
|
||||
example.com:8448 {
|
||||
@@ -103,19 +93,15 @@ example.com:8448 {
|
||||
SSLEngine on
|
||||
ServerName matrix.example.com;
|
||||
|
||||
RequestHeader set "X-Forwarded-Proto" expr=%{REQUEST_SCHEME}
|
||||
AllowEncodedSlashes NoDecode
|
||||
ProxyPass /_matrix http://127.0.0.1:8008/_matrix nocanon
|
||||
ProxyPassReverse /_matrix http://127.0.0.1:8008/_matrix
|
||||
ProxyPass /_synapse/client http://127.0.0.1:8008/_synapse/client nocanon
|
||||
ProxyPassReverse /_synapse/client http://127.0.0.1:8008/_synapse/client
|
||||
</VirtualHost>
|
||||
|
||||
<VirtualHost *:8448>
|
||||
SSLEngine on
|
||||
ServerName example.com;
|
||||
|
||||
RequestHeader set "X-Forwarded-Proto" expr=%{REQUEST_SCHEME}
|
||||
AllowEncodedSlashes NoDecode
|
||||
ProxyPass /_matrix http://127.0.0.1:8008/_matrix nocanon
|
||||
ProxyPassReverse /_matrix http://127.0.0.1:8008/_matrix
|
||||
@@ -124,36 +110,20 @@ example.com:8448 {
|
||||
|
||||
**NOTE**: ensure the `nocanon` options are included.
|
||||
|
||||
**NOTE 2**: It appears that Synapse is currently incompatible with the ModSecurity module for Apache (`mod_security2`). If you need it enabled for other services on your web server, you can disable it for Synapse's two VirtualHosts by including the following lines before each of the two `</VirtualHost>` above:
|
||||
|
||||
```
|
||||
<IfModule security2_module>
|
||||
SecRuleEngine off
|
||||
</IfModule>
|
||||
```
|
||||
|
||||
### HAProxy
|
||||
|
||||
```
|
||||
frontend https
|
||||
bind :::443 v4v6 ssl crt /etc/ssl/haproxy/ strict-sni alpn h2,http/1.1
|
||||
http-request set-header X-Forwarded-Proto https if { ssl_fc }
|
||||
http-request set-header X-Forwarded-Proto http if !{ ssl_fc }
|
||||
http-request set-header X-Forwarded-For %[src]
|
||||
|
||||
# Matrix client traffic
|
||||
acl matrix-host hdr(host) -i matrix.example.com
|
||||
acl matrix-path path_beg /_matrix
|
||||
acl matrix-path path_beg /_synapse/client
|
||||
|
||||
use_backend matrix if matrix-host matrix-path
|
||||
|
||||
frontend matrix-federation
|
||||
bind :::8448 v4v6 ssl crt /etc/ssl/haproxy/synapse.pem alpn h2,http/1.1
|
||||
http-request set-header X-Forwarded-Proto https if { ssl_fc }
|
||||
http-request set-header X-Forwarded-Proto http if !{ ssl_fc }
|
||||
http-request set-header X-Forwarded-For %[src]
|
||||
|
||||
default_backend matrix
|
||||
|
||||
backend matrix
|
||||
@@ -176,10 +146,3 @@ connecting to Synapse from a client.
|
||||
Synapse exposes a health check endpoint for use by reverse proxies.
|
||||
Each configured HTTP listener has a `/health` endpoint which always returns
|
||||
200 OK (and doesn't get logged).
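For example, a trivial Python probe against that endpoint (the host and port
are placeholders for one of your configured listeners):

```python
import requests

# Probe a listener's /health endpoint; anything other than a 200 response
# means the listener is not healthy.
resp = requests.get("http://127.0.0.1:8008/health", timeout=5)
assert resp.status_code == 200
```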
|
||||
|
||||
## Synapse administration endpoints
|
||||
|
||||
Endpoints for administering your Synapse instance are placed under
|
||||
`/_synapse/admin`. These require authentication through an access token of an
|
||||
admin user. However, as access to these endpoints grants the caller a lot of power,
|
||||
we do not recommend exposing them to the public internet without good reason.
|
||||
|
||||
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff