Compare commits
2 Commits
erikj/remo ... v1.23.1-mu

| Author | SHA1 | Date |
|---|---|---|
|  | cb7e610136 |  |
|  | 2d7ca53ab5 |  |
@@ -1,4 +1,5 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,7 +15,6 @@
# limitations under the License.

import logging

from synapse.storage.engines import create_engine

logger = logging.getLogger("create_postgres_db")
@@ -1,16 +1,13 @@
#!/usr/bin/env bash
#!/bin/bash

# this script is run by buildkite in a plain `bionic` container; it installs the
# minimal requirements for tox and hands over to the py3-old tox environment.
# this script is run by buildkite in a plain `xenial` container; it installs the
# minimal requirements for tox and hands over to the py35-old tox environment.

set -ex

apt-get update
apt-get install -y python3 python3-dev python3-pip libxml2-dev libxslt-dev xmlsec1 zlib1g-dev tox
apt-get install -y python3.5 python3.5-dev python3-pip libxml2-dev libxslt-dev zlib1g-dev tox

export LANG="C.UTF-8"

# Prevent virtualenv from auto-updating pip to an incompatible version
export VIRTUALENV_NO_DOWNLOAD=1

exec tox -e py3-old,combine
exec tox -e py35-old,combine
@@ -1,4 +1,4 @@
#!/usr/bin/env bash
#!/bin/bash
#
# Test script for 'synapse_port_db', which creates a virtualenv, installs Synapse along
# with additional dependencies needed for the test (such as coverage or the PostgreSQL
@@ -7,41 +7,15 @@ jobs:
- checkout
- docker_prepare
- run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
# for release builds, we want to get the amd64 image out asap, so first
# we do an amd64-only build, before following up with a multiarch build.
- docker_build:
tag: -t matrixdotorg/synapse:${CIRCLE_TAG}
platforms: linux/amd64
- docker_build:
tag: -t matrixdotorg/synapse:${CIRCLE_TAG}
platforms: linux/amd64,linux/arm64
tag: -t matrixdotorg/synapse:v1.23.1
platforms: linux/amd64,linux/arm/v7,linux/arm64

dockerhubuploadlatest:
docker:
- image: docker:git
steps:
- checkout
- docker_prepare
- run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
# for `latest`, we don't want the arm images to disappear, so don't update the tag
# until all of the platforms are built.
- docker_build:
tag: -t matrixdotorg/synapse:latest
platforms: linux/amd64,linux/arm64

workflows:
build:
jobs:
- dockerhubuploadrelease:
filters:
tags:
only: /v[0-9].[0-9]+.[0-9]+.*/
branches:
ignore: /.*/
- dockerhubuploadlatest:
filters:
branches:
only: master
- dockerhubuploadrelease

commands:
docker_prepare:
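The `docker_build` command used above is defined in this config's `commands:` section, which is not shown in the hunk. As a rough sketch only — the exact invocation is an assumption, not taken from this diff — a multi-arch release step of this shape corresponds to a `docker buildx` call like:

```sh
# Hypothetical equivalent of the docker_build command for a release tag;
# the real definition lives in the CircleCI config's `commands:` section.
docker buildx build \
    --platform linux/amd64,linux/arm64 \
    -t matrixdotorg/synapse:${CIRCLE_TAG} \
    --push .
```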
@@ -1,8 +0,0 @@
# Black reformatting (#5482).
32e7c9e7f20b57dd081023ac42d6931a8da9b3a3

# Target Python 3.5 with black (#8664).
aff1eb7c671b0a3813407321d2702ec46c71fa56

# Update black to 20.8b1 (#9381).
0a00b7ff14890987f09112a2ae696c61001e6cf1
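The hunk above removes a `.git-blame-ignore-revs`-style list of bulk-reformatting commits. For reference, such a file is consumed by `git blame` roughly as follows (standard git usage, not part of this diff; `<file>` is a placeholder):

```sh
# Point git blame at the ignore list once per clone...
git config blame.ignoreRevsFile .git-blame-ignore-revs
# ...or pass it explicitly for a single invocation.
git blame --ignore-revs-file .git-blame-ignore-revs <file>
```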
322  .github/workflows/tests.yml  (vendored)
@@ -1,322 +0,0 @@
name: Tests

on:
push:
branches: ["develop", "release-*"]
pull_request:

jobs:
lint:
runs-on: ubuntu-latest
strategy:
matrix:
toxenv:
- "check-sampleconfig"
- "check_codestyle"
- "check_isort"
- "mypy"
- "packaging"

steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
- run: pip install tox
- run: tox -e ${{ matrix.toxenv }}

lint-crlf:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Check line endings
run: scripts-dev/check_line_terminators.sh

lint-newsfile:
if: ${{ github.base_ref == 'develop' || contains(github.base_ref, 'release-') }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
- run: pip install tox
- name: Patch Buildkite-specific test script
run: |
sed -i -e 's/\$BUILDKITE_PULL_REQUEST/${{ github.event.number }}/' \
scripts-dev/check-newsfragment
- run: scripts-dev/check-newsfragment

lint-sdist:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
with:
python-version: "3.x"
- run: pip install wheel
- run: python setup.py sdist bdist_wheel
- uses: actions/upload-artifact@v2
with:
name: Python Distributions
path: dist/*

# Dummy step to gate other tests on without repeating the whole list
linting-done:
if: ${{ always() }} # Run this even if prior jobs were skipped
needs: [lint, lint-crlf, lint-newsfile, lint-sdist]
runs-on: ubuntu-latest
steps:
- run: "true"

trial:
if: ${{ !failure() }} # Allow previous steps to be skipped, but not fail
needs: linting-done
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ["3.6", "3.7", "3.8", "3.9"]
database: ["sqlite"]
include:
# Newest Python without optional deps
- python-version: "3.9"
toxenv: "py-noextras,combine"

# Oldest Python with PostgreSQL
- python-version: "3.6"
database: "postgres"
postgres-version: "9.6"

# Newest Python with PostgreSQL
- python-version: "3.9"
database: "postgres"
postgres-version: "13"

steps:
- uses: actions/checkout@v2
- run: sudo apt-get -qq install xmlsec1
- name: Set up PostgreSQL ${{ matrix.postgres-version }}
if: ${{ matrix.postgres-version }}
run: |
docker run -d -p 5432:5432 \
-e POSTGRES_PASSWORD=postgres \
-e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
postgres:${{ matrix.postgres-version }}
- uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- run: pip install tox
- name: Await PostgreSQL
if: ${{ matrix.postgres-version }}
timeout-minutes: 2
run: until pg_isready -h localhost; do sleep 1; done
- run: tox -e py,combine
env:
TRIAL_FLAGS: "--jobs=2"
SYNAPSE_POSTGRES: ${{ matrix.database == 'postgres' || '' }}
SYNAPSE_POSTGRES_HOST: localhost
SYNAPSE_POSTGRES_USER: postgres
SYNAPSE_POSTGRES_PASSWORD: postgres
- name: Dump logs
# Note: Dumps to workflow logs instead of using actions/upload-artifact
# This keeps logs colocated with failing jobs
# It also ignores find's exit code; this is a best effort affair
run: >-
find _trial_temp -name '*.log'
-exec echo "::group::{}" \;
-exec cat {} \;
-exec echo "::endgroup::" \;
|| true

trial-olddeps:
if: ${{ !failure() }} # Allow previous steps to be skipped, but not fail
needs: linting-done
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Test with old deps
uses: docker://ubuntu:bionic # For old python and sqlite
with:
workdir: /github/workspace
entrypoint: .buildkite/scripts/test_old_deps.sh
env:
TRIAL_FLAGS: "--jobs=2"
- name: Dump logs
# Note: Dumps to workflow logs instead of using actions/upload-artifact
# This keeps logs colocated with failing jobs
# It also ignores find's exit code; this is a best effort affair
run: >-
find _trial_temp -name '*.log'
-exec echo "::group::{}" \;
-exec cat {} \;
-exec echo "::endgroup::" \;
|| true

trial-pypy:
# Very slow; only run if the branch name includes 'pypy'
if: ${{ contains(github.ref, 'pypy') && !failure() }}
needs: linting-done
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ["pypy-3.6"]

steps:
- uses: actions/checkout@v2
- run: sudo apt-get -qq install xmlsec1 libxml2-dev libxslt-dev
- uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- run: pip install tox
- run: tox -e py,combine
env:
TRIAL_FLAGS: "--jobs=2"
- name: Dump logs
# Note: Dumps to workflow logs instead of using actions/upload-artifact
# This keeps logs colocated with failing jobs
# It also ignores find's exit code; this is a best effort affair
run: >-
find _trial_temp -name '*.log'
-exec echo "::group::{}" \;
-exec cat {} \;
-exec echo "::endgroup::" \;
|| true

sytest:
if: ${{ !failure() }}
needs: linting-done
runs-on: ubuntu-latest
container:
image: matrixdotorg/sytest-synapse:${{ matrix.sytest-tag }}
volumes:
- ${{ github.workspace }}:/src
env:
BUILDKITE_BRANCH: ${{ github.head_ref }}
POSTGRES: ${{ matrix.postgres && 1}}
MULTI_POSTGRES: ${{ (matrix.postgres == 'multi-postgres') && 1}}
WORKERS: ${{ matrix.workers && 1 }}
REDIS: ${{ matrix.redis && 1 }}
BLACKLIST: ${{ matrix.workers && 'synapse-blacklist-with-workers' }}

strategy:
fail-fast: false
matrix:
include:
- sytest-tag: bionic

- sytest-tag: bionic
postgres: postgres

- sytest-tag: testing
postgres: postgres

- sytest-tag: bionic
postgres: multi-postgres
workers: workers

- sytest-tag: buster
postgres: multi-postgres
workers: workers

- sytest-tag: buster
postgres: postgres
workers: workers
redis: redis

steps:
- uses: actions/checkout@v2
- name: Prepare test blacklist
run: cat sytest-blacklist .buildkite/worker-blacklist > synapse-blacklist-with-workers
- name: Run SyTest
run: /bootstrap.sh synapse
working-directory: /src
- name: Dump results.tap
if: ${{ always() }}
run: cat /logs/results.tap
- name: Upload SyTest logs
uses: actions/upload-artifact@v2
if: ${{ always() }}
with:
name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
path: |
/logs/results.tap
/logs/**/*.log*

portdb:
if: ${{ !failure() }} # Allow previous steps to be skipped, but not fail
needs: linting-done
runs-on: ubuntu-latest
strategy:
matrix:
include:
- python-version: "3.6"
postgres-version: "9.6"

- python-version: "3.9"
postgres-version: "13"

services:
postgres:
image: postgres:${{ matrix.postgres-version }}
ports:
- 5432:5432
env:
POSTGRES_PASSWORD: "postgres"
POSTGRES_INITDB_ARGS: "--lc-collate C --lc-ctype C --encoding UTF8"
options: >-
--health-cmd pg_isready
--health-interval 10s
--health-timeout 5s
--health-retries 5

steps:
- uses: actions/checkout@v2
- run: sudo apt-get -qq install xmlsec1
- uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Patch Buildkite-specific test scripts
run: |
sed -i -e 's/host="postgres"/host="localhost"/' .buildkite/scripts/create_postgres_db.py
sed -i -e 's/host: postgres/host: localhost/' .buildkite/postgres-config.yaml
sed -i -e 's|/src/||' .buildkite/{sqlite,postgres}-config.yaml
sed -i -e 's/\$TOP/\$GITHUB_WORKSPACE/' .coveragerc
- run: .buildkite/scripts/test_synapse_port_db.sh

complement:
if: ${{ !failure() }}
needs: linting-done
runs-on: ubuntu-latest
container:
# https://github.com/matrix-org/complement/blob/master/dockerfiles/ComplementCIBuildkite.Dockerfile
image: matrixdotorg/complement:latest
env:
CI: true
ports:
- 8448:8448
volumes:
- /var/run/docker.sock:/var/run/docker.sock

steps:
- name: Run actions/checkout@v2 for synapse
uses: actions/checkout@v2
with:
path: synapse

- name: Run actions/checkout@v2 for complement
uses: actions/checkout@v2
with:
repository: "matrix-org/complement"
path: complement

# Build initial Synapse image
- run: docker build -t matrixdotorg/synapse:latest -f docker/Dockerfile .
working-directory: synapse

# Build a ready-to-run Synapse image based on the initial image above.
# This new image includes a config file, keys for signing and TLS, and
# other settings to make it suitable for testing under Complement.
- run: docker build -t complement-synapse -f Synapse.Dockerfile .
working-directory: complement/dockerfiles

# Run Complement
- run: go test -v -tags synapse_blacklist ./tests
env:
COMPLEMENT_BASE_IMAGE: complement-synapse:latest
working-directory: complement
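Since the lint and trial jobs in this workflow all go through tox, they can be approximated locally; a minimal sketch (assuming a checkout of the repository and a working Python with tox installed) is:

```sh
pip install tox
# Same environments the lint job's matrix runs:
tox -e check_codestyle,check_isort,mypy
# Same environment the trial job runs against SQLite:
TRIAL_FLAGS="--jobs=2" tox -e py,combine
```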
5  .gitignore  (vendored)
@@ -6,19 +6,16 @@
*.egg
*.egg-info
*.lock
*.py[cod]
*.pyc
*.snap
*.tac
_trial_temp/
_trial_temp*/
/out
.DS_Store
__pycache__/

# stuff that is likely to exist when you run a server locally
/*.db
/*.log
/*.log.*
/*.log.config
/*.pid
/.python-version
1218  CHANGES.md
File diff suppressed because it is too large
271  CONTRIBUTING.md
@@ -1,31 +1,4 @@
Welcome to Synapse

This document aims to get you started with contributing to this repo!

- [1. Who can contribute to Synapse?](#1-who-can-contribute-to-synapse)
- [2. What do I need?](#2-what-do-i-need)
- [3. Get the source.](#3-get-the-source)
- [4. Install the dependencies](#4-install-the-dependencies)
* [Under Unix (macOS, Linux, BSD, ...)](#under-unix-macos-linux-bsd-)
* [Under Windows](#under-windows)
- [5. Get in touch.](#5-get-in-touch)
- [6. Pick an issue.](#6-pick-an-issue)
- [7. Turn coffee and documentation into code and documentation!](#7-turn-coffee-and-documentation-into-code-and-documentation)
- [8. Test, test, test!](#8-test-test-test)
* [Run the linters.](#run-the-linters)
* [Run the unit tests.](#run-the-unit-tests)
* [Run the integration tests.](#run-the-integration-tests)
- [9. Submit your patch.](#9-submit-your-patch)
* [Changelog](#changelog)
+ [How do I know what to call the changelog file before I create the PR?](#how-do-i-know-what-to-call-the-changelog-file-before-i-create-the-pr)
+ [Debian changelog](#debian-changelog)
* [Sign off](#sign-off)
- [10. Turn feedback into better code.](#10-turn-feedback-into-better-code)
- [11. Find a new issue.](#11-find-a-new-issue)
- [Notes for maintainers on merging PRs etc](#notes-for-maintainers-on-merging-prs-etc)
- [Conclusion](#conclusion)

# 1. Who can contribute to Synapse?
# Contributing code to Synapse

Everyone is welcome to contribute code to [matrix.org
projects](https://github.com/matrix-org), provided that they are willing to
@@ -36,179 +9,70 @@ license the code under the same terms as the project's overall 'outbound'
license - in our case, this is almost always Apache Software License v2 (see
[LICENSE](LICENSE)).

# 2. What do I need?

The code of Synapse is written in Python 3. To do pretty much anything, you'll need [a recent version of Python 3](https://wiki.python.org/moin/BeginnersGuide/Download).

The source code of Synapse is hosted on GitHub. You will also need [a recent version of git](https://github.com/git-guides/install-git).

For some tests, you will need [a recent version of Docker](https://docs.docker.com/get-docker/).

# 3. Get the source.
## How to contribute

The preferred and easiest way to contribute changes is to fork the relevant
project on GitHub, and then [create a pull request](
project on github, and then [create a pull request](
https://help.github.com/articles/using-pull-requests/) to ask us to pull your
changes into our repo.

Please base your changes on the `develop` branch.
Some other points to follow:

```sh
git clone git@github.com:YOUR_GITHUB_USER_NAME/synapse.git
git checkout develop
```
* Please base your changes on the `develop` branch.

If you need help getting started with git, this is beyond the scope of the document, but you
can find many good git tutorials on the web.
* Please follow the [code style requirements](#code-style).

# 4. Install the dependencies
* Please include a [changelog entry](#changelog) with each PR.

## Under Unix (macOS, Linux, BSD, ...)
* Please [sign off](#sign-off) your contribution.

Once you have installed Python 3 and added the source, please open a terminal and
setup a *virtualenv*, as follows:
* Please keep an eye on the pull request for feedback from the [continuous
integration system](#continuous-integration-and-testing) and try to fix any
errors that come up.

```sh
cd path/where/you/have/cloned/the/repository
python3 -m venv ./env
source ./env/bin/activate
pip install -e ".[all,lint,mypy,test]"
pip install tox
```
* If you need to [update your PR](#updating-your-pull-request), just add new
commits to your branch rather than rebasing.

This will install the developer dependencies for the project.

## Under Windows

TBD

# 5. Get in touch.

Join our developer community on Matrix: #synapse-dev:matrix.org !

# 6. Pick an issue.

Fix your favorite problem or perhaps find a [Good First Issue](https://github.com/matrix-org/synapse/issues?q=is%3Aopen+is%3Aissue+label%3A%22Good+First+Issue%22)
to work on.

# 7. Turn coffee and documentation into code and documentation!
## Code style

Synapse's code style is documented [here](docs/code_style.md). Please follow
it, including the conventions for the [sample configuration
file](docs/code_style.md#configuration-file-format).

There is a growing amount of documentation located in the [docs](docs)
directory. This documentation is intended primarily for sysadmins running their
own Synapse instance, as well as developers interacting externally with
Synapse. [docs/dev](docs/dev) exists primarily to house documentation for
Synapse developers. [docs/admin_api](docs/admin_api) houses documentation
regarding Synapse's Admin API, which is used mostly by sysadmins and external
service developers.
Many of the conventions are enforced by scripts which are run as part of the
[continuous integration system](#continuous-integration-and-testing). To help
check if you have followed the code style, you can run `scripts-dev/lint.sh`
locally. You'll need python 3.6 or later, and to install a number of tools:

If you add new files added to either of these folders, please use [GitHub-Flavoured
Markdown](https://guides.github.com/features/mastering-markdown/).
```
# Install the dependencies
pip install -e ".[lint,mypy]"

Some documentation also exists in [Synapse's GitHub
Wiki](https://github.com/matrix-org/synapse/wiki), although this is primarily
contributed to by community authors.

# 8. Test, test, test!
<a name="test-test-test"></a>

While you're developing and before submitting a patch, you'll
want to test your code.

## Run the linters.

The linters look at your code and do two things:

- ensure that your code follows the coding style adopted by the project;
- catch a number of errors in your code.

They're pretty fast, don't hesitate!

```sh
source ./env/bin/activate
# Run the linter script
./scripts-dev/lint.sh
```

Note that this script *will modify your files* to fix styling errors.
Make sure that you have saved all your files.
**Note that the script does not just test/check, but also reformats code, so you
may wish to ensure any new code is committed first**.

If you wish to restrict the linters to only the files changed since the last commit
(much faster!), you can instead run:
By default, this script checks all files and can take some time; if you alter
only certain files, you might wish to specify paths as arguments to reduce the
run-time:

```sh
source ./env/bin/activate
./scripts-dev/lint.sh -d
```

Or if you know exactly which files you wish to lint, you can instead run:

```sh
source ./env/bin/activate
./scripts-dev/lint.sh path/to/file1.py path/to/file2.py path/to/folder
```

## Run the unit tests.
You can also provide the `-d` option, which will lint the files that have been
changed since the last git commit. This will often be significantly faster than
linting the whole codebase.

The unit tests run parts of Synapse, including your changes, to see if anything
was broken. They are slower than the linters but will typically catch more errors.

```sh
source ./env/bin/activate
trial tests
```

If you wish to only run *some* unit tests, you may specify
another module instead of `tests` - or a test class or a method:

```sh
source ./env/bin/activate
trial tests.rest.admin.test_room tests.handlers.test_admin.ExfiltrateData.test_invite
```

If your tests fail, you may wish to look at the logs:

```sh
less _trial_temp/test.log
```

## Run the integration tests.

The integration tests are a more comprehensive suite of tests. They
run a full version of Synapse, including your changes, to check if
anything was broken. They are slower than the unit tests but will
typically catch more errors.

The following command will let you run the integration test with the most common
configuration:

```sh
$ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:py37
```

This configuration should generally cover your needs. For more details about other configurations, see [documentation in the SyTest repo](https://github.com/matrix-org/sytest/blob/develop/docker/README.md).

# 9. Submit your patch.

Once you're happy with your patch, it's time to prepare a Pull Request.

To prepare a Pull Request, please:

1. verify that [all the tests pass](#test-test-test), including the coding style;
2. [sign off](#sign-off) your contribution;
3. `git push` your commit to your fork of Synapse;
4. on GitHub, [create the Pull Request](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request);
5. add a [changelog entry](#changelog) and push it to your Pull Request;
6. for most contributors, that's all - however, if you are a member of the organization `matrix-org`, on GitHub, please request a review from `matrix.org / Synapse Core`.
Before pushing new changes, ensure they don't produce linting errors. Commit any
files that were corrected.

Please ensure your changes match the cosmetic style of the existing project,
and **never** mix cosmetic and functional changes in the same commit, as it
makes it horribly hard to review otherwise.

## Changelog

@@ -292,6 +156,24 @@ directory, you will need both a regular newsfragment *and* an entry in the
debian changelog. (Though typically such changes should be submitted as two
separate pull requests.)

## Documentation

There is a growing amount of documentation located in the [docs](docs)
directory. This documentation is intended primarily for sysadmins running their
own Synapse instance, as well as developers interacting externally with
Synapse. [docs/dev](docs/dev) exists primarily to house documentation for
Synapse developers. [docs/admin_api](docs/admin_api) houses documentation
regarding Synapse's Admin API, which is used mostly by sysadmins and external
service developers.

New files added to both folders should be written in [Github-Flavoured
Markdown](https://guides.github.com/features/mastering-markdown/), and attempts
should be made to migrate existing documents to markdown where possible.

Some documentation also exists in [Synapse's Github
Wiki](https://github.com/matrix-org/synapse/wiki), although this is primarily
contributed to by community authors.

## Sign off

In order to have a concrete record that your contribution is intentional
@@ -358,36 +240,47 @@ Git allows you to add this signoff automatically when using the `-s`
flag to `git commit`, which uses the name and email set in your
`user.name` and `user.email` git configs.

## Continuous integration and testing

# 10. Turn feedback into better code.
[Buildkite](https://buildkite.com/matrix-dot-org/synapse) will automatically
run a series of checks and tests against any PR which is opened against the
project; if your change breaks the build, this will be shown in GitHub, with
links to the build results. If your build fails, please try to fix the errors
and update your branch.

Once the Pull Request is opened, you will see a few things:
To run unit tests in a local development environment, you can use:

1. our automated CI (Continuous Integration) pipeline will run (again) the linters, the unit tests, the integration tests and more;
2. one or more of the developers will take a look at your Pull Request and offer feedback.
- ``tox -e py35`` (requires tox to be installed by ``pip install tox``)
for SQLite-backed Synapse on Python 3.5.
- ``tox -e py36`` for SQLite-backed Synapse on Python 3.6.
- ``tox -e py36-postgres`` for PostgreSQL-backed Synapse on Python 3.6
(requires a running local PostgreSQL with access to create databases).
- ``./test_postgresql.sh`` for PostgreSQL-backed Synapse on Python 3.5
(requires Docker). Entirely self-contained, recommended if you don't want to
set up PostgreSQL yourself.

From this point, you should:
Docker images are available for running the integration tests (SyTest) locally,
see the [documentation in the SyTest repo](
https://github.com/matrix-org/sytest/blob/develop/docker/README.md) for more
information.

1. Look at the results of the CI pipeline.
- If there is any error, fix the error.
2. If a developer has requested changes, make these changes and let us know if it is ready for a developer to review again.
3. Create a new commit with the changes.
- Please do NOT overwrite the history. New commits make the reviewer's life easier.
- Push this commits to your Pull Request.
4. Back to 1.
## Updating your pull request

Once both the CI and the developers are happy, the patch will be merged into Synapse and released shortly!
If you decide to make changes to your pull request - perhaps to address issues
raised in a review, or to fix problems highlighted by [continuous
integration](#continuous-integration-and-testing) - just add new commits to your
branch, and push to GitHub. The pull request will automatically be updated.

# 11. Find a new issue.
Please **avoid** rebasing your branch, especially once the PR has been
reviewed: doing so makes it very difficult for a reviewer to see what has
changed since a previous review.

By now, you know the drill!

# Notes for maintainers on merging PRs etc
## Notes for maintainers on merging PRs etc

There are some notes for those with commit access to the project on how we
manage git [here](docs/dev/git.md).

# Conclusion
## Conclusion

That's it! Matrix is a very open and collaborative project as you might expect
given our obsession with open communication. If we're going to successfully
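The sign-off convention mentioned in both versions of the text boils down to a standard `git commit -s`; a small illustrative example (the commit message and name are placeholders):

```sh
# -s appends a Signed-off-by trailer using your user.name / user.email config
git commit -s -m "Fix a typo in the changelog"
# Resulting trailer in the commit message:
#   Signed-off-by: Your Name <your.email@example.org>
```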
321  INSTALL.md
@@ -1,45 +1,19 @@
# Installation Instructions
- [Choosing your server name](#choosing-your-server-name)
- [Picking a database engine](#picking-a-database-engine)
- [Installing Synapse](#installing-synapse)
- [Installing from source](#installing-from-source)
- [Platform-Specific Instructions](#platform-specific-instructions)
- [Prebuilt packages](#prebuilt-packages)
- [Setting up Synapse](#setting-up-synapse)
- [TLS certificates](#tls-certificates)
- [Client Well-Known URI](#client-well-known-uri)
- [Email](#email)
- [Registering a user](#registering-a-user)
- [Setting up a TURN server](#setting-up-a-turn-server)
- [URL previews](#url-previews)
- [Troubleshooting Installation](#troubleshooting-installation)

There are 3 steps to follow under **Installation Instructions**.

- [Installation Instructions](#installation-instructions)
- [Choosing your server name](#choosing-your-server-name)
- [Installing Synapse](#installing-synapse)
- [Installing from source](#installing-from-source)
- [Platform-specific prerequisites](#platform-specific-prerequisites)
- [Debian/Ubuntu/Raspbian](#debianubunturaspbian)
- [ArchLinux](#archlinux)
- [CentOS/Fedora](#centosfedora)
- [macOS](#macos)
- [OpenSUSE](#opensuse)
- [OpenBSD](#openbsd)
- [Windows](#windows)
- [Prebuilt packages](#prebuilt-packages)
- [Docker images and Ansible playbooks](#docker-images-and-ansible-playbooks)
- [Debian/Ubuntu](#debianubuntu)
- [Matrix.org packages](#matrixorg-packages)
- [Downstream Debian packages](#downstream-debian-packages)
- [Downstream Ubuntu packages](#downstream-ubuntu-packages)
- [Fedora](#fedora)
- [OpenSUSE](#opensuse-1)
- [SUSE Linux Enterprise Server](#suse-linux-enterprise-server)
- [ArchLinux](#archlinux-1)
- [Void Linux](#void-linux)
- [FreeBSD](#freebsd)
- [OpenBSD](#openbsd-1)
- [NixOS](#nixos)
- [Setting up Synapse](#setting-up-synapse)
- [Using PostgreSQL](#using-postgresql)
- [TLS certificates](#tls-certificates)
- [Client Well-Known URI](#client-well-known-uri)
- [Email](#email)
- [Registering a user](#registering-a-user)
- [Setting up a TURN server](#setting-up-a-turn-server)
- [URL previews](#url-previews)
- [Troubleshooting Installation](#troubleshooting-installation)

## Choosing your server name
# Choosing your server name

It is important to choose the name for your server before you install Synapse,
because it cannot be changed later.
@@ -55,24 +29,46 @@ that your email address is probably `user@example.com` rather than
`user@email.example.com`) - but doing so may require more advanced setup: see
[Setting up Federation](docs/federate.md).

## Installing Synapse
# Picking a database engine

### Installing from source
Synapse offers two database engines:
* [PostgreSQL](https://www.postgresql.org)
* [SQLite](https://sqlite.org/)

Almost all installations should opt to use PostgreSQL. Advantages include:

* significant performance improvements due to the superior threading and
caching model, smarter query optimiser
* allowing the DB to be run on separate hardware

For information on how to install and use PostgreSQL, please see
[docs/postgres.md](docs/postgres.md)

By default Synapse uses SQLite and in doing so trades performance for convenience.
SQLite is only recommended in Synapse for testing purposes or for servers with
light workloads.

# Installing Synapse

## Installing from source

(Prebuilt packages are available for some platforms - see [Prebuilt packages](#prebuilt-packages).)

When installing from source please make sure that the [Platform-specific prerequisites](#platform-specific-prerequisites) are already installed.

System requirements:

- POSIX-compliant system (tested on Linux & OS X)
- Python 3.5.2 or later, up to Python 3.9.
- At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org

Synapse is written in Python but some of the libraries it uses are written in
C. So before we can install Synapse itself we need a working C compiler and the
header files for Python C extensions. See [Platform-Specific
Instructions](#platform-specific-instructions) for information on installing
these on various platforms.

To install the Synapse homeserver run:

```sh
```
mkdir -p ~/synapse
virtualenv -p python3 ~/synapse/env
source ~/synapse/env/bin/activate
@@ -89,7 +85,7 @@ prefer.
This Synapse installation can then be later upgraded by using pip again with the
update flag:

```sh
```
source ~/synapse/env/bin/activate
pip install -U matrix-synapse
```
@@ -97,7 +93,7 @@ pip install -U matrix-synapse
Before you can start Synapse, you will need to generate a configuration
file. To do this, run (in your virtualenv, as before):

```sh
```
cd ~/synapse
python -m synapse.app.homeserver \
--server-name my.domain.name \
@@ -115,58 +111,70 @@ wise to back them up somewhere safe. (If, for whatever reason, you do need to
change your homeserver's keys, you may find that other homeserver have the
old key cached. If you update the signing key, you should change the name of the
key in the `<server name>.signing.key` file (the second word) to something
different. See the [spec](https://matrix.org/docs/spec/server_server/latest.html#retrieving-server-keys) for more information on key management).
different. See the
[spec](https://matrix.org/docs/spec/server_server/latest.html#retrieving-server-keys)
for more information on key management).

To actually run your new homeserver, pick a working directory for Synapse to
run (e.g. `~/synapse`), and:

```sh
```
cd ~/synapse
source env/bin/activate
synctl start
```

#### Platform-specific prerequisites
### Platform-Specific Instructions

Synapse is written in Python but some of the libraries it uses are written in
C. So before we can install Synapse itself we need a working C compiler and the
header files for Python C extensions.

##### Debian/Ubuntu/Raspbian
#### Debian/Ubuntu/Raspbian

Installing prerequisites on Ubuntu or Debian:

```sh
sudo apt install build-essential python3-dev libffi-dev \
```
sudo apt-get install build-essential python3-dev libffi-dev \
python3-pip python3-setuptools sqlite3 \
libssl-dev virtualenv libjpeg-dev libxslt1-dev
```

##### ArchLinux
#### ArchLinux

Installing prerequisites on ArchLinux:

```sh
```
sudo pacman -S base-devel python python-pip \
python-setuptools python-virtualenv sqlite3
```

##### CentOS/Fedora
#### CentOS/Fedora

Installing prerequisites on CentOS or Fedora Linux:
Installing prerequisites on CentOS 8 or Fedora>26:

```sh
```
sudo dnf install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
libwebp-devel libxml2-devel libxslt-devel libpq-devel \
python3-virtualenv libffi-devel openssl-devel python3-devel
libwebp-devel tk-devel redhat-rpm-config \
python3-virtualenv libffi-devel openssl-devel
sudo dnf groupinstall "Development Tools"
```

##### macOS
Installing prerequisites on CentOS 7 or Fedora<=25:

```
sudo yum install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
lcms2-devel libwebp-devel tcl-devel tk-devel redhat-rpm-config \
python3-virtualenv libffi-devel openssl-devel
sudo yum groupinstall "Development Tools"
```

Note that Synapse does not support versions of SQLite before 3.11, and CentOS 7
uses SQLite 3.7. You may be able to work around this by installing a more
recent SQLite version, but it is recommended that you instead use a Postgres
database: see [docs/postgres.md](docs/postgres.md).

#### macOS

Installing prerequisites on macOS:

```sh
```
xcode-select --install
sudo easy_install pip
sudo pip install virtualenv
@@ -176,23 +184,22 @@ brew install pkg-config libffi
On macOS Catalina (10.15) you may need to explicitly install OpenSSL
via brew and inform `pip` about it so that `psycopg2` builds:

```sh
```
brew install openssl@1.1
export LDFLAGS="-L/usr/local/opt/openssl/lib"
export CPPFLAGS="-I/usr/local/opt/openssl/include"
export LDFLAGS=-L/usr/local/Cellar/openssl\@1.1/1.1.1d/lib/
```

##### OpenSUSE
#### OpenSUSE

Installing prerequisites on openSUSE:

```sh
```
sudo zypper in -t pattern devel_basis
sudo zypper in python-pip python-setuptools sqlite3 python-virtualenv \
python-devel libffi-devel libopenssl-devel libjpeg62-devel
```

##### OpenBSD
#### OpenBSD

A port of Synapse is available under `net/synapse`. The filesystem
underlying the homeserver directory (defaults to `/var/synapse`) has to be
@@ -206,72 +213,73 @@ mounted with `wxallowed` (cf. `mount(8)`).
Creating a `WRKOBJDIR` for building python under `/usr/local` (which on a
default OpenBSD installation is mounted with `wxallowed`):

```sh
```
doas mkdir /usr/local/pobj_wxallowed
```

Assuming `PORTS_PRIVSEP=Yes` (cf. `bsd.port.mk(5)`) and `SUDO=doas` are
configured in `/etc/mk.conf`:

```sh
```
doas chown _pbuild:_pbuild /usr/local/pobj_wxallowed
```

Setting the `WRKOBJDIR` for building python:

```sh
```
echo WRKOBJDIR_lang/python/3.7=/usr/local/pobj_wxallowed \\nWRKOBJDIR_lang/python/2.7=/usr/local/pobj_wxallowed >> /etc/mk.conf
```

Building Synapse:

```sh
```
cd /usr/ports/net/synapse
make install
```

##### Windows
#### Windows

If you wish to run or develop Synapse on Windows, the Windows Subsystem For
Linux provides a Linux environment on Windows 10 which is capable of using the
Debian, Fedora, or source installation methods. More information about WSL can
be found at <https://docs.microsoft.com/en-us/windows/wsl/install-win10> for
Windows 10 and <https://docs.microsoft.com/en-us/windows/wsl/install-on-server>
be found at https://docs.microsoft.com/en-us/windows/wsl/install-win10 for
Windows 10 and https://docs.microsoft.com/en-us/windows/wsl/install-on-server
for Windows Server.

### Prebuilt packages
## Prebuilt packages

As an alternative to installing from source, prebuilt packages are available
for a number of platforms.

#### Docker images and Ansible playbooks
### Docker images and Ansible playbooks

There is an official synapse image available at
<https://hub.docker.com/r/matrixdotorg/synapse> which can be used with
There is an offical synapse image available at
https://hub.docker.com/r/matrixdotorg/synapse which can be used with
the docker-compose file available at [contrib/docker](contrib/docker). Further
information on this including configuration options is available in the README
on hub.docker.com.

Alternatively, Andreas Peters (previously Silvio Fricke) has contributed a
Dockerfile to automate a synapse server in a single Docker image, at
<https://hub.docker.com/r/avhost/docker-matrix/tags/>
https://hub.docker.com/r/avhost/docker-matrix/tags/

Slavi Pantaleev has created an Ansible playbook,
which installs the offical Docker image of Matrix Synapse
along with many other Matrix-related services (Postgres database, Element, coturn,
ma1sd, SSL support, etc.).
For more details, see
<https://github.com/spantaleev/matrix-docker-ansible-deploy>
https://github.com/spantaleev/matrix-docker-ansible-deploy

#### Debian/Ubuntu

##### Matrix.org packages
### Debian/Ubuntu

#### Matrix.org packages

Matrix.org provides Debian/Ubuntu packages of the latest stable version of
Synapse via <https://packages.matrix.org/debian/>. They are available for Debian
Synapse via https://packages.matrix.org/debian/. They are available for Debian
9 (Stretch), Ubuntu 16.04 (Xenial), and later. To use them:

```sh
```
sudo apt install -y lsb-release wget apt-transport-https
sudo wget -O /usr/share/keyrings/matrix-org-archive-keyring.gpg https://packages.matrix.org/debian/matrix-org-archive-keyring.gpg
echo "deb [signed-by=/usr/share/keyrings/matrix-org-archive-keyring.gpg] https://packages.matrix.org/debian/ $(lsb_release -cs) main" |
@@ -291,7 +299,7 @@ The fingerprint of the repository signing key (as shown by `gpg
/usr/share/keyrings/matrix-org-archive-keyring.gpg`) is
`AAF9AE843A7584B5A3E4CD2BCF45A512DE2DA058`.

##### Downstream Debian packages
#### Downstream Debian packages

We do not recommend using the packages from the default Debian `buster`
repository at this time, as they are old and suffer from known security
@@ -303,49 +311,49 @@ for information on how to use backports.
If you are using Debian `sid` or testing, Synapse is available in the default
repositories and it should be possible to install it simply with:

```sh
```
sudo apt install matrix-synapse
```

##### Downstream Ubuntu packages
#### Downstream Ubuntu packages

We do not recommend using the packages in the default Ubuntu repository
at this time, as they are old and suffer from known security vulnerabilities.
The latest version of Synapse can be installed from [our repository](#matrixorg-packages).

#### Fedora
### Fedora

Synapse is in the Fedora repositories as `matrix-synapse`:

```sh
```
sudo dnf install matrix-synapse
```

Oleg Girko provides Fedora RPMs at
<https://obs.infoserver.lv/project/monitor/matrix-synapse>
https://obs.infoserver.lv/project/monitor/matrix-synapse

#### OpenSUSE
### OpenSUSE

Synapse is in the OpenSUSE repositories as `matrix-synapse`:

```sh
```
sudo zypper install matrix-synapse
```

#### SUSE Linux Enterprise Server
### SUSE Linux Enterprise Server

Unofficial package are built for SLES 15 in the openSUSE:Backports:SLE-15 repository at
<https://download.opensuse.org/repositories/openSUSE:/Backports:/SLE-15/standard/>
https://download.opensuse.org/repositories/openSUSE:/Backports:/SLE-15/standard/

#### ArchLinux
### ArchLinux

The quickest way to get up and running with ArchLinux is probably with the community package
<https://www.archlinux.org/packages/community/any/matrix-synapse/>, which should pull in most of
https://www.archlinux.org/packages/community/any/matrix-synapse/, which should pull in most of
the necessary dependencies.

pip may be outdated (6.0.7-1 and needs to be upgraded to 6.0.8-1 ):

```sh
```
sudo pip install --upgrade pip
```

@@ -354,28 +362,28 @@ ELFCLASS32 (x64 Systems), you may need to reinstall py-bcrypt to correctly
compile it under the right architecture. (This should not be needed if
installing under virtualenv):

```sh
```
sudo pip uninstall py-bcrypt
sudo pip install py-bcrypt
```

#### Void Linux
### Void Linux

Synapse can be found in the void repositories as 'synapse':

```sh
```
xbps-install -Su
xbps-install -S synapse
```

#### FreeBSD
### FreeBSD

Synapse can be installed via FreeBSD Ports or Packages contributed by Brendan Molloy from:

- Ports: `cd /usr/ports/net-im/py-matrix-synapse && make install clean`
- Packages: `pkg install py37-matrix-synapse`
- Ports: `cd /usr/ports/net-im/py-matrix-synapse && make install clean`
- Packages: `pkg install py37-matrix-synapse`

#### OpenBSD
### OpenBSD

As of OpenBSD 6.7 Synapse is available as a pre-compiled binary. The filesystem
underlying the homeserver directory (defaults to `/var/synapse`) has to be
@@ -384,35 +392,20 @@ and mounting it to `/var/synapse` should be taken into consideration.

Installing Synapse:

```sh
```
doas pkg_add synapse
```

#### NixOS
### NixOS

Robin Lambertz has packaged Synapse for NixOS at:
<https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/services/misc/matrix-synapse.nix>
https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/services/misc/matrix-synapse.nix

## Setting up Synapse
# Setting up Synapse

Once you have installed synapse as above, you will need to configure it.

### Using PostgreSQL

By default Synapse uses [SQLite](https://sqlite.org/) and in doing so trades performance for convenience.
SQLite is only recommended in Synapse for testing purposes or for servers with
very light workloads.

Almost all installations should opt to use [PostgreSQL](https://www.postgresql.org). Advantages include:

- significant performance improvements due to the superior threading and
caching model, smarter query optimiser
- allowing the DB to be run on separate hardware

For information on how to install and use PostgreSQL in Synapse, please see
[docs/postgres.md](docs/postgres.md)

### TLS certificates
## TLS certificates

The default configuration exposes a single HTTP port on the local
interface: `http://localhost:8008`. It is suitable for local testing,
@@ -426,19 +419,19 @@ The recommended way to do so is to set up a reverse proxy on port
Alternatively, you can configure Synapse to expose an HTTPS port. To do
so, you will need to edit `homeserver.yaml`, as follows:

- First, under the `listeners` section, uncomment the configuration for the
* First, under the `listeners` section, uncomment the configuration for the
TLS-enabled listener. (Remove the hash sign (`#`) at the start of
each line). The relevant lines are like this:

```yaml
- port: 8448
type: http
tls: true
resources:
- names: [client, federation]
```
- port: 8448
type: http
tls: true
resources:
- names: [client, federation]
```

- You will also need to uncomment the `tls_certificate_path` and
* You will also need to uncomment the `tls_certificate_path` and
`tls_private_key_path` lines under the `TLS` section. You will need to manage
provisioning of these certificates yourself — Synapse had built-in ACME
support, but the ACMEv1 protocol Synapse implements is deprecated, not
@@ -453,7 +446,7 @@ so, you will need to edit `homeserver.yaml`, as follows:
For a more detailed guide to configuring your server for federation, see
[federate.md](docs/federate.md).

### Client Well-Known URI
## Client Well-Known URI

Setting up the client Well-Known URI is optional but if you set it up, it will
allow users to enter their full username (e.g. `@user:<server_name>`) into clients
@@ -464,7 +457,7 @@ about the actual homeserver URL you are using.
The URL `https://<server_name>/.well-known/matrix/client` should return JSON in
the following format.

```json
```
{
"m.homeserver": {
"base_url": "https://<matrix.example.com>"
@@ -474,7 +467,7 @@ the following format.

It can optionally contain identity server information as well.

```json
```
{
"m.homeserver": {
"base_url": "https://<matrix.example.com>"
@@ -491,11 +484,10 @@ Cross-Origin Resource Sharing (CORS) headers. A recommended value would be
view it.

In nginx this would be something like:

```nginx
```
location /.well-known/matrix/client {
return 200 '{"m.homeserver": {"base_url": "https://<matrix.example.com>"}}';
default_type application/json;
add_header Content-Type application/json;
add_header Access-Control-Allow-Origin *;
}
```
@@ -505,11 +497,11 @@ correctly. `public_baseurl` should be set to the URL that clients will use to
connect to your server. This is the same URL you put for the `m.homeserver`
`base_url` above.

```yaml
```
public_baseurl: "https://<matrix.example.com>"
```

### Email
## Email

It is desirable for Synapse to have the capability to send email. This allows
Synapse to send password reset emails, send verifications when an email address
@@ -524,28 +516,18 @@ and `notif_from` fields filled out. You may also need to set `smtp_user`,
If email is not configured, password reset, registration and notifications via
email will be disabled.

### Registering a user
## Registering a user

The easiest way to create a new user is to do so from a client like [Element](https://element.io/).

Alternatively, you can do so from the command line. This can be done as follows:
Alternatively you can do so from the command line if you have installed via pip.

1. If synapse was installed via pip, activate the virtualenv as follows (if Synapse was
installed via a prebuilt package, `register_new_matrix_user` should already be
on the search path):
```sh
cd ~/synapse
source env/bin/activate
synctl start # if not already running
```
2. Run the following command:
```sh
register_new_matrix_user -c homeserver.yaml http://localhost:8008
```
This can be done as follows:

This will prompt you to add details for the new user, and will then connect to
the running Synapse to create the new user. For example:
```
$ source ~/synapse/env/bin/activate
$ synctl start # if not already running
$ register_new_matrix_user -c homeserver.yaml http://localhost:8008
New user localpart: erikj
Password:
Confirm password:
@@ -560,12 +542,12 @@ value is generated by `--generate-config`), but it should be kept secret, as
anyone with knowledge of it can register users, including admin accounts,
on your server even if `enable_registration` is `false`.

### Setting up a TURN server
## Setting up a TURN server

For reliable VoIP calls to be routed via this homeserver, you MUST configure
a TURN server. See [docs/turn-howto.md](docs/turn-howto.md) for details.

### URL previews
## URL previews

Synapse includes support for previewing URLs, which is disabled by default. To
turn it on you must enable the `url_preview_enabled: True` config parameter
@@ -575,18 +557,19 @@ This is critical from a security perspective to stop arbitrary Matrix users
spidering 'internal' URLs on your network. At the very least we recommend that
your loopback and RFC1918 IP addresses are blacklisted.

This also requires the optional `lxml` python dependency to be installed. This
in turn requires the `libxml2` library to be available - on Debian/Ubuntu this
means `apt-get install libxml2-dev`, or equivalent for your OS.
This also requires the optional `lxml` and `netaddr` python dependencies to be
installed. This in turn requires the `libxml2` library to be available - on
Debian/Ubuntu this means `apt-get install libxml2-dev`, or equivalent for
your OS.

### Troubleshooting Installation
# Troubleshooting Installation

`pip` seems to leak *lots* of memory during installation. For instance, a Linux
host with 512MB of RAM may run out of memory whilst installing Twisted. If this
happens, you will have to individually install the dependencies which are
failing, e.g.:

```sh
```
pip install twisted
```
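One quick way to verify the Client Well-Known URI described in the INSTALL.md hunks above, once it is being served, is a plain curl against the documented path (illustrative command; `<server_name>` and the base URL are placeholders):

```sh
curl -s https://<server_name>/.well-known/matrix/client
# Expected shape of the response:
# {"m.homeserver": {"base_url": "https://<matrix.example.com>"}}
```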
@@ -20,10 +20,9 @@ recursive-include scripts *
recursive-include scripts-dev *
recursive-include synapse *.pyi
recursive-include tests *.py
recursive-include tests *.pem
recursive-include tests *.p8
recursive-include tests *.crt
recursive-include tests *.key
include tests/http/ca.crt
include tests/http/ca.key
include tests/http/server.key

recursive-include synapse/res *
recursive-include synapse/static *.css
46  README.rst
@@ -183,9 +183,8 @@ Using a reverse proxy with Synapse
|
||||
It is recommended to put a reverse proxy such as
|
||||
`nginx <https://nginx.org/en/docs/http/ngx_http_proxy_module.html>`_,
|
||||
`Apache <https://httpd.apache.org/docs/current/mod/mod_proxy_http.html>`_,
|
||||
`Caddy <https://caddyserver.com/docs/quick-starts/reverse-proxy>`_,
|
||||
`HAProxy <https://www.haproxy.org/>`_ or
|
||||
`relayd <https://man.openbsd.org/relayd.8>`_ in front of Synapse. One advantage of
|
||||
`Caddy <https://caddyserver.com/docs/quick-starts/reverse-proxy>`_ or
|
||||
`HAProxy <https://www.haproxy.org/>`_ in front of Synapse. One advantage of
|
||||
doing so is that it means that you can expose the default https port (443) to
|
||||
Matrix clients without needing to run Synapse with root privileges.
|
||||
|
||||
@@ -244,8 +243,6 @@ Then update the ``users`` table in the database::
|
||||
Synapse Development
|
||||
===================
|
||||
|
||||
Join our developer community on Matrix: `#synapse-dev:matrix.org <https://matrix.to/#/#synapse-dev:matrix.org>`_
|
||||
|
||||
Before setting up a development environment for synapse, make sure you have the
|
||||
system dependencies (such as the python header files) installed - see
|
||||
`Installing from source <INSTALL.md#installing-from-source>`_.
|
||||
@@ -281,27 +278,6 @@ differ)::
|
||||
|
||||
PASSED (skips=15, successes=1322)
|
||||
|
||||
We recommend using the demo which starts 3 federated instances running on ports `8080` - `8082`
|
||||
|
||||
./demo/start.sh
|
||||
|
||||
(to stop, you can use `./demo/stop.sh`)
|
||||
|
||||
If you just want to start a single instance of the app and run it directly::
|
||||
|
||||
# Create the homeserver.yaml config once
|
||||
python -m synapse.app.homeserver \
|
||||
--server-name my.domain.name \
|
||||
--config-path homeserver.yaml \
|
||||
--generate-config \
|
||||
--report-stats=[yes|no]
|
||||
|
||||
# Start the app
|
||||
python -m synapse.app.homeserver --config-path homeserver.yaml
|
||||
|
||||
|
||||
|
||||
|
||||
Running the Integration Tests
|
||||
=============================
|
||||
|
||||
@@ -314,15 +290,6 @@ Testing with SyTest is recommended for verifying that changes related to the
|
||||
Client-Server API are functioning correctly. See the `installation instructions
|
||||
<https://github.com/matrix-org/sytest#installing>`_ for details.
|
||||
|
||||
|
||||
Platform dependencies
|
||||
=====================
|
||||
|
||||
Synapse uses a number of platform dependencies such as Python and PostgreSQL,
|
||||
and aims to follow supported upstream versions. See the
|
||||
`<docs/deprecation_policy.md>`_ document for more details.
|
||||
|
||||
|
||||
Troubleshooting
|
||||
===============
|
||||
|
||||
@@ -393,17 +360,12 @@ massive excess of outgoing federation requests (see `discussion
|
||||
indicate that your server is also issuing far more outgoing federation
|
||||
requests than can be accounted for by your users' activity, this is a
|
||||
likely cause. The misbehavior can be worked around by setting
|
||||
the following in the Synapse config file:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
presence:
|
||||
enabled: false
|
||||
``use_presence: false`` in the Synapse config file.
|
||||
|
||||
People can't accept room invitations from me
|
||||
--------------------------------------------
|
||||
|
||||
The typical failure mode here is that you send an invitation to someone
|
||||
to join a room or direct chat, but when they go to accept it, they get an
|
||||
error (typically along the lines of "Invalid signature"). They might see
|
||||
something like the following in their logs::
|
||||
|
||||
UPGRADE.rst
@@ -5,16 +5,6 @@ Before upgrading check if any special steps are required to upgrade from the
|
||||
version you currently have installed to the current version of Synapse. The extra
|
||||
instructions that may be required are listed later in this document.
|
||||
|
||||
* Check that your versions of Python and PostgreSQL are still supported.
|
||||
|
||||
Synapse follows upstream lifecycles for `Python`_ and `PostgreSQL`_, and
|
||||
removes support for versions which are no longer maintained.
|
||||
|
||||
The website https://endoflife.date also offers convenient summaries.
|
||||
|
||||
.. _Python: https://devguide.python.org/devcycle/#end-of-life-branches
|
||||
.. _PostgreSQL: https://www.postgresql.org/support/versioning/
|
||||
|
||||
* If Synapse was installed using `prebuilt packages
|
||||
<INSTALL.md#prebuilt-packages>`_, you will need to follow the normal process
|
||||
for upgrading those packages.
|
||||
@@ -85,237 +75,6 @@ for example:
|
||||
wget https://packages.matrix.org/debian/pool/main/m/matrix-synapse-py3/matrix-synapse-py3_1.3.0+stretch1_amd64.deb
|
||||
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
|
||||
|
||||
Upgrading to v1.32.0
|
||||
====================
|
||||
|
||||
Removal of old List Accounts Admin API
|
||||
--------------------------------------
|
||||
|
||||
The deprecated v1 "list accounts" admin API (``GET /_synapse/admin/v1/users/<user_id>``) has been removed in this version.
|
||||
|
||||
The `v2 list accounts API <https://github.com/matrix-org/synapse/blob/master/docs/admin_api/user_admin_api.rst#list-accounts>`_
|
||||
has been available since Synapse 1.7.0 (2019-12-13), and is accessible under ``GET /_synapse/admin/v2/users``.
|
||||
|
||||
The deprecation of the old endpoint was announced with Synapse 1.28.0 (released on 2021-02-25).
|
||||
|
||||
Upgrading to v1.29.0
|
||||
====================
|
||||
|
||||
Requirement for X-Forwarded-Proto header
|
||||
----------------------------------------
|
||||
|
||||
When using Synapse with a reverse proxy (in particular, when using the
|
||||
`x_forwarded` option on an HTTP listener), Synapse now expects to receive an
|
||||
`X-Forwarded-Proto` header on incoming HTTP requests. If it is not set, Synapse
|
||||
will log a warning on each received request.
|
||||
|
||||
To avoid the warning, administrators using a reverse proxy should ensure that
|
||||
the reverse proxy sets `X-Forwarded-Proto` header to `https` or `http` to
|
||||
indicate the protocol used by the client.
|
||||
|
||||
Synapse also requires the `Host` header to be preserved.
|
||||
|
||||
See the `reverse proxy documentation <docs/reverse_proxy.md>`_, where the
|
||||
example configurations have been updated to show how to set these headers.
|
||||
|
||||
(Users of `Caddy <https://caddyserver.com/>`_ are unaffected, since we believe it
|
||||
sets `X-Forwarded-Proto` by default.)
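For reference, a sketch of the Synapse side of this setup (the port and resource names are illustrative); the reverse proxy itself must still be configured to send ``X-Forwarded-Proto`` and to preserve ``Host``:

.. code-block:: yaml

   # Illustrative listener with x_forwarded enabled; adjust port/resources to taste.
   listeners:
     - port: 8008
       type: http
       x_forwarded: true
       resources:
         - names: [client, federation]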
|
||||
|
||||
Upgrading to v1.27.0
|
||||
====================
|
||||
|
||||
Changes to callback URI for OAuth2 / OpenID Connect and SAML2
|
||||
-------------------------------------------------------------
|
||||
|
||||
This version changes the URI used for callbacks from OAuth2 and SAML2 identity providers:
|
||||
|
||||
* If your server is configured for single sign-on via an OpenID Connect or OAuth2 identity
|
||||
provider, you will need to add ``[synapse public baseurl]/_synapse/client/oidc/callback``
|
||||
to the list of permitted "redirect URIs" at the identity provider.
|
||||
|
||||
See `docs/openid.md <docs/openid.md>`_ for more information on setting up OpenID
|
||||
Connect.
|
||||
|
||||
* If your server is configured for single sign-on via a SAML2 identity provider, you will
|
||||
need to add ``[synapse public baseurl]/_synapse/client/saml2/authn_response`` as a permitted
|
||||
"ACS location" (also known as "allowed callback URLs") at the identity provider.
|
||||
|
||||
The "Issuer" in the "AuthnRequest" to the SAML2 identity provider is also updated to
|
||||
``[synapse public baseurl]/_synapse/client/saml2/metadata.xml``. If your SAML2 identity
|
||||
provider uses this property to validate or otherwise identify Synapse, its configuration
|
||||
will need to be updated to use the new URL. Alternatively you could create a new, separate
|
||||
"EntityDescriptor" in your SAML2 identity provider with the new URLs and leave the URLs in
|
||||
the existing "EntityDescriptor" as they were.
|
||||
|
||||
Changes to HTML templates
|
||||
-------------------------
|
||||
|
||||
The HTML templates for SSO and email notifications now have `Jinja2's autoescape <https://jinja.palletsprojects.com/en/2.11.x/api/#autoescaping>`_
|
||||
enabled for files ending in ``.html``, ``.htm``, and ``.xml``. If you have customised
|
||||
these templates and see issues when viewing them you might need to update them.
|
||||
It is expected that most configurations will need no changes.
|
||||
|
||||
If you have customised the templates *names* for these templates, it is recommended
|
||||
to verify they end in ``.html`` to ensure autoescape is enabled.
|
||||
|
||||
The above applies to the following templates:
|
||||
|
||||
* ``add_threepid.html``
|
||||
* ``add_threepid_failure.html``
|
||||
* ``add_threepid_success.html``
|
||||
* ``notice_expiry.html``
|
||||
* ``notif_mail.html`` (which, by default, includes ``room.html`` and ``notif.html``)
|
||||
* ``password_reset.html``
|
||||
* ``password_reset_confirmation.html``
|
||||
* ``password_reset_failure.html``
|
||||
* ``password_reset_success.html``
|
||||
* ``registration.html``
|
||||
* ``registration_failure.html``
|
||||
* ``registration_success.html``
|
||||
* ``sso_account_deactivated.html``
|
||||
* ``sso_auth_bad_user.html``
|
||||
* ``sso_auth_confirm.html``
|
||||
* ``sso_auth_success.html``
|
||||
* ``sso_error.html``
|
||||
* ``sso_login_idp_picker.html``
|
||||
* ``sso_redirect_confirm.html``
|
||||
|
||||
Upgrading to v1.26.0
|
||||
====================
|
||||
|
||||
Rolling back to v1.25.0 after a failed upgrade
|
||||
----------------------------------------------
|
||||
|
||||
v1.26.0 includes a lot of large changes. If something problematic occurs, you
|
||||
may want to roll-back to a previous version of Synapse. Because v1.26.0 also
|
||||
includes a new database schema version, reverting that version is also required
|
||||
alongside the generic rollback instructions mentioned above. In short, to roll
|
||||
back to v1.25.0 you need to:
|
||||
|
||||
1. Stop the server
|
||||
2. Decrease the schema version in the database:
|
||||
|
||||
.. code:: sql
|
||||
|
||||
UPDATE schema_version SET version = 58;
|
||||
|
||||
3. Delete the ignored users & chain cover data:
|
||||
|
||||
.. code:: sql
|
||||
|
||||
DROP TABLE IF EXISTS ignored_users;
|
||||
UPDATE rooms SET has_auth_chain_index = false;
|
||||
|
||||
For PostgreSQL run:
|
||||
|
||||
.. code:: sql
|
||||
|
||||
TRUNCATE event_auth_chain_links;
|
||||
TRUNCATE event_auth_chains;
|
||||
|
||||
For SQLite run:
|
||||
|
||||
.. code:: sql
|
||||
|
||||
DELETE FROM event_auth_chain_links;
|
||||
DELETE FROM event_auth_chains;
|
||||
|
||||
4. Mark the deltas as not run (so they will re-run on upgrade).
|
||||
|
||||
.. code:: sql
|
||||
|
||||
DELETE FROM applied_schema_deltas WHERE version = 59 AND file = "59/01ignored_user.py";
|
||||
DELETE FROM applied_schema_deltas WHERE version = 59 AND file = "59/06chain_cover_index.sql";
|
||||
|
||||
5. Downgrade Synapse by following the instructions for your installation method
|
||||
in the "Rolling back to older versions" section above.
|
||||
|
||||
Upgrading to v1.25.0
|
||||
====================
|
||||
|
||||
Last release supporting Python 3.5
|
||||
----------------------------------
|
||||
|
||||
This is the last release of Synapse which guarantees support with Python 3.5,
|
||||
which passed its upstream End of Life date several months ago.
|
||||
|
||||
We will attempt to maintain support through March 2021, but without guarantees.
|
||||
|
||||
In the future, Synapse will follow upstream schedules for ending support of
|
||||
older versions of Python and PostgreSQL. Please upgrade to at least Python 3.6
|
||||
and PostgreSQL 9.6 as soon as possible.
|
||||
|
||||
Blacklisting IP ranges
|
||||
----------------------
|
||||
|
||||
Synapse v1.25.0 includes new settings, ``ip_range_blacklist`` and
|
||||
``ip_range_whitelist``, for controlling outgoing requests from Synapse for federation,
|
||||
identity servers, push, and for checking key validity for third-party invite events.
|
||||
The previous setting, ``federation_ip_range_blacklist``, is deprecated. The new
|
||||
``ip_range_blacklist`` defaults to private IP ranges if it is not defined.
|
||||
|
||||
If you have never customised ``federation_ip_range_blacklist`` it is recommended
|
||||
that you remove that setting.
|
||||
|
||||
If you have customised ``federation_ip_range_blacklist`` you should update the
|
||||
setting name to ``ip_range_blacklist``.
|
||||
|
||||
If you have a custom push server that is reached via private IP space you may
|
||||
need to customise ``ip_range_blacklist`` or ``ip_range_whitelist``.
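As a rough sketch of the rename (the CIDR ranges below are placeholders, not a recommendation):

.. code-block:: yaml

   # Before (deprecated):
   # federation_ip_range_blacklist:
   #   - '10.0.0.0/8'
   #   - '192.168.0.0/16'

   # After (the same ranges, under the new name):
   ip_range_blacklist:
     - '10.0.0.0/8'
     - '192.168.0.0/16'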
|
||||
|
||||
Upgrading to v1.24.0
|
||||
====================
|
||||
|
||||
Custom OpenID Connect mapping provider breaking change
|
||||
------------------------------------------------------
|
||||
|
||||
This release allows the OpenID Connect mapping provider to perform normalisation
|
||||
of the localpart of the Matrix ID. This allows for the mapping provider to
|
||||
specify different algorithms, instead of the `default way <https://matrix.org/docs/spec/appendices#mapping-from-other-character-sets>`_.
|
||||
|
||||
If your Synapse configuration uses a custom mapping provider
|
||||
(`oidc_config.user_mapping_provider.module` is specified and not equal to
|
||||
`synapse.handlers.oidc_handler.JinjaOidcMappingProvider`) then you *must* ensure
|
||||
that `map_user_attributes` of the mapping provider performs some normalisation
|
||||
of the `localpart` returned. To match previous behaviour you can use the
|
||||
`map_username_to_mxid_localpart` function provided by Synapse. An example is
|
||||
shown below:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from synapse.types import map_username_to_mxid_localpart
|
||||
|
||||
class MyMappingProvider:
|
||||
def map_user_attributes(self, userinfo, token):
|
||||
# ... your custom logic ...
|
||||
sso_user_id = ...
|
||||
localpart = map_username_to_mxid_localpart(sso_user_id)
|
||||
|
||||
return {"localpart": localpart}
|
||||
|
||||
Removal historical Synapse Admin API
|
||||
------------------------------------
|
||||
|
||||
Historically, the Synapse Admin API has been accessible under:
|
||||
|
||||
* ``/_matrix/client/api/v1/admin``
|
||||
* ``/_matrix/client/unstable/admin``
|
||||
* ``/_matrix/client/r0/admin``
|
||||
* ``/_synapse/admin/v1``
|
||||
|
||||
The endpoints with ``/_matrix/client/*`` prefixes have been removed as of v1.24.0.
|
||||
The Admin API is now only accessible under:
|
||||
|
||||
* ``/_synapse/admin/v1``
|
||||
|
||||
The only exception is the `/admin/whois` endpoint, which is
|
||||
`also available via the client-server API <https://matrix.org/docs/spec/client_server/r0.6.1#get-matrix-client-r0-admin-whois-userid>`_.
|
||||
|
||||
The deprecation of the old endpoints was announced with Synapse 1.20.0 (released
|
||||
on 2020-09-22) and makes it easier for homeserver admins to lock down external
|
||||
access to the Admin API endpoints.
|
||||
|
||||
Upgrading to v1.23.0
|
||||
====================
|
||||
|
||||
|
||||
changelog.d/8906.misc (new file)
@@ -0,0 +1 @@
|
||||
Fix multiarch docker image builds.
|
||||
@@ -1 +0,0 @@
|
||||
Add a dockerfile for running Synapse in worker-mode under Complement.
|
||||
@@ -1 +0,0 @@
|
||||
Speed up federation transmission by using fewer database calls. Contributed by @ShadowJonathan.
|
||||
@@ -1 +0,0 @@
|
||||
Apply `pyupgrade` across the codebase.
|
||||
@@ -1 +0,0 @@
|
||||
Fix thumbnail generation for some sites with non-standard content types. Contributed by @rkfg.
|
||||
@@ -1 +0,0 @@
|
||||
Move some replication processing out of `generic_worker`.
|
||||
@@ -1 +0,0 @@
|
||||
Update experimental support for [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083): restricting room access via group membership.
|
||||
@@ -1 +0,0 @@
|
||||
Add a note to the docker docs mentioning that we mirror upstream's supported Docker platforms.
|
||||
@@ -1 +0,0 @@
|
||||
Replace `HomeServer.get_config()` with inline references.
|
||||
@@ -1 +0,0 @@
|
||||
Add experimental support for handling presence on a worker.
|
||||
@@ -24,7 +24,6 @@ import sys
|
||||
import time
|
||||
import urllib
|
||||
from http import TwistedHttpClient
|
||||
from typing import Optional
|
||||
|
||||
import nacl.encoding
|
||||
import nacl.signing
|
||||
@@ -93,7 +92,7 @@ class SynapseCmd(cmd.Cmd):
|
||||
return self.config["user"].split(":")[1]
|
||||
|
||||
def do_config(self, line):
|
||||
"""Show the config for this client: "config"
|
||||
""" Show the config for this client: "config"
|
||||
Edit a key value mapping: "config key value" e.g. "config token 1234"
|
||||
Config variables:
|
||||
user: The username to auth with.
|
||||
@@ -361,7 +360,7 @@ class SynapseCmd(cmd.Cmd):
|
||||
print(e)
|
||||
|
||||
def do_topic(self, line):
|
||||
""" "topic [set|get] <roomid> [<newtopic>]"
|
||||
""""topic [set|get] <roomid> [<newtopic>]"
|
||||
Set the topic for a room: topic set <roomid> <newtopic>
|
||||
Get the topic for a room: topic get <roomid>
|
||||
"""
|
||||
@@ -691,7 +690,7 @@ class SynapseCmd(cmd.Cmd):
|
||||
self._do_presence_state(2, line)
|
||||
|
||||
def _parse(self, line, keys, force_keys=False):
|
||||
"""Parses the given line.
|
||||
""" Parses the given line.
|
||||
|
||||
Args:
|
||||
line : The line to parse
|
||||
@@ -719,10 +718,10 @@ class SynapseCmd(cmd.Cmd):
|
||||
method,
|
||||
path,
|
||||
data=None,
|
||||
query_params: Optional[dict] = None,
|
||||
query_params={"access_token": None},
|
||||
alt_text=None,
|
||||
):
|
||||
"""Runs an HTTP request and pretty prints the output.
|
||||
""" Runs an HTTP request and pretty prints the output.
|
||||
|
||||
Args:
|
||||
method: HTTP method
|
||||
@@ -730,8 +729,6 @@ class SynapseCmd(cmd.Cmd):
|
||||
data: Raw JSON data if any
|
||||
query_params: dict of query parameters to add to the url
|
||||
"""
|
||||
query_params = query_params or {"access_token": None}
|
||||
|
||||
url = self._url() + path
|
||||
if "access_token" in query_params:
|
||||
query_params["access_token"] = self._tok()
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2014-2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
@@ -15,7 +16,6 @@
|
||||
import json
|
||||
import urllib
|
||||
from pprint import pformat
|
||||
from typing import Optional
|
||||
|
||||
from twisted.internet import defer, reactor
|
||||
from twisted.web.client import Agent, readBody
|
||||
@@ -23,10 +23,11 @@ from twisted.web.http_headers import Headers
|
||||
|
||||
|
||||
class HttpClient:
|
||||
"""Interface for talking json over http"""
|
||||
""" Interface for talking json over http
|
||||
"""
|
||||
|
||||
def put_json(self, url, data):
|
||||
"""Sends the specifed json data using PUT
|
||||
""" Sends the specifed json data using PUT
|
||||
|
||||
Args:
|
||||
url (str): The URL to PUT data to.
|
||||
@@ -40,7 +41,7 @@ class HttpClient:
|
||||
pass
|
||||
|
||||
def get_json(self, url, args=None):
|
||||
"""Gets some json from the given host homeserver and path
|
||||
""" Gets some json from the given host homeserver and path
|
||||
|
||||
Args:
|
||||
url (str): The URL to GET data from.
|
||||
@@ -57,7 +58,7 @@ class HttpClient:
|
||||
|
||||
|
||||
class TwistedHttpClient(HttpClient):
|
||||
"""Wrapper around the twisted HTTP client api.
|
||||
""" Wrapper around the twisted HTTP client api.
|
||||
|
||||
Attributes:
|
||||
agent (twisted.web.client.Agent): The twisted Agent used to send the
|
||||
@@ -85,9 +86,9 @@ class TwistedHttpClient(HttpClient):
|
||||
body = yield readBody(response)
|
||||
defer.returnValue(json.loads(body))
|
||||
|
||||
def _create_put_request(self, url, json_data, headers_dict: Optional[dict] = None):
|
||||
"""Wrapper of _create_request to issue a PUT request"""
|
||||
headers_dict = headers_dict or {}
|
||||
def _create_put_request(self, url, json_data, headers_dict={}):
|
||||
""" Wrapper of _create_request to issue a PUT request
|
||||
"""
|
||||
|
||||
if "Content-Type" not in headers_dict:
|
||||
raise defer.error(RuntimeError("Must include Content-Type header for PUTs"))
|
||||
@@ -96,22 +97,15 @@ class TwistedHttpClient(HttpClient):
|
||||
"PUT", url, producer=_JsonProducer(json_data), headers_dict=headers_dict
|
||||
)
|
||||
|
||||
def _create_get_request(self, url, headers_dict: Optional[dict] = None):
|
||||
"""Wrapper of _create_request to issue a GET request"""
|
||||
return self._create_request("GET", url, headers_dict=headers_dict or {})
|
||||
def _create_get_request(self, url, headers_dict={}):
|
||||
""" Wrapper of _create_request to issue a GET request
|
||||
"""
|
||||
return self._create_request("GET", url, headers_dict=headers_dict)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def do_request(
|
||||
self,
|
||||
method,
|
||||
url,
|
||||
data=None,
|
||||
qparams=None,
|
||||
jsonreq=True,
|
||||
headers: Optional[dict] = None,
|
||||
self, method, url, data=None, qparams=None, jsonreq=True, headers={}
|
||||
):
|
||||
headers = headers or {}
|
||||
|
||||
if qparams:
|
||||
url = "%s?%s" % (url, urllib.urlencode(qparams, True))
|
||||
|
||||
@@ -132,12 +126,9 @@ class TwistedHttpClient(HttpClient):
|
||||
defer.returnValue(json.loads(body))
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def _create_request(
|
||||
self, method, url, producer=None, headers_dict: Optional[dict] = None
|
||||
):
|
||||
"""Creates and sends a request to the given url"""
|
||||
headers_dict = headers_dict or {}
|
||||
|
||||
def _create_request(self, method, url, producer=None, headers_dict={}):
|
||||
""" Creates and sends a request to the given url
|
||||
"""
|
||||
headers_dict["User-Agent"] = ["Synapse Cmd Client"]
|
||||
|
||||
retries_left = 5
|
||||
@@ -194,7 +185,8 @@ class _RawProducer:
|
||||
|
||||
|
||||
class _JsonProducer:
|
||||
"""Used by the twisted http client to create the HTTP body from json"""
|
||||
""" Used by the twisted http client to create the HTTP body from json
|
||||
"""
|
||||
|
||||
def __init__(self, jsn):
|
||||
self.data = jsn
|
||||
|
||||
@@ -63,7 +63,8 @@ class CursesStdIO:
|
||||
self.redraw()
|
||||
|
||||
def redraw(self):
|
||||
"""method for redisplaying lines based on internal list of lines"""
|
||||
""" method for redisplaying lines
|
||||
based on internal list of lines """
|
||||
|
||||
self.stdscr.clear()
|
||||
self.paintStatus(self.statusText)
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2014-2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
@@ -55,7 +56,7 @@ def excpetion_errback(failure):
|
||||
|
||||
|
||||
class InputOutput:
|
||||
"""This is responsible for basic I/O so that a user can interact with
|
||||
""" This is responsible for basic I/O so that a user can interact with
|
||||
the example app.
|
||||
"""
|
||||
|
||||
@@ -67,7 +68,8 @@ class InputOutput:
|
||||
self.server = server
|
||||
|
||||
def on_line(self, line):
|
||||
"""This is where we process commands."""
|
||||
""" This is where we process commands.
|
||||
"""
|
||||
|
||||
try:
|
||||
m = re.match(r"^join (\S+)$", line)
|
||||
@@ -131,7 +133,7 @@ class IOLoggerHandler(logging.Handler):
|
||||
|
||||
|
||||
class Room:
|
||||
"""Used to store (in memory) the current membership state of a room, and
|
||||
""" Used to store (in memory) the current membership state of a room, and
|
||||
which home servers we should send PDUs associated with the room to.
|
||||
"""
|
||||
|
||||
@@ -146,7 +148,8 @@ class Room:
|
||||
self.have_got_metadata = False
|
||||
|
||||
def add_participant(self, participant):
|
||||
"""Someone has joined the room"""
|
||||
""" Someone has joined the room
|
||||
"""
|
||||
self.participants.add(participant)
|
||||
self.invited.discard(participant)
|
||||
|
||||
@@ -157,13 +160,14 @@ class Room:
|
||||
self.oldest_server = server
|
||||
|
||||
def add_invited(self, invitee):
|
||||
"""Someone has been invited to the room"""
|
||||
""" Someone has been invited to the room
|
||||
"""
|
||||
self.invited.add(invitee)
|
||||
self.servers.add(origin_from_ucid(invitee))
|
||||
|
||||
|
||||
class HomeServer(ReplicationHandler):
|
||||
"""A very basic home server implentation that allows people to join a
|
||||
""" A very basic home server implentation that allows people to join a
|
||||
room and then invite other people.
|
||||
"""
|
||||
|
||||
@@ -177,7 +181,8 @@ class HomeServer(ReplicationHandler):
|
||||
self.output = output
|
||||
|
||||
def on_receive_pdu(self, pdu):
|
||||
"""We just received a PDU"""
|
||||
""" We just received a PDU
|
||||
"""
|
||||
pdu_type = pdu.pdu_type
|
||||
|
||||
if pdu_type == "sy.room.message":
|
||||
@@ -194,20 +199,23 @@ class HomeServer(ReplicationHandler):
|
||||
)
|
||||
|
||||
def _on_message(self, pdu):
|
||||
"""We received a message"""
|
||||
""" We received a message
|
||||
"""
|
||||
self.output.print_line(
|
||||
"#%s %s %s" % (pdu.context, pdu.content["sender"], pdu.content["body"])
|
||||
)
|
||||
|
||||
def _on_join(self, context, joinee):
|
||||
"""Someone has joined a room, either a remote user or a local user"""
|
||||
""" Someone has joined a room, either a remote user or a local user
|
||||
"""
|
||||
room = self._get_or_create_room(context)
|
||||
room.add_participant(joinee)
|
||||
|
||||
self.output.print_line("#%s %s %s" % (context, joinee, "*** JOINED"))
|
||||
|
||||
def _on_invite(self, origin, context, invitee):
|
||||
"""Someone has been invited"""
|
||||
""" Someone has been invited
|
||||
"""
|
||||
room = self._get_or_create_room(context)
|
||||
room.add_invited(invitee)
|
||||
|
||||
@@ -220,27 +228,27 @@ class HomeServer(ReplicationHandler):
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def send_message(self, room_name, sender, body):
|
||||
"""Send a message to a room!"""
|
||||
""" Send a message to a room!
|
||||
"""
|
||||
destinations = yield self.get_servers_for_context(room_name)
|
||||
|
||||
try:
|
||||
yield self.replication_layer.send_pdus(
|
||||
[
|
||||
Pdu.create_new(
|
||||
context=room_name,
|
||||
pdu_type="sy.room.message",
|
||||
content={"sender": sender, "body": body},
|
||||
origin=self.server_name,
|
||||
destinations=destinations,
|
||||
)
|
||||
]
|
||||
yield self.replication_layer.send_pdu(
|
||||
Pdu.create_new(
|
||||
context=room_name,
|
||||
pdu_type="sy.room.message",
|
||||
content={"sender": sender, "body": body},
|
||||
origin=self.server_name,
|
||||
destinations=destinations,
|
||||
)
|
||||
)
|
||||
except Exception as e:
|
||||
logger.exception(e)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def join_room(self, room_name, sender, joinee):
|
||||
"""Join a room!"""
|
||||
""" Join a room!
|
||||
"""
|
||||
self._on_join(room_name, joinee)
|
||||
|
||||
destinations = yield self.get_servers_for_context(room_name)
|
||||
@@ -255,30 +263,29 @@ class HomeServer(ReplicationHandler):
|
||||
origin=self.server_name,
|
||||
destinations=destinations,
|
||||
)
|
||||
yield self.replication_layer.send_pdus([pdu])
|
||||
yield self.replication_layer.send_pdu(pdu)
|
||||
except Exception as e:
|
||||
logger.exception(e)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def invite_to_room(self, room_name, sender, invitee):
|
||||
"""Invite someone to a room!"""
|
||||
""" Invite someone to a room!
|
||||
"""
|
||||
self._on_invite(self.server_name, room_name, invitee)
|
||||
|
||||
destinations = yield self.get_servers_for_context(room_name)
|
||||
|
||||
try:
|
||||
yield self.replication_layer.send_pdus(
|
||||
[
|
||||
Pdu.create_new(
|
||||
context=room_name,
|
||||
is_state=True,
|
||||
pdu_type="sy.room.member",
|
||||
state_key=invitee,
|
||||
content={"membership": "invite"},
|
||||
origin=self.server_name,
|
||||
destinations=destinations,
|
||||
)
|
||||
]
|
||||
yield self.replication_layer.send_pdu(
|
||||
Pdu.create_new(
|
||||
context=room_name,
|
||||
is_state=True,
|
||||
pdu_type="sy.room.member",
|
||||
state_key=invitee,
|
||||
content={"membership": "invite"},
|
||||
origin=self.server_name,
|
||||
destinations=destinations,
|
||||
)
|
||||
)
|
||||
except Exception as e:
|
||||
logger.exception(e)
|
||||
|
||||
@@ -193,12 +193,15 @@ class TrivialXmppClient:
|
||||
time.sleep(7)
|
||||
print("SSRC spammer started")
|
||||
while self.running:
|
||||
ssrcMsg = "<presence to='%(tojid)s' xmlns='jabber:client'><x xmlns='http://jabber.org/protocol/muc'/><c xmlns='http://jabber.org/protocol/caps' hash='sha-1' node='http://jitsi.org/jitsimeet' ver='0WkSdhFnAUxrz4ImQQLdB80GFlE='/><nick xmlns='http://jabber.org/protocol/nick'>%(nick)s</nick><stats xmlns='http://jitsi.org/jitmeet/stats'><stat name='bitrate_download' value='175'/><stat name='bitrate_upload' value='176'/><stat name='packetLoss_total' value='0'/><stat name='packetLoss_download' value='0'/><stat name='packetLoss_upload' value='0'/></stats><media xmlns='http://estos.de/ns/mjs'><source type='audio' ssrc='%(assrc)s' direction='sendre'/><source type='video' ssrc='%(vssrc)s' direction='sendre'/></media></presence>" % {
|
||||
"tojid": "%s@%s/%s" % (ROOMNAME, ROOMDOMAIN, self.shortJid),
|
||||
"nick": self.userId,
|
||||
"assrc": self.ssrcs["audio"],
|
||||
"vssrc": self.ssrcs["video"],
|
||||
}
|
||||
ssrcMsg = (
|
||||
"<presence to='%(tojid)s' xmlns='jabber:client'><x xmlns='http://jabber.org/protocol/muc'/><c xmlns='http://jabber.org/protocol/caps' hash='sha-1' node='http://jitsi.org/jitsimeet' ver='0WkSdhFnAUxrz4ImQQLdB80GFlE='/><nick xmlns='http://jabber.org/protocol/nick'>%(nick)s</nick><stats xmlns='http://jitsi.org/jitmeet/stats'><stat name='bitrate_download' value='175'/><stat name='bitrate_upload' value='176'/><stat name='packetLoss_total' value='0'/><stat name='packetLoss_download' value='0'/><stat name='packetLoss_upload' value='0'/></stats><media xmlns='http://estos.de/ns/mjs'><source type='audio' ssrc='%(assrc)s' direction='sendre'/><source type='video' ssrc='%(vssrc)s' direction='sendre'/></media></presence>"
|
||||
% {
|
||||
"tojid": "%s@%s/%s" % (ROOMNAME, ROOMDOMAIN, self.shortJid),
|
||||
"nick": self.userId,
|
||||
"assrc": self.ssrcs["audio"],
|
||||
"vssrc": self.ssrcs["video"],
|
||||
}
|
||||
)
|
||||
res = self.sendIq(ssrcMsg)
|
||||
print("reply from ssrc announce: ", res)
|
||||
time.sleep(10)
|
||||
|
||||
@@ -20,7 +20,6 @@ Add a new job to the main prometheus.conf file:
|
||||
```
|
||||
|
||||
### for Prometheus v2
|
||||
|
||||
Add a new job to the main prometheus.yml file:
|
||||
|
||||
```yaml
|
||||
@@ -30,17 +29,14 @@ Add a new job to the main prometheus.yml file:
|
||||
scheme: "https"
|
||||
|
||||
static_configs:
|
||||
- targets: ["my.server.here:port"]
|
||||
- targets: ['SERVER.LOCATION:PORT']
|
||||
```
|
||||
|
||||
An example of a Prometheus configuration with workers can be found in
|
||||
[metrics-howto.md](https://github.com/matrix-org/synapse/blob/master/docs/metrics-howto.md).
|
||||
|
||||
To use `synapse.rules` add
|
||||
|
||||
```yaml
|
||||
rule_files:
|
||||
- "/PATH/TO/synapse-v2.rules"
|
||||
rule_files:
|
||||
- "/PATH/TO/synapse-v2.rules"
|
||||
```
|
||||
|
||||
Metrics are disabled by default when running synapse; they must be enabled
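If useful, a sketch of what enabling them in `homeserver.yaml` typically looks like (the port and bind address are placeholders):

```yaml
# Example only: expose a dedicated metrics listener on localhost.
enable_metrics: true
listeners:
  - port: 9092
    type: metrics
    bind_addresses: ['127.0.0.1']
```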
|
||||
|
||||
@@ -9,7 +9,7 @@
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#process_resource_utime"),
|
||||
expr: "rate(process_cpu_seconds_total[2m]) * 100",
|
||||
name: "[[job]]-[[index]]",
|
||||
name: "[[job]]",
|
||||
min: 0,
|
||||
max: 100,
|
||||
renderer: "line",
|
||||
@@ -22,12 +22,12 @@ new PromConsole.Graph({
|
||||
</script>
|
||||
|
||||
<h3>Memory</h3>
|
||||
<div id="process_resident_memory_bytes"></div>
|
||||
<div id="process_resource_maxrss"></div>
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#process_resident_memory_bytes"),
|
||||
expr: "process_resident_memory_bytes",
|
||||
name: "[[job]]-[[index]]",
|
||||
node: document.querySelector("#process_resource_maxrss"),
|
||||
expr: "process_psutil_rss:max",
|
||||
name: "Maxrss",
|
||||
min: 0,
|
||||
renderer: "line",
|
||||
height: 150,
|
||||
@@ -43,8 +43,8 @@ new PromConsole.Graph({
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#process_fds"),
|
||||
expr: "process_open_fds",
|
||||
name: "[[job]]-[[index]]",
|
||||
expr: "process_open_fds{job='synapse'}",
|
||||
name: "FDs",
|
||||
min: 0,
|
||||
renderer: "line",
|
||||
height: 150,
|
||||
@@ -62,8 +62,8 @@ new PromConsole.Graph({
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#reactor_total_time"),
|
||||
expr: "rate(python_twisted_reactor_tick_time_sum[2m])",
|
||||
name: "[[job]]-[[index]]",
|
||||
expr: "rate(python_twisted_reactor_tick_time:total[2m]) / 1000",
|
||||
name: "time",
|
||||
max: 1,
|
||||
min: 0,
|
||||
renderer: "area",
|
||||
@@ -80,8 +80,8 @@ new PromConsole.Graph({
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#reactor_average_time"),
|
||||
expr: "rate(python_twisted_reactor_tick_time_sum[2m]) / rate(python_twisted_reactor_tick_time_count[2m])",
|
||||
name: "[[job]]-[[index]]",
|
||||
expr: "rate(python_twisted_reactor_tick_time:total[2m]) / rate(python_twisted_reactor_tick_time:count[2m]) / 1000",
|
||||
name: "time",
|
||||
min: 0,
|
||||
renderer: "line",
|
||||
height: 150,
|
||||
@@ -97,14 +97,14 @@ new PromConsole.Graph({
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#reactor_pending_calls"),
|
||||
expr: "rate(python_twisted_reactor_pending_calls_sum[30s]) / rate(python_twisted_reactor_pending_calls_count[30s])",
|
||||
name: "[[job]]-[[index]]",
|
||||
expr: "rate(python_twisted_reactor_pending_calls:total[30s])/rate(python_twisted_reactor_pending_calls:count[30s])",
|
||||
name: "calls",
|
||||
min: 0,
|
||||
renderer: "line",
|
||||
height: 150,
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yTitle: "Pending Calls"
|
||||
yTitle: "Pending Cals"
|
||||
})
|
||||
</script>
|
||||
|
||||
@@ -115,7 +115,7 @@ new PromConsole.Graph({
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#synapse_storage_query_time"),
|
||||
expr: "sum(rate(synapse_storage_query_time_count[2m])) by (verb)",
|
||||
expr: "rate(synapse_storage_query_time:count[2m])",
|
||||
name: "[[verb]]",
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||
@@ -129,8 +129,8 @@ new PromConsole.Graph({
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#synapse_storage_transaction_time"),
|
||||
expr: "topk(10, rate(synapse_storage_transaction_time_count[2m]))",
|
||||
name: "[[job]]-[[index]] [[desc]]",
|
||||
expr: "rate(synapse_storage_transaction_time:count[2m])",
|
||||
name: "[[desc]]",
|
||||
min: 0,
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||
@@ -140,12 +140,12 @@ new PromConsole.Graph({
|
||||
</script>
|
||||
|
||||
<h3>Transaction execution time</h3>
|
||||
<div id="synapse_storage_transactions_time_sec"></div>
|
||||
<div id="synapse_storage_transactions_time_msec"></div>
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#synapse_storage_transactions_time_sec"),
|
||||
expr: "rate(synapse_storage_transaction_time_sum[2m])",
|
||||
name: "[[job]]-[[index]] [[desc]]",
|
||||
node: document.querySelector("#synapse_storage_transactions_time_msec"),
|
||||
expr: "rate(synapse_storage_transaction_time:total[2m]) / 1000",
|
||||
name: "[[desc]]",
|
||||
min: 0,
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||
@@ -154,33 +154,34 @@ new PromConsole.Graph({
|
||||
})
|
||||
</script>
|
||||
|
||||
<h3>Average time waiting for database connection</h3>
|
||||
<div id="synapse_storage_avg_waiting_time"></div>
|
||||
<h3>Database scheduling latency</h3>
|
||||
<div id="synapse_storage_schedule_time"></div>
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#synapse_storage_avg_waiting_time"),
|
||||
expr: "rate(synapse_storage_schedule_time_sum[2m]) / rate(synapse_storage_schedule_time_count[2m])",
|
||||
name: "[[job]]-[[index]]",
|
||||
node: document.querySelector("#synapse_storage_schedule_time"),
|
||||
expr: "rate(synapse_storage_schedule_time:total[2m]) / 1000",
|
||||
name: "Total latency",
|
||||
min: 0,
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yUnits: "s",
|
||||
yTitle: "Time"
|
||||
yUnits: "s/s",
|
||||
yTitle: "Usage"
|
||||
})
|
||||
</script>
|
||||
|
||||
<h3>Cache request rate</h3>
|
||||
<div id="synapse_cache_request_rate"></div>
|
||||
<h3>Cache hit ratio</h3>
|
||||
<div id="synapse_cache_ratio"></div>
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#synapse_cache_request_rate"),
|
||||
expr: "rate(synapse_util_caches_cache:total[2m])",
|
||||
name: "[[job]]-[[index]] [[name]]",
|
||||
node: document.querySelector("#synapse_cache_ratio"),
|
||||
expr: "rate(synapse_util_caches_cache:total[2m]) * 100",
|
||||
name: "[[name]]",
|
||||
min: 0,
|
||||
max: 100,
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||
yUnits: "rps",
|
||||
yTitle: "Cache request rate"
|
||||
yUnits: "%",
|
||||
yTitle: "Percentage"
|
||||
})
|
||||
</script>
|
||||
|
||||
@@ -190,7 +191,7 @@ new PromConsole.Graph({
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#synapse_cache_size"),
|
||||
expr: "synapse_util_caches_cache:size",
|
||||
name: "[[job]]-[[index]] [[name]]",
|
||||
name: "[[name]]",
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||
yUnits: "",
|
||||
@@ -205,8 +206,8 @@ new PromConsole.Graph({
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#synapse_http_server_request_count_servlet"),
|
||||
expr: "rate(synapse_http_server_in_flight_requests_count[2m])",
|
||||
name: "[[job]]-[[index]] [[method]] [[servlet]]",
|
||||
expr: "rate(synapse_http_server_request_count:servlet[2m])",
|
||||
name: "[[servlet]]",
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yUnits: "req/s",
|
||||
@@ -218,8 +219,8 @@ new PromConsole.Graph({
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#synapse_http_server_request_count_servlet_minus_events"),
|
||||
expr: "rate(synapse_http_server_in_flight_requests_count{servlet!=\"EventStreamRestServlet\", servlet!=\"SyncRestServlet\"}[2m])",
|
||||
name: "[[job]]-[[index]] [[method]] [[servlet]]",
|
||||
expr: "rate(synapse_http_server_request_count:servlet{servlet!=\"EventStreamRestServlet\", servlet!=\"SyncRestServlet\"}[2m])",
|
||||
name: "[[servlet]]",
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yUnits: "req/s",
|
||||
@@ -232,8 +233,8 @@ new PromConsole.Graph({
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#synapse_http_server_response_time_avg"),
|
||||
expr: "rate(synapse_http_server_response_time_seconds_sum[2m]) / rate(synapse_http_server_response_count[2m])",
|
||||
name: "[[job]]-[[index]] [[servlet]]",
|
||||
expr: "rate(synapse_http_server_response_time_seconds[2m]) / rate(synapse_http_server_response_count[2m]) / 1000",
|
||||
name: "[[servlet]]",
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yUnits: "s/req",
|
||||
@@ -276,7 +277,7 @@ new PromConsole.Graph({
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#synapse_http_server_response_ru_utime"),
|
||||
expr: "rate(synapse_http_server_response_ru_utime_seconds[2m])",
|
||||
name: "[[job]]-[[index]] [[servlet]]",
|
||||
name: "[[servlet]]",
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yUnits: "s/s",
|
||||
@@ -291,7 +292,7 @@ new PromConsole.Graph({
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#synapse_http_server_response_db_txn_duration"),
|
||||
expr: "rate(synapse_http_server_response_db_txn_duration_seconds[2m])",
|
||||
name: "[[job]]-[[index]] [[servlet]]",
|
||||
name: "[[servlet]]",
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yUnits: "s/s",
|
||||
@@ -305,8 +306,8 @@ new PromConsole.Graph({
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#synapse_http_server_send_time_avg"),
|
||||
expr: "rate(synapse_http_server_response_time_seconds_sum{servlet='RoomSendEventRestServlet'}[2m]) / rate(synapse_http_server_response_count{servlet='RoomSendEventRestServlet'}[2m])",
|
||||
name: "[[job]]-[[index]] [[servlet]]",
|
||||
expr: "rate(synapse_http_server_response_time_second{servlet='RoomSendEventRestServlet'}[2m]) / rate(synapse_http_server_response_count{servlet='RoomSendEventRestServlet'}[2m]) / 1000",
|
||||
name: "[[servlet]]",
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yUnits: "s/req",
|
||||
@@ -322,7 +323,7 @@ new PromConsole.Graph({
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#synapse_federation_client_sent"),
|
||||
expr: "rate(synapse_federation_client_sent[2m])",
|
||||
name: "[[job]]-[[index]] [[type]]",
|
||||
name: "[[type]]",
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yUnits: "req/s",
|
||||
@@ -336,7 +337,7 @@ new PromConsole.Graph({
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#synapse_federation_server_received"),
|
||||
expr: "rate(synapse_federation_server_received[2m])",
|
||||
name: "[[job]]-[[index]] [[type]]",
|
||||
name: "[[type]]",
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yUnits: "req/s",
|
||||
@@ -366,7 +367,7 @@ new PromConsole.Graph({
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#synapse_notifier_listeners"),
|
||||
expr: "synapse_notifier_listeners",
|
||||
name: "[[job]]-[[index]]",
|
||||
name: "listeners",
|
||||
min: 0,
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||
@@ -381,7 +382,7 @@ new PromConsole.Graph({
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#synapse_notifier_notified_events"),
|
||||
expr: "rate(synapse_notifier_notified_events[2m])",
|
||||
name: "[[job]]-[[index]]",
|
||||
name: "events",
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yUnits: "events/s",
|
||||
|
||||
@@ -58,21 +58,3 @@ groups:
|
||||
labels:
|
||||
type: "PDU"
|
||||
expr: 'synapse_federation_transaction_queue_pending_pdus + 0'
|
||||
|
||||
- record: synapse_storage_events_persisted_by_source_type
|
||||
expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep{origin_type="remote"})
|
||||
labels:
|
||||
type: remote
|
||||
- record: synapse_storage_events_persisted_by_source_type
|
||||
expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep{origin_entity="*client*",origin_type="local"})
|
||||
labels:
|
||||
type: local
|
||||
- record: synapse_storage_events_persisted_by_source_type
|
||||
expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep{origin_entity!="*client*",origin_type="local"})
|
||||
labels:
|
||||
type: bridges
|
||||
- record: synapse_storage_events_persisted_by_event_type
|
||||
expr: sum without(origin_entity, origin_type) (synapse_storage_events_persisted_events_sep)
|
||||
- record: synapse_storage_events_persisted_by_origin
|
||||
expr: sum without(type) (synapse_storage_events_persisted_events_sep)
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
#!/usr/bin/env bash
|
||||
#!/bin/bash
|
||||
|
||||
# this script will use the api:
|
||||
# https://github.com/matrix-org/synapse/blob/master/docs/admin_api/purge_history_api.rst
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
#!/usr/bin/env bash
|
||||
#!/bin/bash
|
||||
|
||||
DOMAIN=yourserver.tld
|
||||
# add this user as admin in your home server:
|
||||
|
||||
debian/build_virtualenv
@@ -33,13 +33,11 @@ esac
|
||||
# Use --builtin-venv to use the better `venv` module from CPython 3.4+ rather
|
||||
# than the 2/3 compatible `virtualenv`.
|
||||
|
||||
# Pin pip to 20.3.4 to fix breakage in 21.0 on py3.5 (xenial)
|
||||
|
||||
dh_virtualenv \
|
||||
--install-suffix "matrix-synapse" \
|
||||
--builtin-venv \
|
||||
--python "$SNAKE" \
|
||||
--upgrade-pip-to="20.3.4" \
|
||||
--upgrade-pip \
|
||||
--preinstall="lxml" \
|
||||
--preinstall="mock" \
|
||||
--extra-pip-arg="--no-cache-dir" \
|
||||
@@ -50,27 +48,18 @@ PACKAGE_BUILD_DIR="debian/matrix-synapse-py3"
|
||||
VIRTUALENV_DIR="${PACKAGE_BUILD_DIR}${DH_VIRTUALENV_INSTALL_ROOT}/matrix-synapse"
|
||||
TARGET_PYTHON="${VIRTUALENV_DIR}/bin/python"
|
||||
|
||||
case "$DEB_BUILD_OPTIONS" in
|
||||
*nocheck*)
|
||||
# Skip running tests if "nocheck" present in $DEB_BUILD_OPTIONS
|
||||
;;
|
||||
# we copy the tests to a temporary directory so that we can put them on the
|
||||
# PYTHONPATH without putting the uninstalled synapse on the pythonpath.
|
||||
tmpdir=`mktemp -d`
|
||||
trap "rm -r $tmpdir" EXIT
|
||||
|
||||
*)
|
||||
# Copy tests to a temporary directory so that we can put them on the
|
||||
# PYTHONPATH without putting the uninstalled synapse on the pythonpath.
|
||||
tmpdir=`mktemp -d`
|
||||
trap "rm -r $tmpdir" EXIT
|
||||
cp -r tests "$tmpdir"
|
||||
|
||||
cp -r tests "$tmpdir"
|
||||
|
||||
PYTHONPATH="$tmpdir" \
|
||||
"${TARGET_PYTHON}" -m twisted.trial --reporter=text -j2 tests
|
||||
|
||||
;;
|
||||
esac
|
||||
PYTHONPATH="$tmpdir" \
|
||||
"${TARGET_PYTHON}" -B -m twisted.trial --reporter=text -j2 tests
|
||||
|
||||
# build the config file
|
||||
"${TARGET_PYTHON}" "${VIRTUALENV_DIR}/bin/generate_config" \
|
||||
"${TARGET_PYTHON}" -B "${VIRTUALENV_DIR}/bin/generate_config" \
|
||||
--config-dir="/etc/matrix-synapse" \
|
||||
--data-dir="/var/lib/matrix-synapse" |
|
||||
perl -pe '
|
||||
@@ -96,7 +85,7 @@ esac
|
||||
' > "${PACKAGE_BUILD_DIR}/etc/matrix-synapse/homeserver.yaml"
|
||||
|
||||
# build the log config file
|
||||
"${TARGET_PYTHON}" "${VIRTUALENV_DIR}/bin/generate_log_config" \
|
||||
"${TARGET_PYTHON}" -B "${VIRTUALENV_DIR}/bin/generate_log_config" \
|
||||
--output-file="${PACKAGE_BUILD_DIR}/etc/matrix-synapse/log.yaml"
|
||||
|
||||
# add a dependency on the right version of python to substvars.
|
||||
|
||||
debian/changelog
@@ -1,80 +1,3 @@
|
||||
matrix-synapse-py3 (1.31.0+nmu1) UNRELEASED; urgency=medium
|
||||
|
||||
* Skip tests when DEB_BUILD_OPTIONS contains "nocheck".
|
||||
|
||||
-- Dan Callahan <danc@element.io> Mon, 12 Apr 2021 13:07:36 +0000
|
||||
|
||||
matrix-synapse-py3 (1.31.0) stable; urgency=medium
|
||||
|
||||
* New synapse release 1.31.0.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Tue, 06 Apr 2021 13:08:29 +0100
|
||||
|
||||
matrix-synapse-py3 (1.30.1) stable; urgency=medium
|
||||
|
||||
* New synapse release 1.30.1.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Fri, 26 Mar 2021 12:01:28 +0000
|
||||
|
||||
matrix-synapse-py3 (1.30.0) stable; urgency=medium
|
||||
|
||||
* New synapse release 1.30.0.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Mon, 22 Mar 2021 13:15:34 +0000
|
||||
|
||||
matrix-synapse-py3 (1.29.0) stable; urgency=medium
|
||||
|
||||
[ Jonathan de Jong ]
|
||||
* Remove the python -B flag (don't generate bytecode) in scripts and documentation.
|
||||
|
||||
[ Synapse Packaging team ]
|
||||
* New synapse release 1.29.0.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Mon, 08 Mar 2021 13:51:50 +0000
|
||||
|
||||
matrix-synapse-py3 (1.28.0) stable; urgency=medium
|
||||
|
||||
* New synapse release 1.28.0.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Thu, 25 Feb 2021 10:21:57 +0000
|
||||
|
||||
matrix-synapse-py3 (1.27.0) stable; urgency=medium
|
||||
|
||||
[ Dan Callahan ]
|
||||
* Fix build on Ubuntu 16.04 LTS (Xenial).
|
||||
|
||||
[ Synapse Packaging team ]
|
||||
* New synapse release 1.27.0.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Tue, 16 Feb 2021 13:11:28 +0000
|
||||
|
||||
matrix-synapse-py3 (1.26.0) stable; urgency=medium
|
||||
|
||||
[ Richard van der Hoff ]
|
||||
* Remove dependency on `python3-distutils`.
|
||||
|
||||
[ Synapse Packaging team ]
|
||||
* New synapse release 1.26.0.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Wed, 27 Jan 2021 12:43:35 -0500
|
||||
|
||||
matrix-synapse-py3 (1.25.0) stable; urgency=medium
|
||||
|
||||
[ Dan Callahan ]
|
||||
* Update dependencies to account for the removal of the transitional
|
||||
dh-systemd package from Debian Bullseye.
|
||||
|
||||
[ Synapse Packaging team ]
|
||||
* New synapse release 1.25.0.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Wed, 13 Jan 2021 10:14:55 +0000
|
||||
|
||||
matrix-synapse-py3 (1.24.0) stable; urgency=medium
|
||||
|
||||
* New synapse release 1.24.0.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Wed, 09 Dec 2020 10:14:30 +0000
|
||||
|
||||
matrix-synapse-py3 (1.23.1) stable; urgency=medium
|
||||
|
||||
* New synapse release 1.23.1.
|
||||
|
||||
debian/control
@@ -3,11 +3,9 @@ Section: contrib/python
|
||||
Priority: extra
|
||||
Maintainer: Synapse Packaging team <packages@matrix.org>
|
||||
# keep this list in sync with the build dependencies in docker/Dockerfile-dhvirtualenv.
|
||||
# TODO: Remove the dependency on dh-systemd after dropping support for Ubuntu xenial
|
||||
# On all other supported releases, it's merely a transitional package which
|
||||
# does nothing but depends on debhelper (> 9.20160709)
|
||||
Build-Depends:
|
||||
debhelper (>= 9.20160709) | dh-systemd,
|
||||
debhelper (>= 9),
|
||||
dh-systemd,
|
||||
dh-virtualenv (>= 1.1),
|
||||
libsystemd-dev,
|
||||
libpq-dev,
|
||||
@@ -31,6 +29,7 @@ Pre-Depends: dpkg (>= 1.16.1)
|
||||
Depends:
|
||||
adduser,
|
||||
debconf,
|
||||
python3-distutils|libpython3-stdlib (<< 3.6),
|
||||
${misc:Depends},
|
||||
${shlibs:Depends},
|
||||
${synapse:pydepends},
|
||||
|
||||
debian/synctl.1
@@ -44,7 +44,7 @@ Configuration file may be generated as follows:
|
||||
.
|
||||
.nf
|
||||
|
||||
$ python \-m synapse\.app\.homeserver \-c config\.yaml \-\-generate\-config \-\-server\-name=<server name>
|
||||
$ python \-B \-m synapse\.app\.homeserver \-c config\.yaml \-\-generate\-config \-\-server\-name=<server name>
|
||||
.
|
||||
.fi
|
||||
.
|
||||
|
||||
debian/synctl.ronn
@@ -41,7 +41,7 @@ process.
|
||||
|
||||
Configuration file may be generated as follows:
|
||||
|
||||
$ python -m synapse.app.homeserver -c config.yaml --generate-config --server-name=<server name>
|
||||
$ python -B -m synapse.app.homeserver -c config.yaml --generate-config --server-name=<server name>
|
||||
|
||||
## ENVIRONMENT
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
#!/usr/bin/env bash
|
||||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
#!/usr/bin/env bash
|
||||
#!/bin/bash
|
||||
|
||||
DIR="$( cd "$( dirname "$0" )" && pwd )"
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
#!/usr/bin/env bash
|
||||
#!/bin/bash
|
||||
|
||||
DIR="$( cd "$( dirname "$0" )" && pwd )"
|
||||
|
||||
|
||||
demo/webserver.py (new file)
@@ -0,0 +1,59 @@
|
||||
import argparse
|
||||
import BaseHTTPServer
|
||||
import os
|
||||
import SimpleHTTPServer
|
||||
import cgi, logging
|
||||
|
||||
from daemonize import Daemonize
|
||||
|
||||
|
||||
class SimpleHTTPRequestHandlerWithPOST(SimpleHTTPServer.SimpleHTTPRequestHandler):
|
||||
UPLOAD_PATH = "upload"
|
||||
|
||||
"""
|
||||
Accept all post request as file upload
|
||||
"""
|
||||
|
||||
def do_POST(self):
|
||||
|
||||
path = os.path.join(self.UPLOAD_PATH, os.path.basename(self.path))
|
||||
length = self.headers["content-length"]
|
||||
data = self.rfile.read(int(length))
|
||||
|
||||
with open(path, "wb") as fh:
|
||||
fh.write(data)
|
||||
|
||||
self.send_response(200)
|
||||
self.send_header("Content-Type", "application/json")
|
||||
self.end_headers()
|
||||
|
||||
# Return the absolute path of the uploaded file
|
||||
self.wfile.write('{"url":"/%s"}' % path)
|
||||
|
||||
|
||||
def setup():
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("directory")
|
||||
parser.add_argument("-p", "--port", dest="port", type=int, default=8080)
|
||||
parser.add_argument("-P", "--pid-file", dest="pid", default="web.pid")
|
||||
args = parser.parse_args()
|
||||
|
||||
# Get absolute path to directory to serve, as daemonize changes to '/'
|
||||
os.chdir(args.directory)
|
||||
dr = os.getcwd()
|
||||
|
||||
httpd = BaseHTTPServer.HTTPServer(("", args.port), SimpleHTTPRequestHandlerWithPOST)
|
||||
|
||||
def run():
|
||||
os.chdir(dr)
|
||||
httpd.serve_forever()
|
||||
|
||||
daemon = Daemonize(
|
||||
app="synapse-webclient", pid=args.pid, action=run, auto_close_fds=False
|
||||
)
|
||||
|
||||
daemon.start()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
setup()
|
||||
@@ -28,32 +28,31 @@ RUN apt-get update && apt-get install -y \
|
||||
libwebp-dev \
|
||||
libxml++2.6-dev \
|
||||
libxslt1-dev \
|
||||
openssl \
|
||||
rustc \
|
||||
zlib1g-dev \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Copy just what we need to pip install
|
||||
# Build dependencies that are not available as wheels, to speed up rebuilds
|
||||
RUN pip install --prefix="/install" --no-warn-script-location \
|
||||
frozendict \
|
||||
jaeger-client \
|
||||
opentracing \
|
||||
# Match the version constraints of Synapse
|
||||
"prometheus_client>=0.4.0,<0.9.0" \
|
||||
psycopg2 \
|
||||
pycparser \
|
||||
pyrsistent \
|
||||
pyyaml \
|
||||
simplejson \
|
||||
threadloop \
|
||||
thrift
|
||||
|
||||
# now install synapse and all of the python deps to /install.
|
||||
COPY synapse /synapse/synapse/
|
||||
COPY scripts /synapse/scripts/
|
||||
COPY MANIFEST.in README.rst setup.py synctl /synapse/
|
||||
COPY synapse/__init__.py /synapse/synapse/__init__.py
|
||||
COPY synapse/python_dependencies.py /synapse/synapse/python_dependencies.py
|
||||
|
||||
# To speed up rebuilds, install all of the dependencies before we copy over
|
||||
# the whole synapse project so that this layer in the Docker cache can be
|
||||
# used while you develop on the source
|
||||
#
|
||||
# This is aiming at installing the `install_requires` and `extras_require` from `setup.py`
|
||||
RUN pip install --prefix="/install" --no-warn-script-location \
|
||||
/synapse[all]
|
||||
|
||||
# Copy over the rest of the project
|
||||
COPY synapse /synapse/synapse/
|
||||
|
||||
# Install the synapse package itself and all of its children packages.
|
||||
#
|
||||
# This is aiming at installing only the `packages=find_packages(...)` from `setup.py`
|
||||
RUN pip install --prefix="/install" --no-deps --no-warn-script-location /synapse
|
||||
/synapse[all]
|
||||
|
||||
###
|
||||
### Stage 1: runtime
|
||||
@@ -61,11 +60,6 @@ RUN pip install --prefix="/install" --no-deps --no-warn-script-location /synapse
|
||||
|
||||
FROM docker.io/python:${PYTHON_VERSION}-slim
|
||||
|
||||
LABEL org.opencontainers.image.url='https://matrix.org/docs/projects/server/synapse'
|
||||
LABEL org.opencontainers.image.documentation='https://github.com/matrix-org/synapse/blob/master/docker/README.md'
|
||||
LABEL org.opencontainers.image.source='https://github.com/matrix-org/synapse.git'
|
||||
LABEL org.opencontainers.image.licenses='Apache-2.0'
|
||||
|
||||
RUN apt-get update && apt-get install -y \
|
||||
curl \
|
||||
gosu \
|
||||
@@ -73,10 +67,7 @@ RUN apt-get update && apt-get install -y \
|
||||
libpq5 \
|
||||
libwebp6 \
|
||||
xmlsec1 \
|
||||
libjemalloc2 \
|
||||
libssl-dev \
|
||||
openssl \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
COPY --from=builder /install /usr/local
|
||||
COPY ./docker/start.py /start.py
|
||||
@@ -89,4 +80,4 @@ EXPOSE 8008/tcp 8009/tcp 8448/tcp
|
||||
ENTRYPOINT ["/start.py"]
|
||||
|
||||
HEALTHCHECK --interval=1m --timeout=5s \
|
||||
CMD curl -fSs http://localhost:8008/health || exit 1
|
||||
CMD curl -fSs http://localhost:8008/health || exit 1
|
||||
|
||||
@@ -27,7 +27,6 @@ RUN env DEBIAN_FRONTEND=noninteractive apt-get install \
|
||||
wget
|
||||
|
||||
# fetch and unpack the package
|
||||
# TODO: Upgrade to 1.2.2 once xenial is dropped
|
||||
RUN mkdir /dh-virtualenv
|
||||
RUN wget -q -O /dh-virtualenv.tar.gz https://github.com/spotify/dh-virtualenv/archive/ac6e1b1.tar.gz
|
||||
RUN tar -xv --strip-components=1 -C /dh-virtualenv -f /dh-virtualenv.tar.gz
|
||||
@@ -51,22 +50,17 @@ FROM ${distro}
|
||||
ARG distro=""
|
||||
ENV distro ${distro}
|
||||
|
||||
# Python < 3.7 assumes LANG="C" means ASCII-only and throws on printing unicode
|
||||
# http://bugs.python.org/issue19846
|
||||
ENV LANG C.UTF-8
|
||||
|
||||
# Install the build dependencies
|
||||
#
|
||||
# NB: keep this list in sync with the list of build-deps in debian/control
|
||||
# TODO: it would be nice to do that automatically.
|
||||
# TODO: Remove the dh-systemd stanza after dropping support for Ubuntu xenial
|
||||
# it's a transitional package on all other, more recent releases
|
||||
RUN apt-get update -qq -o Acquire::Languages=none \
|
||||
&& env DEBIAN_FRONTEND=noninteractive apt-get install \
|
||||
-yqq --no-install-recommends -o Dpkg::Options::=--force-unsafe-io \
|
||||
build-essential \
|
||||
debhelper \
|
||||
devscripts \
|
||||
dh-systemd \
|
||||
libsystemd-dev \
|
||||
lsb-release \
|
||||
pkg-config \
|
||||
@@ -75,11 +69,7 @@ RUN apt-get update -qq -o Acquire::Languages=none \
|
||||
python3-setuptools \
|
||||
python3-venv \
|
||||
sqlite3 \
|
||||
libpq-dev \
|
||||
xmlsec1 \
|
||||
&& ( env DEBIAN_FRONTEND=noninteractive apt-get install \
|
||||
-yqq --no-install-recommends -o Dpkg::Options::=--force-unsafe-io \
|
||||
dh-systemd || true )
|
||||
libpq-dev
|
||||
|
||||
COPY --from=builder /dh-virtualenv_1.2~dev-1_all.deb /
|
||||
|
||||
|
||||
@@ -1,23 +0,0 @@
|
||||
# Inherit from the official Synapse docker image
|
||||
FROM matrixdotorg/synapse
|
||||
|
||||
# Install deps
|
||||
RUN apt-get update
|
||||
RUN apt-get install -y supervisor redis nginx
|
||||
|
||||
# Remove the default nginx sites
|
||||
RUN rm /etc/nginx/sites-enabled/default
|
||||
|
||||
# Copy Synapse worker, nginx and supervisord configuration template files
|
||||
COPY ./docker/conf-workers/* /conf/
|
||||
|
||||
# Expose nginx listener port
|
||||
EXPOSE 8080/tcp
|
||||
|
||||
# Volume for user-editable config files, logs etc.
|
||||
VOLUME ["/data"]
|
||||
|
||||
# A script to read environment variables and create the necessary
|
||||
# files to run the desired worker configuration. Will start supervisord.
|
||||
COPY ./docker/configure_workers_and_start.py /configure_workers_and_start.py
|
||||
ENTRYPOINT ["/configure_workers_and_start.py"]
|
||||
@@ -1,140 +0,0 @@
|
||||
# Running tests against a dockerised Synapse
|
||||
|
||||
It's possible to run integration tests against Synapse
|
||||
using [Complement](https://github.com/matrix-org/complement). Complement is a Matrix Spec
|
||||
compliance test suite for homeservers, and supports any homeserver docker image configured
|
||||
to listen on ports 8008/8448. This document contains instructions for building Synapse
|
||||
docker images that can be run inside Complement for testing purposes.
|
||||
|
||||
Note that running Synapse's unit tests from within the docker image is not supported.
|
||||
|
||||
## Testing with SQLite and single-process Synapse
|
||||
|
||||
> Note that `scripts-dev/complement.sh` is a script that will automatically build
|
||||
> and run an SQLite-based, single-process Synapse against Complement.
|
||||
|
||||
The instructions below will set up Complement testing for a single-process,
|
||||
SQLite-based Synapse deployment.
|
||||
|
||||
Start by building the base Synapse docker image. If you wish to run tests with the latest
|
||||
release of Synapse, instead of your current checkout, you can skip this step. From the
|
||||
root of the repository:
|
||||
|
||||
```sh
|
||||
docker build -t matrixdotorg/synapse -f docker/Dockerfile .
|
||||
```
|
||||
|
||||
This will build an image with the tag `matrixdotorg/synapse`.
|
||||
|
||||
Next, build the Synapse image for Complement. You will need a local checkout
|
||||
of Complement. Change to the root of your Complement checkout and run:
|
||||
|
||||
```sh
|
||||
docker build -t complement-synapse -f "dockerfiles/Synapse.Dockerfile" dockerfiles
|
||||
```
|
||||
|
||||
This will build an image with the tag `complement-synapse`, which can be handed to
|
||||
Complement for testing via the `COMPLEMENT_BASE_IMAGE` environment variable. Refer to
|
||||
[Complement's documentation](https://github.com/matrix-org/complement/#running) for
|
||||
how to run the tests, as well as the various available command line flags.
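For illustration only, a typical invocation from the root of the Complement checkout looks something like the following (the authoritative command and flags are in Complement's own README; a working Go toolchain and Docker are assumed):

```sh
# Hand the image built above to Complement and run its test suite.
COMPLEMENT_BASE_IMAGE=complement-synapse go test -v ./tests/...
```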
## Testing with PostgreSQL and single or multi-process Synapse
|
||||
|
||||
The above docker image only supports running Synapse with SQLite and in a
|
||||
single-process topology. The following instructions are used to build a Synapse image for
|
||||
Complement that supports either single or multi-process topology with a PostgreSQL
|
||||
database backend.
|
||||
|
||||
As with the single-process image, build the base Synapse docker image. If you wish to run
|
||||
tests with the latest release of Synapse, instead of your current checkout, you can skip
|
||||
this step. From the root of the repository:
|
||||
|
||||
```sh
|
||||
docker build -t matrixdotorg/synapse -f docker/Dockerfile .
|
||||
```
|
||||
|
||||
This will build an image with the tag `matrixdotorg/synapse`.
|
||||
|
||||
Next, we build a new image with worker support based on `matrixdotorg/synapse:latest`.
|
||||
Again, from the root of the repository:
|
||||
|
||||
```sh
|
||||
docker build -t matrixdotorg/synapse-workers -f docker/Dockerfile-workers .
|
||||
```
|
||||
|
||||
This will build an image with the tag `matrixdotorg/synapse-workers`.
|
||||
|
||||
It's worth noting at this point that this image is fully functional, and
can be used for local testing. See instructions for using the container
|
||||
under
|
||||
[Running the Dockerfile-worker image standalone](#running-the-dockerfile-worker-image-standalone)
|
||||
below.
|
||||
|
||||
Finally, build the Synapse image for Complement, which is based on
|
||||
`matrixdotorg/synapse-workers`. You will need a local checkout of Complement. Change to
|
||||
the root of your Complement checkout and run:
|
||||
|
||||
```sh
|
||||
docker build -t matrixdotorg/complement-synapse-workers -f dockerfiles/SynapseWorkers.Dockerfile dockerfiles
|
||||
```
|
||||
|
||||
This will build an image with the tag `matrixdotorg/complement-synapse-workers`, which can be handed to
|
||||
Complement for testing via the `COMPLEMENT_BASE_IMAGE` environment variable. Refer to
|
||||
[Complement's documentation](https://github.com/matrix-org/complement/#running) for
|
||||
how to run the tests, as well as the various available command line flags.
|
||||
|
||||
## Running the Dockerfile-worker image standalone
|
||||
|
||||
For manual testing of a multi-process Synapse instance in Docker,
|
||||
[Dockerfile-workers](Dockerfile-workers) is a Dockerfile that will produce an image
|
||||
bundling all necessary components together for a workerised homeserver instance.
|
||||
|
||||
This includes any desired Synapse worker processes, a nginx to route traffic accordingly,
|
||||
a redis for worker communication and a supervisord instance to start up and monitor all
|
||||
processes. You will need to provide your own postgres container to connect to, and TLS
|
||||
is not handled by the container.
|
||||
|
||||
Once you've built the image using the above instructions, you can run it. Be sure
|
||||
you've set up a volume according to the [usual Synapse docker instructions](README.md).
|
||||
Then run something along the lines of:
|
||||
|
||||
```
|
||||
docker run -d --name synapse \
|
||||
--mount type=volume,src=synapse-data,dst=/data \
|
||||
-p 8008:8008 \
|
||||
-e SYNAPSE_SERVER_NAME=my.matrix.host \
|
||||
-e SYNAPSE_REPORT_STATS=no \
|
||||
-e POSTGRES_HOST=postgres \
|
||||
-e POSTGRES_USER=postgres \
|
||||
-e POSTGRES_PASSWORD=somesecret \
|
||||
-e SYNAPSE_WORKER_TYPES=synchrotron,media_repository,user_dir \
|
||||
-e SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK=1 \
|
||||
matrixdotorg/synapse-workers
|
||||
```
|
||||
|
||||
...substituting `POSTGRES*` variables for those that match a postgres host you have
|
||||
available (usually a running postgres docker container).
|
||||
|
||||
The `SYNAPSE_WORKER_TYPES` environment variable is a comma-separated list of workers to
|
||||
use when running the container. All possible worker names are defined by the keys of the
|
||||
`WORKERS_CONFIG` variable in [this script](configure_workers_and_start.py), which the
|
||||
Dockerfile makes use of to generate appropriate worker, nginx and supervisord config
|
||||
files.
|
||||
|
||||
Sharding is supported for a subset of workers, in line with the
|
||||
[worker documentation](../docs/workers.md). To run multiple instances of a given worker
|
||||
type, simply specify the type multiple times in `SYNAPSE_WORKER_TYPES`
|
||||
(e.g. `SYNAPSE_WORKER_TYPES=event_creator,event_creator...`).
|
||||
|
||||
Otherwise, `SYNAPSE_WORKER_TYPES` can either be left empty or unset to spawn no workers
|
||||
(leaving only the main process). The container is configured to use redis-based worker
|
||||
mode.
|
||||
|
||||
Logs for workers and the main process are logged to stdout and can be viewed with
|
||||
standard `docker logs` tooling. Worker logs contain their worker name
|
||||
after the timestamp.
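For example, to follow the logs of the container started above and pick out a single worker (a sketch only; `synapse` is the container name from the earlier `docker run`, and `synchrotron1` is whatever worker name was generated for your configuration):

```sh
docker logs -f synapse 2>&1 | grep synchrotron1
```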
Setting `SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK=1` will cause worker logs to be written to
|
||||
`<data_dir>/logs/<worker_name>.log`. Logs are kept for 1 week and rotate every day at
00:00, according to the container's clock. Logging for the main process must still be
|
||||
configured by modifying the homeserver's log config in your Synapse data volume.
|
||||
@@ -2,28 +2,26 @@
|
||||
|
||||
This Docker image will run Synapse as a single process. By default it uses a
|
||||
sqlite database; for production use you should connect it to a separate
|
||||
postgres database. The image also does *not* provide a TURN server.
|
||||
postgres database.
|
||||
|
||||
This image should work on all platforms that are supported by Docker upstream.
|
||||
Note that Docker's WSL2-backend Linux Containers on Windows
|
||||
platform is [experimental](https://github.com/docker/for-win/issues/6470) and
|
||||
is not supported by this image.
|
||||
The image also does *not* provide a TURN server.
|
||||
|
||||
## Volumes
|
||||
|
||||
By default, the image expects a single volume, located at `/data`, that will hold:
|
||||
By default, the image expects a single volume, located at ``/data``, that will hold:
|
||||
|
||||
* configuration files;
|
||||
* temporary files during uploads;
|
||||
* uploaded media and thumbnails;
|
||||
* the SQLite database if you do not configure postgres;
|
||||
* the appservices configuration.
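For instance, a named volume can be created up front and later handed to the container (a sketch; a bind mount at `/data` works just as well):

```sh
docker volume create synapse-data
# pass it to the container with:
#   --mount type=volume,src=synapse-data,dst=/data
```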
You are free to use separate volumes depending on storage endpoints at your
|
||||
disposal. For instance, `/data/media` could be stored on a large but low
|
||||
disposal. For instance, ``/data/media`` could be stored on a large but low
|
||||
performance hdd storage while other files could be stored on high performance
|
||||
endpoints.
|
||||
|
||||
In order to set up an application service, simply create an `appservices`
In order to set up an application service, simply create an ``appservices``
|
||||
directory in the data volume and write the application service Yaml
|
||||
configuration file there. Multiple application services are supported.
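As a sketch only (the registration fields and tokens below are placeholders; the real values are dictated by the application service you are deploying), a registration file could be dropped into the volume like so:

```sh
mkdir -p /path/to/synapse-data/appservices
cat > /path/to/synapse-data/appservices/my-bridge.yaml <<'EOF'
id: my-bridge
url: http://bridge.internal:9000
as_token: REPLACE_WITH_A_RANDOM_TOKEN
hs_token: REPLACE_WITH_ANOTHER_RANDOM_TOKEN
sender_localpart: my-bridge
namespaces:
  users:
    - exclusive: true
      regex: "@_mybridge_.*:my.matrix.host"
  aliases: []
  rooms: []
EOF
```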
@@ -56,8 +54,6 @@ The following environment variables are supported in `generate` mode:
|
||||
* `SYNAPSE_SERVER_NAME` (mandatory): the server public hostname.
|
||||
* `SYNAPSE_REPORT_STATS` (mandatory, `yes` or `no`): whether to enable
|
||||
anonymous statistics reporting.
|
||||
* `SYNAPSE_HTTP_PORT`: the port Synapse should listen on for http traffic.
|
||||
Defaults to `8008`.
|
||||
* `SYNAPSE_CONFIG_DIR`: where additional config files (such as the log config
|
||||
and event signing key) will be stored. Defaults to `/data`.
|
||||
* `SYNAPSE_CONFIG_PATH`: path to the file to be generated. Defaults to
|
||||
@@ -78,8 +74,6 @@ docker run -d --name synapse \
|
||||
matrixdotorg/synapse:latest
|
||||
```
|
||||
|
||||
(assuming 8008 is the port Synapse is configured to listen on for http traffic.)
|
||||
|
||||
You can then check that it has started correctly with:
|
||||
|
||||
```
|
||||
@@ -211,8 +205,3 @@ healthcheck:
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
```
|
||||
|
||||
## Using jemalloc
|
||||
|
||||
Jemalloc is embedded in the image and will be used instead of the default allocator.
|
||||
You can read about jemalloc by reading the Synapse [README](../README.md).
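If you want to confirm that jemalloc was actually picked up inside a running container, one diagnostic sketch (assuming the container is named `synapse`) is to inspect the main process's environment for the `LD_PRELOAD` set by `start.py`:

```sh
docker exec synapse sh -c 'tr "\0" "\n" < /proc/1/environ | grep LD_PRELOAD'
# e.g. LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.2 (path varies by architecture)
```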
@@ -1,4 +1,4 @@
|
||||
#!/usr/bin/env bash
|
||||
#!/bin/bash
|
||||
|
||||
# The script to build the Debian package, as ran inside the Docker image.
|
||||
|
||||
|
||||
@@ -1,27 +0,0 @@
|
||||
# This file contains the base config for the reverse proxy, as part of ../Dockerfile-workers.
|
||||
# configure_workers_and_start.py uses and amends this file depending on the workers
|
||||
# that have been selected.
|
||||
|
||||
{{ upstream_directives }}
|
||||
|
||||
server {
|
||||
# Listen on an unoccupied port number
|
||||
listen 8008;
|
||||
listen [::]:8008;
|
||||
|
||||
server_name localhost;
|
||||
|
||||
# Nginx by default only allows file uploads up to 1M in size
|
||||
# Increase client_max_body_size to match max_upload_size defined in homeserver.yaml
|
||||
client_max_body_size 100M;
|
||||
|
||||
{{ worker_locations }}
|
||||
|
||||
# Send all other traffic to the main process
|
||||
location ~* ^(\/_matrix|\/_synapse) {
|
||||
proxy_pass http://localhost:8080;
|
||||
proxy_set_header X-Forwarded-For $remote_addr;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_set_header Host $host;
|
||||
}
|
||||
}
|
||||
@@ -1,9 +0,0 @@
|
||||
# This file contains the base for the shared homeserver config file between Synapse workers,
|
||||
# as part of ./Dockerfile-workers.
|
||||
# configure_workers_and_start.py uses and amends this file depending on the workers
|
||||
# that have been selected.
|
||||
|
||||
redis:
|
||||
enabled: true
|
||||
|
||||
{{ shared_worker_config }}
|
||||
@@ -1,41 +0,0 @@
|
||||
# This file contains the base config for supervisord, as part of ../Dockerfile-workers.
|
||||
# configure_workers_and_start.py uses and amends this file depending on the workers
|
||||
# that have been selected.
|
||||
[supervisord]
|
||||
nodaemon=true
|
||||
user=root
|
||||
|
||||
[program:nginx]
|
||||
command=/usr/sbin/nginx -g "daemon off;"
|
||||
priority=500
|
||||
stdout_logfile=/dev/stdout
|
||||
stdout_logfile_maxbytes=0
|
||||
stderr_logfile=/dev/stderr
|
||||
stderr_logfile_maxbytes=0
|
||||
username=www-data
|
||||
autorestart=true
|
||||
|
||||
[program:redis]
|
||||
command=/usr/bin/redis-server /etc/redis/redis.conf --daemonize no
|
||||
priority=1
|
||||
stdout_logfile=/dev/stdout
|
||||
stdout_logfile_maxbytes=0
|
||||
stderr_logfile=/dev/stderr
|
||||
stderr_logfile_maxbytes=0
|
||||
username=redis
|
||||
autorestart=true
|
||||
|
||||
[program:synapse_main]
|
||||
command=/usr/local/bin/python -m synapse.app.homeserver --config-path="{{ main_config_path }}" --config-path=/conf/workers/shared.yaml
|
||||
priority=10
|
||||
# Log startup failures to supervisord's stdout/err
|
||||
# Regular synapse logs will still go in the configured data directory
|
||||
stdout_logfile=/dev/stdout
|
||||
stdout_logfile_maxbytes=0
|
||||
stderr_logfile=/dev/stderr
|
||||
stderr_logfile_maxbytes=0
|
||||
autorestart=unexpected
|
||||
exitcodes=0
|
||||
|
||||
# Additional process blocks
|
||||
{{ worker_config }}
|
||||
@@ -1,26 +0,0 @@
|
||||
# This is a configuration template for a single worker instance, and is
|
||||
# used by Dockerfile-workers.
|
||||
# Values will change depending on whichever workers are selected when
|
||||
# running that image.
|
||||
|
||||
worker_app: "{{ app }}"
|
||||
worker_name: "{{ name }}"
|
||||
|
||||
# The replication listener on the main synapse process.
|
||||
worker_replication_host: 127.0.0.1
|
||||
worker_replication_http_port: 9093
|
||||
|
||||
worker_listeners:
|
||||
- type: http
|
||||
port: {{ port }}
|
||||
{% if listener_resources %}
|
||||
resources:
|
||||
- names:
|
||||
{%- for resource in listener_resources %}
|
||||
- {{ resource }}
|
||||
{%- endfor %}
|
||||
{% endif %}
|
||||
|
||||
worker_log_config: {{ worker_log_config_filepath }}
|
||||
|
||||
{{ worker_extra_conf }}
|
||||
@@ -40,9 +40,7 @@ listeners:
|
||||
compress: false
|
||||
{% endif %}
|
||||
|
||||
# Allow configuring in case we want to reverse proxy 8008
|
||||
# using another process in the same container
|
||||
- port: {{ SYNAPSE_HTTP_PORT or 8008 }}
|
||||
- port: 8008
|
||||
tls: false
|
||||
bind_addresses: ['::']
|
||||
type: http
|
||||
@@ -91,6 +89,7 @@ federation_rc_concurrent: 3
|
||||
## Files ##
|
||||
|
||||
media_store_path: "/data/media"
|
||||
uploads_path: "/data/uploads"
|
||||
max_upload_size: "{{ SYNAPSE_MAX_UPLOAD_SIZE or "50M" }}"
|
||||
max_image_pixels: "32M"
|
||||
dynamic_thumbnails: false
|
||||
@@ -175,10 +174,18 @@ report_stats: False
|
||||
|
||||
## API Configuration ##
|
||||
|
||||
room_invite_state_types:
|
||||
- "m.room.join_rules"
|
||||
- "m.room.canonical_alias"
|
||||
- "m.room.avatar"
|
||||
- "m.room.name"
|
||||
|
||||
{% if SYNAPSE_APPSERVICES %}
|
||||
app_service_config_files:
|
||||
{% for appservice in SYNAPSE_APPSERVICES %} - "{{ appservice }}"
|
||||
{% endfor %}
|
||||
{% else %}
|
||||
app_service_config_files: []
|
||||
{% endif %}
|
||||
|
||||
macaroon_secret_key: "{{ SYNAPSE_MACAROON_SECRET_KEY }}"
|
||||
@@ -191,10 +198,12 @@ old_signing_keys: {}
|
||||
key_refresh_interval: "1d" # 1 Day.
|
||||
|
||||
# The trusted servers to download signing keys from.
|
||||
trusted_key_servers:
|
||||
- server_name: matrix.org
|
||||
verify_keys:
|
||||
"ed25519:auto": "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"
|
||||
perspectives:
|
||||
servers:
|
||||
"matrix.org":
|
||||
verify_keys:
|
||||
"ed25519:auto":
|
||||
key: "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"
|
||||
|
||||
password_config:
|
||||
enabled: true
|
||||
|
||||
@@ -2,34 +2,9 @@ version: 1
|
||||
|
||||
formatters:
|
||||
precise:
|
||||
{% if worker_name %}
|
||||
format: '%(asctime)s - worker:{{ worker_name }} - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
|
||||
{% else %}
|
||||
format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
|
||||
{% endif %}
|
||||
format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
|
||||
|
||||
handlers:
|
||||
file:
|
||||
class: logging.handlers.TimedRotatingFileHandler
|
||||
formatter: precise
|
||||
filename: {{ LOG_FILE_PATH or "homeserver.log" }}
|
||||
when: "midnight"
|
||||
backupCount: 6 # Does not include the current log file.
|
||||
encoding: utf8
|
||||
|
||||
# Default to buffering writes to log file for efficiency. This means that
|
||||
# there will be a delay for INFO/DEBUG logs to get written, but WARNING/ERROR
|
||||
# logs will still be flushed immediately.
|
||||
buffer:
|
||||
class: logging.handlers.MemoryHandler
|
||||
target: file
|
||||
# The capacity is the number of log lines that are buffered before
|
||||
# being written to disk. Increasing this will lead to better
|
||||
# performance, at the expense of it taking longer for log lines to
|
||||
# be written to disk.
|
||||
capacity: 10
|
||||
flushLevel: 30 # Flush for WARNING logs as well
|
||||
|
||||
console:
|
||||
class: logging.StreamHandler
|
||||
formatter: precise
|
||||
@@ -42,11 +17,6 @@ loggers:
|
||||
|
||||
root:
|
||||
level: {{ SYNAPSE_LOG_LEVEL or "INFO" }}
|
||||
|
||||
{% if LOG_FILE_PATH %}
|
||||
handlers: [console, buffer]
|
||||
{% else %}
|
||||
handlers: [console]
|
||||
{% endif %}
|
||||
|
||||
disable_existing_loggers: false
|
||||
|
||||
@@ -1,558 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# Copyright 2021 The Matrix.org Foundation C.I.C.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# This script reads environment variables and generates a shared Synapse worker,
|
||||
# nginx and supervisord configs depending on the workers requested.
|
||||
#
|
||||
# The environment variables it reads are:
|
||||
# * SYNAPSE_SERVER_NAME: The desired server_name of the homeserver.
|
||||
# * SYNAPSE_REPORT_STATS: Whether to report stats.
|
||||
# * SYNAPSE_WORKER_TYPES: A comma-separated list of worker names as specified in WORKERS_CONFIG
|
||||
# below. Leave empty for no workers, or set to '*' for all possible workers.
|
||||
#
|
||||
# NOTE: According to Complement's ENTRYPOINT expectations for a homeserver image (as defined
|
||||
# in the project's README), this script may be run multiple times, and functionality should
|
||||
# continue to work if so.
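#
# As a purely illustrative (hypothetical) example, an image that uses this script as
# its entrypoint might be started with something like:
#
#   docker run -e SYNAPSE_SERVER_NAME=test.localhost \
#       -e SYNAPSE_REPORT_STATS=no \
#       -e SYNAPSE_WORKER_TYPES=federation_reader,synchrotron \
#       <image>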
|
||||
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
import jinja2
|
||||
import yaml
|
||||
|
||||
MAIN_PROCESS_HTTP_LISTENER_PORT = 8080
|
||||
|
||||
|
||||
WORKERS_CONFIG = {
|
||||
"pusher": {
|
||||
"app": "synapse.app.pusher",
|
||||
"listener_resources": [],
|
||||
"endpoint_patterns": [],
|
||||
"shared_extra_conf": {"start_pushers": False},
|
||||
"worker_extra_conf": "",
|
||||
},
|
||||
"user_dir": {
|
||||
"app": "synapse.app.user_dir",
|
||||
"listener_resources": ["client"],
|
||||
"endpoint_patterns": [
|
||||
"^/_matrix/client/(api/v1|r0|unstable)/user_directory/search$"
|
||||
],
|
||||
"shared_extra_conf": {"update_user_directory": False},
|
||||
"worker_extra_conf": "",
|
||||
},
|
||||
"media_repository": {
|
||||
"app": "synapse.app.media_repository",
|
||||
"listener_resources": ["media"],
|
||||
"endpoint_patterns": [
|
||||
"^/_matrix/media/",
|
||||
"^/_synapse/admin/v1/purge_media_cache$",
|
||||
"^/_synapse/admin/v1/room/.*/media.*$",
|
||||
"^/_synapse/admin/v1/user/.*/media.*$",
|
||||
"^/_synapse/admin/v1/media/.*$",
|
||||
"^/_synapse/admin/v1/quarantine_media/.*$",
|
||||
],
|
||||
"shared_extra_conf": {"enable_media_repo": False},
|
||||
"worker_extra_conf": "enable_media_repo: true",
|
||||
},
|
||||
"appservice": {
|
||||
"app": "synapse.app.appservice",
|
||||
"listener_resources": [],
|
||||
"endpoint_patterns": [],
|
||||
"shared_extra_conf": {"notify_appservices": False},
|
||||
"worker_extra_conf": "",
|
||||
},
|
||||
"federation_sender": {
|
||||
"app": "synapse.app.federation_sender",
|
||||
"listener_resources": [],
|
||||
"endpoint_patterns": [],
|
||||
"shared_extra_conf": {"send_federation": False},
|
||||
"worker_extra_conf": "",
|
||||
},
|
||||
"synchrotron": {
|
||||
"app": "synapse.app.generic_worker",
|
||||
"listener_resources": ["client"],
|
||||
"endpoint_patterns": [
|
||||
"^/_matrix/client/(v2_alpha|r0)/sync$",
|
||||
"^/_matrix/client/(api/v1|v2_alpha|r0)/events$",
|
||||
"^/_matrix/client/(api/v1|r0)/initialSync$",
|
||||
"^/_matrix/client/(api/v1|r0)/rooms/[^/]+/initialSync$",
|
||||
],
|
||||
"shared_extra_conf": {},
|
||||
"worker_extra_conf": "",
|
||||
},
|
||||
"federation_reader": {
|
||||
"app": "synapse.app.generic_worker",
|
||||
"listener_resources": ["federation"],
|
||||
"endpoint_patterns": [
|
||||
"^/_matrix/federation/(v1|v2)/event/",
|
||||
"^/_matrix/federation/(v1|v2)/state/",
|
||||
"^/_matrix/federation/(v1|v2)/state_ids/",
|
||||
"^/_matrix/federation/(v1|v2)/backfill/",
|
||||
"^/_matrix/federation/(v1|v2)/get_missing_events/",
|
||||
"^/_matrix/federation/(v1|v2)/publicRooms",
|
||||
"^/_matrix/federation/(v1|v2)/query/",
|
||||
"^/_matrix/federation/(v1|v2)/make_join/",
|
||||
"^/_matrix/federation/(v1|v2)/make_leave/",
|
||||
"^/_matrix/federation/(v1|v2)/send_join/",
|
||||
"^/_matrix/federation/(v1|v2)/send_leave/",
|
||||
"^/_matrix/federation/(v1|v2)/invite/",
|
||||
"^/_matrix/federation/(v1|v2)/query_auth/",
|
||||
"^/_matrix/federation/(v1|v2)/event_auth/",
|
||||
"^/_matrix/federation/(v1|v2)/exchange_third_party_invite/",
|
||||
"^/_matrix/federation/(v1|v2)/user/devices/",
|
||||
"^/_matrix/federation/(v1|v2)/get_groups_publicised$",
|
||||
"^/_matrix/key/v2/query",
|
||||
],
|
||||
"shared_extra_conf": {},
|
||||
"worker_extra_conf": "",
|
||||
},
|
||||
"federation_inbound": {
|
||||
"app": "synapse.app.generic_worker",
|
||||
"listener_resources": ["federation"],
|
||||
"endpoint_patterns": ["/_matrix/federation/(v1|v2)/send/"],
|
||||
"shared_extra_conf": {},
|
||||
"worker_extra_conf": "",
|
||||
},
|
||||
"event_persister": {
|
||||
"app": "synapse.app.generic_worker",
|
||||
"listener_resources": ["replication"],
|
||||
"endpoint_patterns": [],
|
||||
"shared_extra_conf": {},
|
||||
"worker_extra_conf": "",
|
||||
},
|
||||
"background_worker": {
|
||||
"app": "synapse.app.generic_worker",
|
||||
"listener_resources": [],
|
||||
"endpoint_patterns": [],
|
||||
# This worker cannot be sharded. Therefore there should only ever be one background
|
||||
# worker, and it should be named background_worker1
|
||||
"shared_extra_conf": {"run_background_tasks_on": "background_worker1"},
|
||||
"worker_extra_conf": "",
|
||||
},
|
||||
"event_creator": {
|
||||
"app": "synapse.app.generic_worker",
|
||||
"listener_resources": ["client"],
|
||||
"endpoint_patterns": [
|
||||
"^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/redact",
|
||||
"^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/send",
|
||||
"^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$",
|
||||
"^/_matrix/client/(api/v1|r0|unstable)/join/",
|
||||
"^/_matrix/client/(api/v1|r0|unstable)/profile/",
|
||||
],
|
||||
"shared_extra_conf": {},
|
||||
"worker_extra_conf": "",
|
||||
},
|
||||
"frontend_proxy": {
|
||||
"app": "synapse.app.frontend_proxy",
|
||||
"listener_resources": ["client", "replication"],
|
||||
"endpoint_patterns": ["^/_matrix/client/(api/v1|r0|unstable)/keys/upload"],
|
||||
"shared_extra_conf": {},
|
||||
"worker_extra_conf": (
|
||||
"worker_main_http_uri: http://127.0.0.1:%d"
|
||||
% (MAIN_PROCESS_HTTP_LISTENER_PORT,)
|
||||
),
|
||||
},
|
||||
}
|
||||
|
||||
# Templates for sections that may be inserted multiple times in config files
|
||||
SUPERVISORD_PROCESS_CONFIG_BLOCK = """
|
||||
[program:synapse_{name}]
|
||||
command=/usr/local/bin/python -m {app} \
|
||||
--config-path="{config_path}" \
|
||||
--config-path=/conf/workers/shared.yaml \
|
||||
--config-path=/conf/workers/{name}.yaml
|
||||
autorestart=unexpected
|
||||
priority=500
|
||||
exitcodes=0
|
||||
stdout_logfile=/dev/stdout
|
||||
stdout_logfile_maxbytes=0
|
||||
stderr_logfile=/dev/stderr
|
||||
stderr_logfile_maxbytes=0
|
||||
"""
|
||||
|
||||
NGINX_LOCATION_CONFIG_BLOCK = """
|
||||
location ~* {endpoint} {{
|
||||
proxy_pass {upstream};
|
||||
proxy_set_header X-Forwarded-For $remote_addr;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_set_header Host $host;
|
||||
}}
|
||||
"""
|
||||
|
||||
NGINX_UPSTREAM_CONFIG_BLOCK = """
|
||||
upstream {upstream_worker_type} {{
|
||||
{body}
|
||||
}}
|
||||
"""
|
||||
|
||||
|
||||
# Utility functions
|
||||
def log(txt: str):
|
||||
"""Log something to the stdout.
|
||||
|
||||
Args:
|
||||
txt: The text to log.
|
||||
"""
|
||||
print(txt)
|
||||
|
||||
|
||||
def error(txt: str):
|
||||
"""Log something and exit with an error code.
|
||||
|
||||
Args:
|
||||
txt: The text to log in error.
|
||||
"""
|
||||
log(txt)
|
||||
sys.exit(2)
|
||||
|
||||
|
||||
def convert(src: str, dst: str, **template_vars):
|
||||
"""Generate a file from a template
|
||||
|
||||
Args:
|
||||
src: Path to the input file.
|
||||
dst: Path to write to.
|
||||
template_vars: The arguments to replace placeholder variables in the template with.
|
||||
"""
|
||||
# Read the template file
|
||||
with open(src) as infile:
|
||||
template = infile.read()
|
||||
|
||||
# Generate a string from the template. We disable autoescape to prevent template
|
||||
# variables from being escaped.
|
||||
rendered = jinja2.Template(template, autoescape=False).render(**template_vars)
|
||||
|
||||
# Write the generated contents to a file
|
||||
#
|
||||
# We use append mode in case the files have already been written to by something else
|
||||
# (for instance, as part of the instructions in a dockerfile).
|
||||
with open(dst, "a") as outfile:
|
||||
# In case the existing file doesn't end with a newline
|
||||
outfile.write("\n")
|
||||
|
||||
outfile.write(rendered)
|
||||
|
||||
|
||||
def add_sharding_to_shared_config(
|
||||
shared_config: dict,
|
||||
worker_type: str,
|
||||
worker_name: str,
|
||||
worker_port: int,
|
||||
) -> None:
|
||||
"""Given a dictionary representing a config file shared across all workers,
|
||||
append sharded worker information to it for the current worker_type instance.
|
||||
|
||||
Args:
|
||||
shared_config: The config dict that all worker instances share (after being converted to YAML)
|
||||
worker_type: The type of worker (one of those defined in WORKERS_CONFIG).
|
||||
worker_name: The name of the worker instance.
|
||||
worker_port: The HTTP replication port that the worker instance is listening on.
|
||||
"""
|
||||
# The instance_map config field marks the workers that write to various replication streams
|
||||
instance_map = shared_config.setdefault("instance_map", {})
|
||||
|
||||
# Worker-type specific sharding config
|
||||
if worker_type == "pusher":
|
||||
shared_config.setdefault("pusher_instances", []).append(worker_name)
|
||||
|
||||
elif worker_type == "federation_sender":
|
||||
shared_config.setdefault("federation_sender_instances", []).append(worker_name)
|
||||
|
||||
elif worker_type == "event_persister":
|
||||
# Event persisters write to the events stream, so we need to update
|
||||
# the list of event stream writers
|
||||
shared_config.setdefault("stream_writers", {}).setdefault("events", []).append(
|
||||
worker_name
|
||||
)
|
||||
|
||||
# Map of stream writer instance names to host/ports combos
|
||||
instance_map[worker_name] = {
|
||||
"host": "localhost",
|
||||
"port": worker_port,
|
||||
}
|
||||
|
||||
elif worker_type == "media_repository":
|
||||
# The first configured media worker will run the media background jobs
|
||||
shared_config.setdefault("media_instance_running_background_jobs", worker_name)
|
||||
|
||||
|
||||
def generate_base_homeserver_config():
|
||||
"""Starts Synapse and generates a basic homeserver config, which will later be
|
||||
modified for worker support.
|
||||
|
||||
Raises: CalledProcessError if calling start.py returned a non-zero exit code.
|
||||
"""
|
||||
# start.py already does this for us, so just call that.
|
||||
# note that this script is copied in by the official, monolith dockerfile
|
||||
os.environ["SYNAPSE_HTTP_PORT"] = str(MAIN_PROCESS_HTTP_LISTENER_PORT)
|
||||
subprocess.check_output(["/usr/local/bin/python", "/start.py", "migrate_config"])
|
||||
|
||||
|
||||
def generate_worker_files(environ, config_path: str, data_dir: str):
|
||||
"""Read the desired list of workers from environment variables and generate
|
||||
shared homeserver, nginx and supervisord configs.
|
||||
|
||||
Args:
|
||||
environ: _Environ[str]
|
||||
config_path: Where to output the generated Synapse main worker config file.
|
||||
data_dir: The location of the synapse data directory. Where log and
|
||||
user-facing config files live.
|
||||
"""
|
||||
# Note that yaml cares about indentation, so care should be taken to insert lines
|
||||
# into files at the correct indentation below.
|
||||
|
||||
# shared_config is the contents of a Synapse config file that will be shared amongst
|
||||
# the main Synapse process as well as all workers.
|
||||
# It is intended mainly for disabling functionality when certain workers are spun up,
|
||||
# and adding a replication listener.
|
||||
|
||||
# First read the original config file and extract the listeners block. Then we'll add
|
||||
# another listener for replication. Later we'll write out the result.
|
||||
listeners = [
|
||||
{
|
||||
"port": 9093,
|
||||
"bind_address": "127.0.0.1",
|
||||
"type": "http",
|
||||
"resources": [{"names": ["replication"]}],
|
||||
}
|
||||
]
|
||||
with open(config_path) as file_stream:
|
||||
original_config = yaml.safe_load(file_stream)
|
||||
original_listeners = original_config.get("listeners")
|
||||
if original_listeners:
|
||||
listeners += original_listeners
|
||||
|
||||
# The shared homeserver config. The contents of which will be inserted into the
|
||||
# base shared worker jinja2 template.
|
||||
#
|
||||
# This config file will be passed to all workers, including Synapse's main process.
|
||||
shared_config = {"listeners": listeners}
|
||||
|
||||
# The supervisord config. The contents of which will be inserted into the
|
||||
# base supervisord jinja2 template.
|
||||
#
|
||||
# Supervisord will be in charge of running everything, from redis to nginx to Synapse
|
||||
# and all of its worker processes. Load the config template, which defines a few
|
||||
# services that are necessary to run.
|
||||
supervisord_config = ""
|
||||
|
||||
# Upstreams for load-balancing purposes. This dict takes the form of a worker type to the
|
||||
# ports of each worker. For example:
|
||||
# {
|
||||
# worker_type: {1234, 1235, ...}
|
||||
# }
|
||||
# and will be used to construct 'upstream' nginx directives.
|
||||
nginx_upstreams = {}
|
||||
|
||||
# A map of: {"endpoint": "upstream"}, where "upstream" is a str representing what will be
|
||||
# placed after the proxy_pass directive. The main benefit to representing this data as a
|
||||
# dict over a str is that we can easily deduplicate endpoints across multiple instances
|
||||
# of the same worker.
|
||||
#
|
||||
# An nginx site config that will be amended to depending on the workers that are
|
||||
# spun up. To be placed in /etc/nginx/conf.d.
|
||||
nginx_locations = {}
|
||||
|
||||
# Read the desired worker configuration from the environment
|
||||
worker_types = environ.get("SYNAPSE_WORKER_TYPES")
|
||||
if worker_types is None:
|
||||
# No workers, just the main process
|
||||
worker_types = []
|
||||
else:
|
||||
# Split type names by comma
|
||||
worker_types = worker_types.split(",")
|
||||
|
||||
# Create the worker configuration directory if it doesn't already exist
|
||||
os.makedirs("/conf/workers", exist_ok=True)
|
||||
|
||||
# Start worker ports from this arbitrary port
|
||||
worker_port = 18009
|
||||
|
||||
# A counter of worker_type -> int. Used for determining the name for a given
|
||||
# worker type when generating its config file, as each worker's name is just
|
||||
# worker_type + instance #
|
||||
worker_type_counter = {}
|
||||
|
||||
# For each worker type specified by the user, create config values
|
||||
for worker_type in worker_types:
|
||||
worker_type = worker_type.strip()
|
||||
|
||||
worker_config = WORKERS_CONFIG.get(worker_type)
|
||||
if worker_config:
|
||||
worker_config = worker_config.copy()
|
||||
else:
|
||||
log(worker_type + " is an unknown worker type! It will be ignored")
|
||||
continue
|
||||
|
||||
new_worker_count = worker_type_counter.setdefault(worker_type, 0) + 1
|
||||
worker_type_counter[worker_type] = new_worker_count
|
||||
|
||||
# Name workers by their type concatenated with an incrementing number
|
||||
# e.g. federation_reader1
|
||||
worker_name = worker_type + str(new_worker_count)
|
||||
worker_config.update(
|
||||
{"name": worker_name, "port": worker_port, "config_path": config_path}
|
||||
)
|
||||
|
||||
# Update the shared config with any worker-type specific options
|
||||
shared_config.update(worker_config["shared_extra_conf"])
|
||||
|
||||
# Check if more than one instance of this worker type has been specified
|
||||
worker_type_total_count = worker_types.count(worker_type)
|
||||
if worker_type_total_count > 1:
|
||||
# Update the shared config with sharding-related options if necessary
|
||||
add_sharding_to_shared_config(
|
||||
shared_config, worker_type, worker_name, worker_port
|
||||
)
|
||||
|
||||
# Enable the worker in supervisord
|
||||
supervisord_config += SUPERVISORD_PROCESS_CONFIG_BLOCK.format_map(worker_config)
|
||||
|
||||
# Add nginx location blocks for this worker's endpoints (if any are defined)
|
||||
for pattern in worker_config["endpoint_patterns"]:
|
||||
# Determine whether we need to load-balance this worker
|
||||
if worker_type_total_count > 1:
|
||||
# Create or add to a load-balanced upstream for this worker
|
||||
nginx_upstreams.setdefault(worker_type, set()).add(worker_port)
|
||||
|
||||
# Upstreams are named after the worker_type
|
||||
upstream = "http://" + worker_type
|
||||
else:
|
||||
upstream = "http://localhost:%d" % (worker_port,)
|
||||
|
||||
# Note that this endpoint should proxy to this upstream
|
||||
nginx_locations[pattern] = upstream
|
||||
|
||||
# Write out the worker's logging config file
|
||||
|
||||
# Check whether we should write worker logs to disk, in addition to the console
|
||||
extra_log_template_args = {}
|
||||
if environ.get("SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK"):
|
||||
extra_log_template_args["LOG_FILE_PATH"] = "{dir}/logs/{name}.log".format(
|
||||
dir=data_dir, name=worker_name
|
||||
)
|
||||
|
||||
# Render and write the file
|
||||
log_config_filepath = "/conf/workers/{name}.log.config".format(name=worker_name)
|
||||
convert(
|
||||
"/conf/log.config",
|
||||
log_config_filepath,
|
||||
worker_name=worker_name,
|
||||
**extra_log_template_args,
|
||||
)
|
||||
|
||||
# Then a worker config file
|
||||
convert(
|
||||
"/conf/worker.yaml.j2",
|
||||
"/conf/workers/{name}.yaml".format(name=worker_name),
|
||||
**worker_config,
|
||||
worker_log_config_filepath=log_config_filepath,
|
||||
)
|
||||
|
||||
worker_port += 1
|
||||
|
||||
# Build the nginx location config blocks
|
||||
nginx_location_config = ""
|
||||
for endpoint, upstream in nginx_locations.items():
|
||||
nginx_location_config += NGINX_LOCATION_CONFIG_BLOCK.format(
|
||||
endpoint=endpoint,
|
||||
upstream=upstream,
|
||||
)
|
||||
|
||||
# Determine the load-balancing upstreams to configure
|
||||
nginx_upstream_config = ""
|
||||
for upstream_worker_type, upstream_worker_ports in nginx_upstreams.items():
|
||||
body = ""
|
||||
for port in upstream_worker_ports:
|
||||
body += " server localhost:%d;\n" % (port,)
|
||||
|
||||
# Add to the list of configured upstreams
|
||||
nginx_upstream_config += NGINX_UPSTREAM_CONFIG_BLOCK.format(
|
||||
upstream_worker_type=upstream_worker_type,
|
||||
body=body,
|
||||
)
|
||||
|
||||
# Finally, we'll write out the config files.
|
||||
|
||||
# Shared homeserver config
|
||||
convert(
|
||||
"/conf/shared.yaml.j2",
|
||||
"/conf/workers/shared.yaml",
|
||||
shared_worker_config=yaml.dump(shared_config),
|
||||
)
|
||||
|
||||
# Nginx config
|
||||
convert(
|
||||
"/conf/nginx.conf.j2",
|
||||
"/etc/nginx/conf.d/matrix-synapse.conf",
|
||||
worker_locations=nginx_location_config,
|
||||
upstream_directives=nginx_upstream_config,
|
||||
)
|
||||
|
||||
# Supervisord config
|
||||
convert(
|
||||
"/conf/supervisord.conf.j2",
|
||||
"/etc/supervisor/conf.d/supervisord.conf",
|
||||
main_config_path=config_path,
|
||||
worker_config=supervisord_config,
|
||||
)
|
||||
|
||||
# Ensure the logging directory exists
|
||||
log_dir = data_dir + "/logs"
|
||||
if not os.path.exists(log_dir):
|
||||
os.mkdir(log_dir)
|
||||
|
||||
|
||||
def start_supervisord():
|
||||
"""Starts up supervisord which then starts and monitors all other necessary processes
|
||||
|
||||
Raises: CalledProcessError if supervisord returns a non-zero exit code.
|
||||
"""
|
||||
subprocess.run(["/usr/bin/supervisord"], stdin=subprocess.PIPE)
|
||||
|
||||
|
||||
def main(args, environ):
|
||||
config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data")
|
||||
config_path = environ.get("SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml")
|
||||
data_dir = environ.get("SYNAPSE_DATA_DIR", "/data")
|
||||
|
||||
# override SYNAPSE_NO_TLS, we don't support TLS in worker mode,
|
||||
# this needs to be handled by a frontend proxy
|
||||
environ["SYNAPSE_NO_TLS"] = "yes"
|
||||
|
||||
# Generate the base homeserver config if one does not yet exist
|
||||
if not os.path.exists(config_path):
|
||||
log("Generating base homeserver config")
|
||||
generate_base_homeserver_config()
|
||||
|
||||
# This script may be run multiple times (mostly by Complement, see note at top of file).
|
||||
# Don't re-configure workers in this instance.
|
||||
mark_filepath = "/conf/workers_have_been_configured"
|
||||
if not os.path.exists(mark_filepath):
|
||||
# Always regenerate all other config files
|
||||
generate_worker_files(environ, config_path, data_dir)
|
||||
|
||||
# Mark workers as being configured
|
||||
with open(mark_filepath, "w") as f:
|
||||
f.write("")
|
||||
|
||||
# Start supervisord, which will start Synapse, all of the configured worker
|
||||
# processes, redis, nginx etc. according to the config we created above.
|
||||
start_supervisord()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main(sys.argv, os.environ)
|
||||
@@ -1,4 +1,4 @@
|
||||
#!/usr/bin/env bash
|
||||
#!/bin/bash
|
||||
|
||||
# This script runs the PostgreSQL tests inside a Docker container. It expects
|
||||
# the relevant source files to be mounted into /src (done automatically by the
|
||||
|
||||
@@ -3,7 +3,6 @@
|
||||
import codecs
|
||||
import glob
|
||||
import os
|
||||
import platform
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
@@ -214,13 +213,6 @@ def main(args, environ):
|
||||
if "-m" not in args:
|
||||
args = ["-m", synapse_worker] + args
|
||||
|
||||
jemallocpath = "/usr/lib/%s-linux-gnu/libjemalloc.so.2" % (platform.machine(),)
|
||||
|
||||
if os.path.isfile(jemallocpath):
|
||||
environ["LD_PRELOAD"] = jemallocpath
|
||||
else:
|
||||
log("Could not find %s, will not use" % (jemallocpath,))
|
||||
|
||||
# if there are no config files passed to synapse, try adding the default file
|
||||
if not any(p.startswith("--config-path") or p.startswith("-c") for p in args):
|
||||
config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data")
|
||||
@@ -256,9 +248,9 @@ running with 'migrate_config'. See the README for more details.
|
||||
args = ["python"] + args
|
||||
if ownership is not None:
|
||||
args = ["gosu", ownership] + args
|
||||
os.execve("/usr/sbin/gosu", args, environ)
|
||||
os.execv("/usr/sbin/gosu", args)
|
||||
else:
|
||||
os.execve("/usr/local/bin/python", args, environ)
|
||||
os.execv("/usr/local/bin/python", args)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
@@ -1,25 +1,6 @@
|
||||
# Contents
|
||||
- [Querying media](#querying-media)
|
||||
* [List all media in a room](#list-all-media-in-a-room)
|
||||
* [List all media uploaded by a user](#list-all-media-uploaded-by-a-user)
|
||||
- [Quarantine media](#quarantine-media)
|
||||
* [Quarantining media by ID](#quarantining-media-by-id)
|
||||
* [Quarantining media in a room](#quarantining-media-in-a-room)
|
||||
* [Quarantining all media of a user](#quarantining-all-media-of-a-user)
|
||||
* [Protecting media from being quarantined](#protecting-media-from-being-quarantined)
|
||||
- [Delete local media](#delete-local-media)
|
||||
* [Delete a specific local media](#delete-a-specific-local-media)
|
||||
* [Delete local media by date or size](#delete-local-media-by-date-or-size)
|
||||
- [Purge Remote Media API](#purge-remote-media-api)
|
||||
|
||||
# Querying media
|
||||
|
||||
These APIs allow extracting media information from the homeserver.
|
||||
|
||||
## List all media in a room
|
||||
# List all media in a room
|
||||
|
||||
This API gets a list of known media in a room.
|
||||
However, it only shows media from unencrypted events or rooms.
|
||||
|
||||
The API is:
|
||||
```
|
||||
@@ -29,25 +10,19 @@ To use it, you will need to authenticate by providing an `access_token` for a
|
||||
server admin: see [README.rst](README.rst).
|
||||
|
||||
The API returns a JSON body like the following:
|
||||
```json
|
||||
```
|
||||
{
|
||||
"local": [
|
||||
"mxc://localhost/xwvutsrqponmlkjihgfedcba",
|
||||
"mxc://localhost/abcdefghijklmnopqrstuvwx"
|
||||
],
|
||||
"remote": [
|
||||
"mxc://matrix.org/xwvutsrqponmlkjihgfedcba",
|
||||
"mxc://matrix.org/abcdefghijklmnopqrstuvwx"
|
||||
]
|
||||
"local": [
|
||||
"mxc://localhost/xwvutsrqponmlkjihgfedcba",
|
||||
"mxc://localhost/abcdefghijklmnopqrstuvwx"
|
||||
],
|
||||
"remote": [
|
||||
"mxc://matrix.org/xwvutsrqponmlkjihgfedcba",
|
||||
"mxc://matrix.org/abcdefghijklmnopqrstuvwx"
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
## List all media uploaded by a user
|
||||
|
||||
Listing all media that has been uploaded by a local user can be achieved through
|
||||
the use of the [List media of a user](user_admin_api.rst#list-media-of-a-user)
|
||||
Admin API.
|
|
||||
|
||||
Quarantining media means that it is marked as inaccessible by users. It applies
|
||||
@@ -72,7 +47,7 @@ form of `abcdefg12345...`.
|
||||
|
||||
Response:
|
||||
|
||||
```json
|
||||
```
|
||||
{}
|
||||
```
|
||||
|
||||
@@ -92,18 +67,14 @@ Where `room_id` is in the form of `!roomid12345:example.org`.
|
||||
|
||||
Response:
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"num_quarantined": 10
|
||||
"num_quarantined": 10 # The number of media items successfully quarantined
|
||||
}
|
||||
```
|
||||
|
||||
The following fields are returned in the JSON response body:
|
||||
|
||||
* `num_quarantined`: integer - The number of media items successfully quarantined
|
||||
|
||||
Note that there is a legacy endpoint, `POST
|
||||
/_synapse/admin/v1/quarantine_media/<room_id>`, that operates the same.
|
||||
/_synapse/admin/v1/quarantine_media/<room_id >`, that operates the same.
|
||||
However, it is deprecated and may be removed in a future release.
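As an example (a sketch only; substitute your own room ID and admin access token), the non-deprecated endpoint can be called with:

```sh
# the room ID !roomid12345:example.org is URL-encoded in the path
curl -X POST -H "Authorization: Bearer $ACCESS_TOKEN" \
  -d '{}' \
  "http://localhost:8008/_synapse/admin/v1/room/%21roomid12345%3Aexample.org/media/quarantine"
```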
## Quarantining all media of a user
|
||||
@@ -120,52 +91,23 @@ POST /_synapse/admin/v1/user/<user_id>/media/quarantine
|
||||
{}
|
||||
```
|
||||
|
||||
URL Parameters
|
||||
|
||||
* `user_id`: string - User ID in the form of `@bob:example.org`
|
||||
Where `user_id` is in the form of `@bob:example.org`.
|
||||
|
||||
Response:
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"num_quarantined": 10
|
||||
"num_quarantined": 10 # The number of media items successfully quarantined
|
||||
}
|
||||
```
|
||||
|
||||
The following fields are returned in the JSON response body:
|
||||
|
||||
* `num_quarantined`: integer - The number of media items successfully quarantined
|
||||
|
||||
## Protecting media from being quarantined
|
||||
|
||||
This API protects a single piece of local media from being quarantined using the
|
||||
above APIs. This is useful for sticker packs and other shared media which you do
|
||||
not want to get quarantined, especially when
|
||||
[quarantining media in a room](#quarantining-media-in-a-room).
|
||||
|
||||
Request:
|
||||
|
||||
```
|
||||
POST /_synapse/admin/v1/media/protect/<media_id>
|
||||
|
||||
{}
|
||||
```
|
||||
|
||||
Where `media_id` is in the form of `abcdefg12345...`.
|
||||
|
||||
Response:
|
||||
|
||||
```json
|
||||
{}
|
||||
```
|
||||
|
||||
# Delete local media
|
||||
This API deletes the *local* media from the disk of your own server.
|
||||
This includes any local thumbnails and copies of media downloaded from
|
||||
remote homeservers.
|
||||
This API will not affect media that has been uploaded to external
|
||||
media repositories (e.g. https://github.com/turt2live/matrix-media-repo/).
|
||||
See also [Purge Remote Media API](#purge-remote-media-api).
|
||||
See also [purge_remote_media.rst](purge_remote_media.rst).
|
||||
|
||||
## Delete a specific local media
|
||||
Delete a specific `media_id`.
|
||||
@@ -186,12 +128,12 @@ URL Parameters
|
||||
Response:
|
||||
|
||||
```json
|
||||
{
|
||||
"deleted_media": [
|
||||
"abcdefghijklmnopqrstuvwx"
|
||||
],
|
||||
"total": 1
|
||||
}
|
||||
{
|
||||
"deleted_media": [
|
||||
"abcdefghijklmnopqrstuvwx"
|
||||
],
|
||||
"total": 1
|
||||
}
|
||||
```
|
||||
|
||||
The following fields are returned in the JSON response body:
|
||||
@@ -224,51 +166,16 @@ If `false` these files will be deleted. Defaults to `true`.
|
||||
Response:
|
||||
|
||||
```json
|
||||
{
|
||||
"deleted_media": [
|
||||
"abcdefghijklmnopqrstuvwx",
|
||||
"abcdefghijklmnopqrstuvwz"
|
||||
],
|
||||
"total": 2
|
||||
}
|
||||
{
|
||||
"deleted_media": [
|
||||
"abcdefghijklmnopqrstuvwx",
|
||||
"abcdefghijklmnopqrstuvwz"
|
||||
],
|
||||
"total": 2
|
||||
}
|
||||
```
|
||||
|
||||
The following fields are returned in the JSON response body:
|
||||
|
||||
* `deleted_media`: an array of strings - List of deleted `media_id`
|
||||
* `total`: integer - Total number of deleted `media_id`
|
||||
|
||||
# Purge Remote Media API
|
||||
|
||||
The purge remote media API allows server admins to purge old cached remote media.
|
||||
|
||||
The API is:
|
||||
|
||||
```
|
||||
POST /_synapse/admin/v1/purge_media_cache?before_ts=<unix_timestamp_in_ms>
|
||||
|
||||
{}
|
||||
```
|
||||
|
||||
URL Parameters
|
||||
|
||||
* `unix_timestamp_in_ms`: string representing a positive integer - Unix timestamp in ms.
|
||||
All cached media that was last accessed before this timestamp will be removed.
|
||||
|
||||
Response:
|
||||
|
||||
```json
|
||||
{
|
||||
"deleted": 10
|
||||
}
|
||||
```
|
||||
|
||||
The following fields are returned in the JSON response body:
|
||||
|
||||
* `deleted`: integer - The number of media items successfully deleted
|
||||
|
||||
To use it, you will need to authenticate by providing an `access_token` for a
|
||||
server admin: see [README.rst](README.rst).
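Putting this together, a request that purges everything not accessed in the last 30 days might look like this (illustrative only; assumes a `date` that supports `+%s` and an admin access token in `$ACCESS_TOKEN`):

```sh
before_ts=$(( ( $(date +%s) - 30*24*60*60 ) * 1000 ))   # Unix time in milliseconds
curl -X POST -H "Authorization: Bearer $ACCESS_TOKEN" \
  -d '{}' \
  "http://localhost:8008/_synapse/admin/v1/purge_media_cache?before_ts=${before_ts}"
```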
If the user re-requests purged remote media, synapse will re-request the media
|
||||
from the originating server.
|
||||
|
||||
20
docs/admin_api/purge_remote_media.rst
Normal file
@@ -0,0 +1,20 @@
|
||||
Purge Remote Media API
|
||||
======================
|
||||
|
||||
The purge remote media API allows server admins to purge old cached remote
|
||||
media.
|
||||
|
||||
The API is::
|
||||
|
||||
POST /_synapse/admin/v1/purge_media_cache?before_ts=<unix_timestamp_in_ms>
|
||||
|
||||
{}
|
||||
|
||||
\... which will remove all cached media that was last accessed before
|
||||
``<unix_timestamp_in_ms>``.
|
||||
|
||||
To use it, you will need to authenticate by providing an ``access_token`` for a
|
||||
server admin: see `README.rst <README.rst>`_.
|
||||
|
||||
If the user re-requests purged remote media, synapse will re-request the media
|
||||
from the originating server.
|
||||
@@ -1,13 +1,12 @@
|
||||
Deprecated: Purge room API
|
||||
==========================
|
||||
|
||||
**The old Purge room API is deprecated and will be removed in a future release.
|
||||
See the new [Delete Room API](rooms.md#delete-room-api) for more details.**
|
||||
Purge room API
|
||||
==============
|
||||
|
||||
This API will remove all trace of a room from your database.
|
||||
|
||||
All local users must have left the room before it can be removed.
|
||||
|
||||
See also: [Delete Room API](rooms.md#delete-room-api)
|
||||
|
||||
The API is:
|
||||
|
||||
```
|
||||
|
||||
@@ -1,17 +1,3 @@
|
||||
# Contents
|
||||
- [List Room API](#list-room-api)
|
||||
* [Parameters](#parameters)
|
||||
* [Usage](#usage)
|
||||
- [Room Details API](#room-details-api)
|
||||
- [Room Members API](#room-members-api)
|
||||
- [Delete Room API](#delete-room-api)
|
||||
* [Parameters](#parameters-1)
|
||||
* [Response](#response)
|
||||
* [Undoing room shutdowns](#undoing-room-shutdowns)
|
||||
- [Make Room Admin API](#make-room-admin-api)
|
||||
- [Forward Extremities Admin API](#forward-extremities-admin-api)
|
||||
- [Event Context API](#event-context-api)
|
||||
|
||||
# List Room API
|
||||
|
||||
The List Room admin API allows server admins to get a list of rooms on their
|
||||
@@ -90,7 +76,7 @@ GET /_synapse/admin/v1/rooms
|
||||
|
||||
Response:
|
||||
|
||||
```jsonc
|
||||
```
|
||||
{
|
||||
"rooms": [
|
||||
{
|
||||
@@ -142,7 +128,7 @@ GET /_synapse/admin/v1/rooms?search_term=TWIM
|
||||
|
||||
Response:
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"rooms": [
|
||||
{
|
||||
@@ -177,7 +163,7 @@ GET /_synapse/admin/v1/rooms?order_by=size
|
||||
|
||||
Response:
|
||||
|
||||
```jsonc
|
||||
```
|
||||
{
|
||||
"rooms": [
|
||||
{
|
||||
@@ -233,14 +219,14 @@ GET /_synapse/admin/v1/rooms?order_by=size&from=100
|
||||
|
||||
Response:
|
||||
|
||||
```jsonc
|
||||
```
|
||||
{
|
||||
"rooms": [
|
||||
{
|
||||
"room_id": "!mscvqgqpHYjBGDxNym:matrix.org",
|
||||
"name": "Music Theory",
|
||||
"canonical_alias": "#musictheory:matrix.org",
|
||||
"joined_members": 127,
|
||||
"joined_members": 127
|
||||
"joined_local_members": 2,
|
||||
"version": "1",
|
||||
"creator": "@foo:matrix.org",
|
||||
@@ -257,7 +243,7 @@ Response:
|
||||
"room_id": "!twcBhHVdZlQWuuxBhN:termina.org.uk",
|
||||
"name": "weechat-matrix",
|
||||
"canonical_alias": "#weechat-matrix:termina.org.uk",
|
||||
"joined_members": 137,
|
||||
"joined_members": 137
|
||||
"joined_local_members": 20,
|
||||
"version": "4",
|
||||
"creator": "@foo:termina.org.uk",
|
||||
@@ -292,7 +278,6 @@ The following fields are possible in the JSON response body:
|
||||
* `canonical_alias` - The canonical (main) alias address of the room.
|
||||
* `joined_members` - How many users are currently in the room.
|
||||
* `joined_local_members` - How many local users are currently in the room.
|
||||
* `joined_local_devices` - How many local devices are currently in the room.
|
||||
* `version` - The version of the room as a string.
|
||||
* `creator` - The `user_id` of the room creator.
|
||||
* `encryption` - Algorithm of end-to-end encryption of messages. Is `null` if encryption is not active.
|
||||
@@ -315,16 +300,15 @@ GET /_synapse/admin/v1/rooms/<room_id>
|
||||
|
||||
Response:
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"room_id": "!mscvqgqpHYjBGDxNym:matrix.org",
|
||||
"name": "Music Theory",
|
||||
"avatar": "mxc://matrix.org/AQDaVFlbkQoErdOgqWRgiGSV",
|
||||
"topic": "Theory, Composition, Notation, Analysis",
|
||||
"canonical_alias": "#musictheory:matrix.org",
|
||||
"joined_members": 127,
|
||||
"joined_members": 127
|
||||
"joined_local_members": 2,
|
||||
"joined_local_devices": 2,
|
||||
"version": "1",
|
||||
"creator": "@foo:matrix.org",
|
||||
"encryption": null,
|
||||
@@ -358,51 +342,23 @@ GET /_synapse/admin/v1/rooms/<room_id>/members
|
||||
|
||||
Response:
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"members": [
|
||||
"@foo:matrix.org",
|
||||
"@bar:matrix.org",
|
||||
"@foobar:matrix.org"
|
||||
],
|
||||
"@foobar:matrix.org
|
||||
],
|
||||
"total": 3
|
||||
}
|
||||
```
|
||||
|
||||
# Room State API
|
||||
|
||||
The Room State admin API allows server admins to get a list of all state events in a room.
|
||||
|
||||
The response includes the following fields:
|
||||
|
||||
* `state` - The current state of the room at the time of request.
|
||||
|
||||
## Usage
|
||||
|
||||
A standard request:
|
||||
|
||||
```
|
||||
GET /_synapse/admin/v1/rooms/<room_id>/state
|
||||
|
||||
{}
|
||||
```
|
||||
|
||||
Response:
|
||||
|
||||
```json
|
||||
{
|
||||
"state": [
|
||||
{"type": "m.room.create", "state_key": "", "etc": true},
|
||||
{"type": "m.room.power_levels", "state_key": "", "etc": true},
|
||||
{"type": "m.room.name", "state_key": "", "etc": true}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
# Delete Room API
|
||||
|
||||
The Delete Room admin API allows server admins to remove rooms from the server
and block these rooms.
It is a combination and improvement of the "[Shutdown room](shutdown_room.md)"
and "[Purge room](purge_room.md)" APIs.
|
||||
|
||||
Shuts down a room. Moves all local users and room aliases automatically to a
|
||||
new room if `new_room_user_id` is set. Otherwise local users only
|
||||
@@ -426,7 +382,7 @@ the new room. Users on other servers will be unaffected.
|
||||
|
||||
The API is:
|
||||
|
||||
```
|
||||
```json
|
||||
POST /_synapse/admin/v1/rooms/<room_id>/delete
|
||||
```
|
||||
|
||||
@@ -483,10 +439,6 @@ The following JSON body parameters are available:
|
||||
future attempts to join the room. Defaults to `false`.
|
||||
* `purge` - Optional. If set to `true`, it will remove all traces of the room from your database.
|
||||
Defaults to `true`.
|
||||
* `force_purge` - Optional, and ignored unless `purge` is `true`. If set to `true`, it
|
||||
will force a purge to go ahead even if there are local users still in the room. Do not
|
||||
use this unless a regular `purge` operation fails, as it could leave those users'
|
||||
clients in a confused state.
|
||||
|
||||
The JSON body must not be empty. The body must be at least `{}`.
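
For example, a request body that moves local users to a new room, blocks the old room,
and purges it might look like the following. This is an illustrative sketch using the
parameters described above (the admin user ID is a placeholder, and `block` is assumed
to be the name of the parameter described above that prevents future join attempts):

```json
{
    "new_room_user_id": "@admin:example.com",
    "block": true,
    "purge": true
}
```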
|
||||
|
||||
@@ -499,217 +451,3 @@ The following fields are returned in the JSON response body:
|
||||
* `local_aliases` - An array of strings representing the local aliases that were migrated from
|
||||
the old room to the new.
|
||||
* `new_room_id` - A string representing the room ID of the new room.
|
||||
|
||||
|
||||
## Undoing room shutdowns
|
||||
|
||||
*Note*: This guide may be outdated by the time you read it. By nature of room shutdowns being performed at the database level,
|
||||
the structure can and does change without notice.
|
||||
|
||||
First, it's important to understand that a room shutdown is very destructive. Undoing a shutdown is not as simple as pretending it
|
||||
never happened - work has to be done to move forward instead of resetting the past. In fact, in some cases it might not be possible
|
||||
to recover at all:
|
||||
|
||||
* If the room was invite-only, your users will need to be re-invited.
|
||||
* If the room no longer has any members at all, it'll be impossible to rejoin.
|
||||
* The first user to rejoin will have to do so via an alias on a different server.
|
||||
|
||||
With all that being said, if you still want to try and recover the room:
|
||||
|
||||
1. For safety reasons, shut down Synapse.
2. In the database, run `DELETE FROM blocked_rooms WHERE room_id = '!example:example.org';`
   * For caution: it's recommended to run this in a transaction: `BEGIN; DELETE ...;`, verify you got 1 result, then `COMMIT;` (see the example transaction below).
   * The room ID is the same one supplied to the shutdown room API, not the Content Violation room.
3. Restart Synapse.
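
A minimal sketch of that transaction (the room ID is a placeholder):

```sql
BEGIN;
-- Unblock the room; this should affect exactly one row.
DELETE FROM blocked_rooms WHERE room_id = '!example:example.org';
-- If the reported row count is not 1, ROLLBACK instead of committing.
COMMIT;
```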
|
||||
|
||||
You will have to manually handle, if you so choose, the following:
|
||||
|
||||
* Aliases that would have been redirected to the Content Violation room.
|
||||
* Users that would have been booted from the room (and will have been force-joined to the Content Violation room).
|
||||
* Removal of the Content Violation room if desired.
|
||||
|
||||
|
||||
# Make Room Admin API
|
||||
|
||||
Grants another user the highest power available to a local user who is in the room.
|
||||
If the user is not in the room, and it is not publicly joinable, then invite the user.
|
||||
|
||||
By default the server admin (the caller) is granted power, but another user can
|
||||
optionally be specified, e.g.:
|
||||
|
||||
```
|
||||
POST /_synapse/admin/v1/rooms/<room_id_or_alias>/make_room_admin
|
||||
{
|
||||
"user_id": "@foo:example.com"
|
||||
}
|
||||
```
|
||||
|
||||
# Forward Extremities Admin API
|
||||
|
||||
Enables querying and deleting forward extremities from rooms. When a lot of forward
|
||||
extremities accumulate in a room, performance can become degraded. For details, see
|
||||
[#1760](https://github.com/matrix-org/synapse/issues/1760).
|
||||
|
||||
## Check for forward extremities
|
||||
|
||||
To check the status of forward extremities for a room:
|
||||
|
||||
```
|
||||
GET /_synapse/admin/v1/rooms/<room_id_or_alias>/forward_extremities
|
||||
```
|
||||
|
||||
A response as follows will be returned:
|
||||
|
||||
```json
|
||||
{
|
||||
"count": 1,
|
||||
"results": [
|
||||
{
|
||||
"event_id": "$M5SP266vsnxctfwFgFLNceaCo3ujhRtg_NiiHabcdefgh",
|
||||
"state_group": 439,
|
||||
"depth": 123,
|
||||
"received_ts": 1611263016761
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
## Deleting forward extremities
|
||||
|
||||
**WARNING**: Please ensure you know what you're doing and have read
|
||||
the related issue [#1760](https://github.com/matrix-org/synapse/issues/1760).
|
||||
Under no circumstances should this API be executed as an automated maintenance task!
|
||||
|
||||
If a room has lots of forward extremities, the extras can be
|
||||
deleted as follows:
|
||||
|
||||
```
|
||||
DELETE /_synapse/admin/v1/rooms/<room_id_or_alias>/forward_extremities
|
||||
```
|
||||
|
||||
A response as follows will be returned, indicating the number of forward extremities
|
||||
that were deleted.
|
||||
|
||||
```json
|
||||
{
|
||||
"deleted": 1
|
||||
}
|
||||
```
|
||||
|
||||
# Event Context API
|
||||
|
||||
This API lets a client find the context of an event. This is designed primarily to investigate abuse reports.
|
||||
|
||||
```
|
||||
GET /_synapse/admin/v1/rooms/<room_id>/context/<event_id>
|
||||
```
|
||||
|
||||
This API mimics [GET /_matrix/client/r0/rooms/{roomId}/context/{eventId}](https://matrix.org/docs/spec/client_server/r0.6.1#get-matrix-client-r0-rooms-roomid-context-eventid). Please refer to the link for all details on parameters and response.
|
||||
|
||||
Example response:
|
||||
|
||||
```json
|
||||
{
|
||||
"end": "t29-57_2_0_2",
|
||||
"events_after": [
|
||||
{
|
||||
"content": {
|
||||
"body": "This is an example text message",
|
||||
"msgtype": "m.text",
|
||||
"format": "org.matrix.custom.html",
|
||||
"formatted_body": "<b>This is an example text message</b>"
|
||||
},
|
||||
"type": "m.room.message",
|
||||
"event_id": "$143273582443PhrSn:example.org",
|
||||
"room_id": "!636q39766251:example.com",
|
||||
"sender": "@example:example.org",
|
||||
"origin_server_ts": 1432735824653,
|
||||
"unsigned": {
|
||||
"age": 1234
|
||||
}
|
||||
}
|
||||
],
|
||||
"event": {
|
||||
"content": {
|
||||
"body": "filename.jpg",
|
||||
"info": {
|
||||
"h": 398,
|
||||
"w": 394,
|
||||
"mimetype": "image/jpeg",
|
||||
"size": 31037
|
||||
},
|
||||
"url": "mxc://example.org/JWEIFJgwEIhweiWJE",
|
||||
"msgtype": "m.image"
|
||||
},
|
||||
"type": "m.room.message",
|
||||
"event_id": "$f3h4d129462ha:example.com",
|
||||
"room_id": "!636q39766251:example.com",
|
||||
"sender": "@example:example.org",
|
||||
"origin_server_ts": 1432735824653,
|
||||
"unsigned": {
|
||||
"age": 1234
|
||||
}
|
||||
},
|
||||
"events_before": [
|
||||
{
|
||||
"content": {
|
||||
"body": "something-important.doc",
|
||||
"filename": "something-important.doc",
|
||||
"info": {
|
||||
"mimetype": "application/msword",
|
||||
"size": 46144
|
||||
},
|
||||
"msgtype": "m.file",
|
||||
"url": "mxc://example.org/FHyPlCeYUSFFxlgbQYZmoEoe"
|
||||
},
|
||||
"type": "m.room.message",
|
||||
"event_id": "$143273582443PhrSn:example.org",
|
||||
"room_id": "!636q39766251:example.com",
|
||||
"sender": "@example:example.org",
|
||||
"origin_server_ts": 1432735824653,
|
||||
"unsigned": {
|
||||
"age": 1234
|
||||
}
|
||||
}
|
||||
],
|
||||
"start": "t27-54_2_0_2",
|
||||
"state": [
|
||||
{
|
||||
"content": {
|
||||
"creator": "@example:example.org",
|
||||
"room_version": "1",
|
||||
"m.federate": true,
|
||||
"predecessor": {
|
||||
"event_id": "$something:example.org",
|
||||
"room_id": "!oldroom:example.org"
|
||||
}
|
||||
},
|
||||
"type": "m.room.create",
|
||||
"event_id": "$143273582443PhrSn:example.org",
|
||||
"room_id": "!636q39766251:example.com",
|
||||
"sender": "@example:example.org",
|
||||
"origin_server_ts": 1432735824653,
|
||||
"unsigned": {
|
||||
"age": 1234
|
||||
},
|
||||
"state_key": ""
|
||||
},
|
||||
{
|
||||
"content": {
|
||||
"membership": "join",
|
||||
"avatar_url": "mxc://example.org/SEsfnsuifSDFSSEF",
|
||||
"displayname": "Alice Margatroid"
|
||||
},
|
||||
"type": "m.room.member",
|
||||
"event_id": "$143273582443PhrSn:example.org",
|
||||
"room_id": "!636q39766251:example.com",
|
||||
"sender": "@example:example.org",
|
||||
"origin_server_ts": 1432735824653,
|
||||
"unsigned": {
|
||||
"age": 1234
|
||||
},
|
||||
"state_key": "@alice:example.org"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
@@ -1,7 +1,4 @@
|
||||
# Deprecated: Shutdown room API
|
||||
|
||||
**The old Shutdown room API is deprecated and will be removed in a future release.
|
||||
See the new [Delete Room API](rooms.md#delete-room-api) for more details.**
|
||||
# Shutdown room API
|
||||
|
||||
Shuts down a room, preventing new joins and moves local users and room aliases automatically
|
||||
to a new room. The new room will be created with the user specified by the
|
||||
@@ -13,6 +10,8 @@ disallow any further invites or joins.
|
||||
The local server will only have the power to move local user and room aliases to
|
||||
the new room. Users on other servers will be unaffected.
|
||||
|
||||
See also: [Delete Room API](rooms.md#delete-room-api)
|
||||
|
||||
## API
|
||||
|
||||
You will need to authenticate with an access token for an admin user.
|
||||
|
||||
@@ -29,14 +29,8 @@ It returns a JSON body like the following:
|
||||
}
|
||||
],
|
||||
"avatar_url": "<avatar_url>",
|
||||
"admin": 0,
|
||||
"deactivated": 0,
|
||||
"shadow_banned": 0,
|
||||
"password_hash": "$2b$12$p9B4GkqYdRTPGD",
|
||||
"creation_ts": 1560432506,
|
||||
"appservice_id": null,
|
||||
"consent_server_notice_sent": null,
|
||||
"consent_version": null
|
||||
"admin": false,
|
||||
"deactivated": false
|
||||
}
|
||||
|
||||
URL parameters:
|
||||
@@ -99,8 +93,6 @@ Body parameters:
|
||||
|
||||
- ``deactivated``, optional. If unspecified, deactivation state will be left
|
||||
unchanged on existing accounts and set to ``false`` for new accounts.
|
||||
A user cannot be erased by deactivating with this API. For details on deactivating users see
|
||||
`Deactivate Account <#deactivate-account>`_.
|
||||
|
||||
If the user already exists then optional parameters default to the current value.
|
||||
|
||||
@@ -111,16 +103,35 @@ List Accounts
|
||||
=============
|
||||
|
||||
This API returns all local user accounts.
|
||||
By default, the response is ordered by ascending user ID.
|
||||
|
||||
The API is::
|
||||
The api is::
|
||||
|
||||
GET /_synapse/admin/v2/users?from=0&limit=10&guests=false
|
||||
|
||||
To use it, you will need to authenticate by providing an ``access_token`` for a
|
||||
server admin: see `README.rst <README.rst>`_.
|
||||
|
||||
A response body like the following is returned:
|
||||
The parameter ``from`` is optional but used for pagination, denoting the
|
||||
offset in the returned results. This should be treated as an opaque value and
|
||||
not explicitly set to anything other than the return value of ``next_token``
|
||||
from a previous call.
|
||||
|
||||
The parameter ``limit`` is optional but is used for pagination, denoting the
|
||||
maximum number of items to return in this call. Defaults to ``100``.
|
||||
|
||||
The parameter ``user_id`` is optional and filters to only return users with user IDs
|
||||
that contain this value. This parameter is ignored when using the ``name`` parameter.
|
||||
|
||||
The parameter ``name`` is optional and filters to only return users with user ID localparts
|
||||
**or** displaynames that contain this value.
|
||||
|
||||
The parameter ``guests`` is optional and if ``false`` will **exclude** guest users.
|
||||
Defaults to ``true`` to include guest users.
|
||||
|
||||
The parameter ``deactivated`` is optional and if ``true`` will **include** deactivated users.
|
||||
Defaults to ``false`` to exclude deactivated users.
|
||||
|
||||
A JSON body is returned with the following shape:
|
||||
|
||||
.. code:: json
|
||||
|
||||
@@ -128,20 +139,20 @@ A response body like the following is returned:
|
||||
"users": [
|
||||
{
|
||||
"name": "<user_id1>",
|
||||
"password_hash": "<password_hash1>",
|
||||
"is_guest": 0,
|
||||
"admin": 0,
|
||||
"user_type": null,
|
||||
"deactivated": 0,
|
||||
"shadow_banned": 0,
|
||||
"displayname": "<User One>",
|
||||
"avatar_url": null
|
||||
}, {
|
||||
"name": "<user_id2>",
|
||||
"password_hash": "<password_hash2>",
|
||||
"is_guest": 0,
|
||||
"admin": 1,
|
||||
"user_type": null,
|
||||
"deactivated": 0,
|
||||
"shadow_banned": 0,
|
||||
"displayname": "<User Two>",
|
||||
"avatar_url": "<avatar_url>"
|
||||
}
|
||||
@@ -156,66 +167,6 @@ with ``from`` set to the value of ``next_token``. This will return a new page.
|
||||
If the endpoint does not return a ``next_token`` then there are no more users
|
||||
to paginate through.
|
||||
|
||||
**Parameters**
|
||||
|
||||
The following parameters should be set in the URL:
|
||||
|
||||
- ``user_id`` - Is optional and filters to only return users with user IDs
|
||||
that contain this value. This parameter is ignored when using the ``name`` parameter.
|
||||
- ``name`` - Is optional and filters to only return users with user ID localparts
|
||||
**or** displaynames that contain this value.
|
||||
- ``guests`` - string representing a bool - Is optional and if ``false`` will **exclude** guest users.
|
||||
Defaults to ``true`` to include guest users.
|
||||
- ``deactivated`` - string representing a bool - Is optional and if ``true`` will **include** deactivated users.
|
||||
Defaults to ``false`` to exclude deactivated users.
|
||||
- ``limit`` - string representing a positive integer - Is optional but is used for pagination,
|
||||
denoting the maximum number of items to return in this call. Defaults to ``100``.
|
||||
- ``from`` - string representing a positive integer - Is optional but used for pagination,
|
||||
denoting the offset in the returned results. This should be treated as an opaque value and
|
||||
not explicitly set to anything other than the return value of ``next_token`` from a previous call.
|
||||
Defaults to ``0``.
|
||||
- ``order_by`` - The method by which to sort the returned list of users.
|
||||
If the ordered field has duplicates, the second order is always by ascending ``name``,
|
||||
which guarantees a stable ordering. Valid values are:
|
||||
|
||||
- ``name`` - Users are ordered alphabetically by ``name``. This is the default.
|
||||
- ``is_guest`` - Users are ordered by ``is_guest`` status.
|
||||
- ``admin`` - Users are ordered by ``admin`` status.
|
||||
- ``user_type`` - Users are ordered alphabetically by ``user_type``.
|
||||
- ``deactivated`` - Users are ordered by ``deactivated`` status.
|
||||
- ``shadow_banned`` - Users are ordered by ``shadow_banned`` status.
|
||||
- ``displayname`` - Users are ordered alphabetically by ``displayname``.
|
||||
- ``avatar_url`` - Users are ordered alphabetically by avatar URL.
|
||||
|
||||
- ``dir`` - Direction of sort order. Either ``f`` for forwards or ``b`` for backwards.
|
||||
Setting this value to ``b`` will reverse the above sort order. Defaults to ``f``.
|
||||
|
||||
Caution. The database only has indexes on the columns ``name`` and ``created_ts``.
|
||||
This means that if a different sort order is used (``is_guest``, ``admin``,
|
||||
``user_type``, ``deactivated``, ``shadow_banned``, ``avatar_url`` or ``displayname``),
|
||||
this can cause a large load on the database, especially for large environments.
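
For example, to order the list by display name in reverse order (values are illustrative)::

    GET /_synapse/admin/v2/users?order_by=displayname&dir=b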
|
||||
|
||||
**Response**
|
||||
|
||||
The following fields are returned in the JSON response body:
|
||||
|
||||
- ``users`` - An array of objects, each containing information about a user.
|
||||
User objects contain the following fields:
|
||||
|
||||
- ``name`` - string - Fully-qualified user ID (ex. ``@user:server.com``).
|
||||
- ``is_guest`` - bool - Status if that user is a guest account.
|
||||
- ``admin`` - bool - Status if that user is a server administrator.
|
||||
- ``user_type`` - string - Type of the user. Normal users are type ``None``.
|
||||
This allows user type specific behaviour. There are also types ``support`` and ``bot``.
|
||||
- ``deactivated`` - bool - Status if that user has been marked as deactivated.
|
||||
- ``shadow_banned`` - bool - Status if that user has been marked as shadow banned.
|
||||
- ``displayname`` - string - The user's display name if they have set one.
|
||||
- ``avatar_url`` - string - The user's avatar URL if they have set one.
|
||||
|
||||
- ``next_token``: string representing a positive integer - Indication for pagination. See above.
|
||||
- ``total`` - integer - Total number of users.
|
||||
|
||||
|
||||
Query current sessions for a user
|
||||
=================================
|
||||
|
||||
@@ -225,13 +176,6 @@ The api is::
|
||||
|
||||
GET /_synapse/admin/v1/whois/<user_id>
|
||||
|
||||
and::
|
||||
|
||||
GET /_matrix/client/r0/admin/whois/<userId>
|
||||
|
||||
See also: `Client Server API Whois
|
||||
<https://matrix.org/docs/spec/client_server/r0.6.1#get-matrix-client-r0-admin-whois-userid>`_
|
||||
|
||||
To use it, you will need to authenticate by providing an ``access_token`` for a
|
||||
server admin: see `README.rst <README.rst>`_.
|
||||
|
||||
@@ -294,25 +238,6 @@ server admin: see `README.rst <README.rst>`_.
|
||||
The erase parameter is optional and defaults to ``false``.
|
||||
An empty body may be passed for backwards compatibility.
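
For example, to deactivate an account and also erase it, a minimal request body sent to
the deactivation endpoint described above would be:

.. code:: json

    {
        "erase": true
    }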
|
||||
|
||||
The following actions are performed when deactivating a user:

- Try to unbind 3PIDs from the identity server
|
||||
- Remove all 3PIDs from the homeserver
|
||||
- Delete all devices and E2EE keys
|
||||
- Delete all access tokens
|
||||
- Delete the password hash
|
||||
- Removal from all rooms the user is a member of
|
||||
- Remove the user from the user directory
|
||||
- Reject all pending invites
|
||||
- Remove all account validity information related to the user
|
||||
|
||||
The following additional actions are performed during deactivation if ``erase``
|
||||
is set to ``true``:
|
||||
|
||||
- Remove the user's display name
|
||||
- Remove the user's avatar URL
|
||||
- Mark the user as erased
|
||||
|
||||
|
||||
Reset password
|
||||
==============
|
||||
@@ -329,7 +254,7 @@ with a body of:
|
||||
|
||||
{
|
||||
"new_password": "<secret>",
|
||||
"logout_devices": true
|
||||
"logout_devices": true,
|
||||
}
|
||||
|
||||
To use it, you will need to authenticate by providing an ``access_token`` for a
|
||||
@@ -402,10 +327,6 @@ A response body like the following is returned:
|
||||
"total": 2
|
||||
}
|
||||
|
||||
The server returns the list of rooms of which the user and the server
|
||||
are members. If the user is local, all the rooms of which the user is
a member are returned.
|
||||
|
||||
**Parameters**
|
||||
|
||||
The following parameters should be set in the URL:
|
||||
@@ -420,12 +341,11 @@ The following fields are returned in the JSON response body:
|
||||
- ``total`` - Number of rooms.
|
||||
|
||||
|
||||
List media of a user
|
||||
====================
|
||||
List media of an user
|
||||
================================
|
||||
Gets a list of all local media that a specific ``user_id`` has created.
|
||||
By default, the response is ordered by descending creation date and ascending media ID.
|
||||
The newest media is on top. You can change the order with parameters
|
||||
``order_by`` and ``dir``.
|
||||
The response is ordered by creation date descending and media ID descending.
|
||||
The newest media is on top.
|
||||
|
||||
The API is::
|
||||
|
||||
@@ -482,35 +402,6 @@ The following parameters should be set in the URL:
|
||||
denoting the offset in the returned results. This should be treated as an opaque value and
|
||||
not explicitly set to anything other than the return value of ``next_token`` from a previous call.
|
||||
Defaults to ``0``.
|
||||
- ``order_by`` - The method by which to sort the returned list of media.
|
||||
If the ordered field has duplicates, the second order is always by ascending ``media_id``,
|
||||
which guarantees a stable ordering. Valid values are:
|
||||
|
||||
- ``media_id`` - Media are ordered alphabetically by ``media_id``.
|
||||
- ``upload_name`` - Media are ordered alphabetically by name the media was uploaded with.
|
||||
- ``created_ts`` - Media are ordered by when the content was uploaded in ms.
|
||||
Smallest to largest. This is the default.
|
||||
- ``last_access_ts`` - Media are ordered by when the content was last accessed in ms.
|
||||
Smallest to largest.
|
||||
- ``media_length`` - Media are ordered by length of the media in bytes.
|
||||
Smallest to largest.
|
||||
- ``media_type`` - Media are ordered alphabetically by MIME-type.
|
||||
- ``quarantined_by`` - Media are ordered alphabetically by the user ID that
|
||||
initiated the quarantine request for this media.
|
||||
- ``safe_from_quarantine`` - Media are ordered by the status if this media is safe
|
||||
from quarantining.
|
||||
|
||||
- ``dir`` - Direction of media order. Either ``f`` for forwards or ``b`` for backwards.
|
||||
Setting this value to ``b`` will reverse the above sort order. Defaults to ``f``.
|
||||
|
||||
If neither ``order_by`` nor ``dir`` is set, the default order is newest media on top
|
||||
(corresponds to ``order_by`` = ``created_ts`` and ``dir`` = ``b``).
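
For example, appending the following query parameters to the endpoint above would list
the largest media first (illustrative values)::

    ?order_by=media_length&dir=b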
|
||||
|
||||
Caution. The database only has indexes on the columns ``media_id``,
|
||||
``user_id`` and ``created_ts``. This means that if a different sort order is used
|
||||
(``upload_name``, ``last_access_ts``, ``media_length``, ``media_type``,
|
||||
``quarantined_by`` or ``safe_from_quarantine``), this can cause a large load on the
|
||||
database, especially for large environments.
|
||||
|
||||
**Response**
|
||||
|
||||
@@ -533,41 +424,6 @@ The following fields are returned in the JSON response body:
|
||||
- ``next_token``: integer - Indication for pagination. See above.
|
||||
- ``total`` - integer - Total number of media.
|
||||
|
||||
Login as a user
|
||||
===============
|
||||
|
||||
Get an access token that can be used to authenticate as that user. Useful for
|
||||
when admins wish to do actions on behalf of a user.
|
||||
|
||||
The API is::
|
||||
|
||||
POST /_synapse/admin/v1/users/<user_id>/login
|
||||
{}
|
||||
|
||||
An optional ``valid_until_ms`` field can be specified in the request body as an
|
||||
integer timestamp that specifies when the token should expire. By default tokens
|
||||
do not expire.
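
For instance, to request a token that expires at a particular time (the timestamp below
is illustrative)::

    POST /_synapse/admin/v1/users/<user_id>/login

    {
        "valid_until_ms": 1610000000000
    }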
|
||||
|
||||
A response body like the following is returned:
|
||||
|
||||
.. code:: json
|
||||
|
||||
{
|
||||
"access_token": "<opaque_access_token_string>"
|
||||
}
|
||||
|
||||
|
||||
This API does *not* generate a new device for the user, and so will not appear
in their ``/devices`` list, and in general the target user should not be able to
tell they have been logged in as.
|
||||
|
||||
To expire the token call the standard ``/logout`` API with the token.
|
||||
|
||||
Note: The token will expire if the *admin* user calls ``/logout/all`` from any
|
||||
of their devices, but the token will *not* expire if the target user does the
|
||||
same.
|
||||
|
||||
|
||||
User devices
|
||||
============
|
||||
|
||||
@@ -834,148 +690,3 @@ The following fields are returned in the JSON response body:
|
||||
- ``total`` - integer - Number of pushers.
|
||||
|
||||
See also `Client-Server API Spec <https://matrix.org/docs/spec/client_server/latest#get-matrix-client-r0-pushers>`_
|
||||
|
||||
Shadow-banning users
|
||||
====================
|
||||
|
||||
Shadow-banning is a useful tool for moderating malicious or egregiously abusive users.
|
||||
A shadow-banned user receives successful responses to their client-server API requests,
|
||||
but the events are not propagated into rooms. This can be an effective tool as it
|
||||
(hopefully) takes longer for the user to realise they are being moderated before
|
||||
pivoting to another account.
|
||||
|
||||
Shadow-banning a user should be used as a tool of last resort and may lead to confusing
|
||||
or broken behaviour for the client. A shadow-banned user will not receive any
|
||||
notification and it is generally more appropriate to ban or kick abusive users.
|
||||
A shadow-banned user will be unable to contact anyone on the server.
|
||||
|
||||
The API is::
|
||||
|
||||
POST /_synapse/admin/v1/users/<user_id>/shadow_ban
|
||||
|
||||
To use it, you will need to authenticate by providing an ``access_token`` for a
|
||||
server admin: see `README.rst <README.rst>`_.
|
||||
|
||||
An empty JSON dict is returned.
|
||||
|
||||
**Parameters**
|
||||
|
||||
The following parameters should be set in the URL:
|
||||
|
||||
- ``user_id`` - The fully qualified MXID: for example, ``@user:server.com``. The user must
|
||||
be local.
|
||||
|
||||
Override ratelimiting for users
|
||||
===============================
|
||||
|
||||
This API allows you to override or disable ratelimiting for a specific user.
|
||||
There are specific APIs to set, get and delete a ratelimit.
|
||||
|
||||
Get status of ratelimit
|
||||
-----------------------
|
||||
|
||||
The API is::
|
||||
|
||||
GET /_synapse/admin/v1/users/<user_id>/override_ratelimit
|
||||
|
||||
To use it, you will need to authenticate by providing an ``access_token`` for a
|
||||
server admin: see `README.rst <README.rst>`_.
|
||||
|
||||
A response body like the following is returned:
|
||||
|
||||
.. code:: json
|
||||
|
||||
{
|
||||
"messages_per_second": 0,
|
||||
"burst_count": 0
|
||||
}
|
||||
|
||||
**Parameters**
|
||||
|
||||
The following parameters should be set in the URL:
|
||||
|
||||
- ``user_id`` - The fully qualified MXID: for example, ``@user:server.com``. The user must
|
||||
be local.
|
||||
|
||||
**Response**
|
||||
|
||||
The following fields are returned in the JSON response body:
|
||||
|
||||
- ``messages_per_second`` - integer - The number of actions that can
|
||||
be performed in a second. ``0`` means that ratelimiting is disabled for this user.
|
||||
- ``burst_count`` - integer - How many actions can be performed before
|
||||
being limited.
|
||||
|
||||
If **no** custom ratelimit is set, an empty JSON dict is returned.
|
||||
|
||||
.. code:: json
|
||||
|
||||
{}
|
||||
|
||||
Set ratelimit
|
||||
-------------
|
||||
|
||||
The API is::
|
||||
|
||||
POST /_synapse/admin/v1/users/<user_id>/override_ratelimit
|
||||
|
||||
To use it, you will need to authenticate by providing an ``access_token`` for a
|
||||
server admin: see `README.rst <README.rst>`_.
|
||||
|
||||
A response body like the following is returned:
|
||||
|
||||
.. code:: json
|
||||
|
||||
{
|
||||
"messages_per_second": 0,
|
||||
"burst_count": 0
|
||||
}
|
||||
|
||||
**Parameters**
|
||||
|
||||
The following parameters should be set in the URL:
|
||||
|
||||
- ``user_id`` - The fully qualified MXID: for example, ``@user:server.com``. The user must
|
||||
be local.
|
||||
|
||||
Body parameters:
|
||||
|
||||
- ``messages_per_second`` - positive integer, optional. The number of actions that can
|
||||
be performed in a second. Defaults to ``0``.
|
||||
- ``burst_count`` - positive integer, optional. How many actions can be performed
|
||||
before being limited. Defaults to ``0``.
|
||||
|
||||
To disable users' ratelimit set both values to ``0``.
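
For example, to allow one action per second with a burst of ten (illustrative values)::

    POST /_synapse/admin/v1/users/<user_id>/override_ratelimit

    {
        "messages_per_second": 1,
        "burst_count": 10
    }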
|
||||
|
||||
**Response**
|
||||
|
||||
The following fields are returned in the JSON response body:
|
||||
|
||||
- ``messages_per_second`` - integer - The number of actions that can
|
||||
be performed in a second.
|
||||
- ``burst_count`` - integer - How many actions can be performed before
|
||||
being limited.
|
||||
|
||||
Delete ratelimit
|
||||
----------------
|
||||
|
||||
The API is::
|
||||
|
||||
DELETE /_synapse/admin/v1/users/<user_id>/override_ratelimit
|
||||
|
||||
To use it, you will need to authenticate by providing an ``access_token`` for a
|
||||
server admin: see `README.rst <README.rst>`_.
|
||||
|
||||
An empty JSON dict is returned.
|
||||
|
||||
.. code:: json
|
||||
|
||||
{}
|
||||
|
||||
**Parameters**
|
||||
|
||||
The following parameters should be set in the URL:
|
||||
|
||||
- ``user_id`` - The fully qualified MXID: for example, ``@user:server.com``. The user must
|
||||
be local.
|
||||
|
||||
|
||||
@@ -1,32 +0,0 @@
|
||||
digraph auth {
|
||||
nodesep=0.5;
|
||||
rankdir="RL";
|
||||
|
||||
C [label="Create (1,1)"];
|
||||
|
||||
BJ [label="Bob's Join (2,1)", color=red];
|
||||
BJ2 [label="Bob's Join (2,2)", color=red];
|
||||
BJ2 -> BJ [color=red, dir=none];
|
||||
|
||||
subgraph cluster_foo {
|
||||
A1 [label="Alice's invite (4,1)", color=blue];
|
||||
A2 [label="Alice's Join (4,2)", color=blue];
|
||||
A3 [label="Alice's Join (4,3)", color=blue];
|
||||
A3 -> A2 -> A1 [color=blue, dir=none];
|
||||
color=none;
|
||||
}
|
||||
|
||||
PL1 [label="Power Level (3,1)", color=darkgreen];
|
||||
PL2 [label="Power Level (3,2)", color=darkgreen];
|
||||
PL2 -> PL1 [color=darkgreen, dir=none];
|
||||
|
||||
{rank = same; C; BJ; PL1; A1;}
|
||||
|
||||
A1 -> C [color=grey];
|
||||
A1 -> BJ [color=grey];
|
||||
PL1 -> C [color=grey];
|
||||
BJ2 -> PL1 [penwidth=2];
|
||||
|
||||
A3 -> PL2 [penwidth=2];
|
||||
A1 -> PL1 -> BJ -> C [penwidth=2];
|
||||
}
|
||||
Binary file not shown.
@@ -1,108 +0,0 @@
|
||||
# Auth Chain Difference Algorithm
|
||||
|
||||
The auth chain difference algorithm is used by V2 state resolution, where a
|
||||
naive implementation can be a significant source of CPU and DB usage.
|
||||
|
||||
### Definitions
|
||||
|
||||
A *state set* is a set of state events; e.g. the input of a state resolution
|
||||
algorithm is a collection of state sets.
|
||||
|
||||
The *auth chain* of a set of events are all the events' auth events and *their*
|
||||
auth events, recursively (i.e. the events reachable by walking the graph induced
|
||||
by an event's auth events links).
|
||||
|
||||
The *auth chain difference* of a collection of state sets is the union minus the
|
||||
intersection of the sets of auth chains corresponding to the state sets, i.e an
|
||||
event is in the auth chain difference if it is reachable by walking the auth
|
||||
event graph from at least one of the state sets but not from *all* of the state
|
||||
sets.
|
||||
|
||||
## Breadth First Walk Algorithm
|
||||
|
||||
A way of calculating the auth chain difference without calculating the full auth
|
||||
chains for each state set is to do a parallel breadth first walk (ordered by
|
||||
depth) of each state set's auth chain. By tracking which events are reachable
|
||||
from each state set we can finish early if every pending event is reachable from
|
||||
every state set.
|
||||
|
||||
This can work well for state sets that have a small auth chain difference, but
|
||||
can be very inefficient for larger differences. However, this algorithm is still
|
||||
used if we don't have a chain cover index for the room (e.g. because we're in
|
||||
the process of indexing it).
|
||||
|
||||
## Chain Cover Index
|
||||
|
||||
Synapse computes auth chain differences by pre-computing a "chain cover" index
|
||||
for the auth chain in a room, allowing efficient reachability queries like "is
|
||||
event A in the auth chain of event B". This is done by assigning every event a
|
||||
*chain ID* and *sequence number* (e.g. `(5,3)`), and having a map of *links*
|
||||
between chains (e.g. `(5,3) -> (2,4)`) such that A is reachable by B (i.e. `A`
|
||||
is in the auth chain of `B`) if and only if either:
|
||||
|
||||
1. A and B have the same chain ID and `A`'s sequence number is less than `B`'s
|
||||
sequence number; or
|
||||
2. there is a link `L` between `B`'s chain ID and `A`'s chain ID such that
|
||||
`L.start_seq_no` <= `B.seq_no` and `A.seq_no` <= `L.end_seq_no`.
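
The reachability test these two conditions describe can be sketched roughly as follows
(an illustrative sketch, not Synapse's actual code; the data structures are assumed for
the example, and multiple links may exist between a pair of chains):

```python
from typing import Dict, List, Tuple

# An event's position in the index: (chain ID, sequence number).
ChainPos = Tuple[int, int]
# links[(from_chain, to_chain)] is a list of (start_seq_no, end_seq_no) pairs.
Links = Dict[Tuple[int, int], List[Tuple[int, int]]]


def is_in_auth_chain(a: ChainPos, b: ChainPos, links: Links) -> bool:
    """Return True if event A is in the auth chain of event B."""
    a_chain, a_seq = a
    b_chain, b_seq = b

    # Condition 1: same chain ID, and A's sequence number is less than B's.
    if a_chain == b_chain:
        return a_seq < b_seq

    # Condition 2: some link L from B's chain to A's chain such that
    # L.start_seq_no <= B.seq_no and A.seq_no <= L.end_seq_no.
    for start_seq_no, end_seq_no in links.get((b_chain, a_chain), []):
        if start_seq_no <= b_seq and a_seq <= end_seq_no:
            return True
    return False
```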
|
||||
|
||||
There are actually two potential implementations, one where we store links from
|
||||
each chain to every other reachable chain (the transitive closure of the links
|
||||
graph), and one where we remove redundant links (the transitive reduction of the
|
||||
links graph) e.g. if we have chains `C3 -> C2 -> C1` then the link `C3 -> C1`
|
||||
would not be stored. Synapse uses the former implementation so that it doesn't
|
||||
need to recurse to test reachability between chains.
|
||||
|
||||
### Example
|
||||
|
||||
An example auth graph would look like the following, where chains have been
|
||||
formed based on type/state_key and are denoted by colour and are labelled with
|
||||
`(chain ID, sequence number)`. Links are denoted by the arrows (links in grey
|
||||
are those that would be removed in the second implementation described above).
|
||||
|
||||

|
||||
|
||||
Note that we don't include all links between events and their auth events, as
|
||||
most of those links would be redundant. For example, all events point to the
|
||||
create event, but each chain only needs the one link from its base to the
|
||||
create event.
|
||||
|
||||
## Using the Index
|
||||
|
||||
This index can be used to calculate the auth chain difference of the state sets
|
||||
by looking at the chain ID and sequence numbers reachable from each state set:
|
||||
|
||||
1. For every state set lookup the chain ID/sequence numbers of each state event
|
||||
2. Use the index to find all chains and the maximum sequence number reachable
|
||||
from each state set.
|
||||
3. The auth chain difference is then all events in each chain that have sequence
|
||||
numbers between the maximum sequence number reachable from *any* state set and
|
||||
the minimum reachable by *all* state sets (if any).
|
||||
|
||||
Note that step 2 is effectively calculating the auth chain for each state set
|
||||
(in terms of chain IDs and sequence numbers), and step 3 is calculating the
|
||||
difference between the union and intersection of the auth chains.
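
Steps 2 and 3 can be sketched as follows (again an illustrative sketch, not Synapse's
implementation; it assumes each state set has already been reduced to the maximum
sequence number it reaches in each chain, and that sequence numbers start at 1):

```python
from typing import Dict, List


def auth_chain_difference(reachable_per_set: List[Dict[int, int]]) -> Dict[int, range]:
    """For each chain, return the (exclusive, inclusive] range of sequence
    numbers that are in the auth chain difference of the given state sets."""
    all_chains = set()
    for reachable in reachable_per_set:
        all_chains.update(reachable)

    difference: Dict[int, range] = {}
    for chain_id in all_chains:
        # Maximum sequence number reachable from *any* state set.
        max_any = max(r.get(chain_id, 0) for r in reachable_per_set)
        # Minimum reachable by *all* state sets (0 if some set cannot reach
        # this chain at all).
        min_all = min(r.get(chain_id, 0) for r in reachable_per_set)
        if max_any > min_all:
            # Events with sequence numbers in (min_all, max_any] are in the
            # difference.
            difference[chain_id] = range(min_all + 1, max_any + 1)
    return difference
```

Applied to the worked example below, this returns `{2: range(2, 3), 3: range(2, 3), 4: range(2, 4)}`
(chain 1 drops out, since every state set reaches the create event).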
|
||||
|
||||
### Worked Example
|
||||
|
||||
For example, given the above graph, we can calculate the difference between
|
||||
state sets consisting of:
|
||||
|
||||
1. `S1`: Alice's invite `(4,1)` and Bob's second join `(2,2)`; and
|
||||
2. `S2`: Alice's second join `(4,3)` and Bob's first join `(2,1)`.
|
||||
|
||||
Using the index we see that the following auth chains are reachable from each
|
||||
state set:
|
||||
|
||||
1. `S1`: `(1,1)`, `(2,2)`, `(3,1)` & `(4,1)`
|
||||
2. `S2`: `(1,1)`, `(2,1)`, `(3,2)` & `(4,3)`
|
||||
|
||||
And so, for each chain, the ranges that are in the auth chain difference are:
|
||||
1. Chain 1: None, (since everything can reach the create event).
|
||||
2. Chain 2: The range `(1, 2]` (i.e. just `2`), as `1` is reachable by all state
|
||||
sets and the maximum reachable is `2` (corresponding to Bob's second join).
|
||||
3. Chain 3: Similarly the range `(1, 2]` (corresponding to the second power
|
||||
level).
|
||||
4. Chain 4: The range `(1, 3]` (corresponding to both of Alice's joins).
|
||||
|
||||
So the final result is: Bob's second join `(2,2)`, the second power level
|
||||
`(3,2)` and both of Alice's joins `(4,2)` & `(4,3)`.
|
||||
@@ -8,16 +8,16 @@ errors in code.
|
||||
|
||||
The necessary tools are detailed below.
|
||||
|
||||
First install them with:
|
||||
|
||||
pip install -e ".[lint,mypy]"
|
||||
|
||||
- **black**
|
||||
|
||||
The Synapse codebase uses [black](https://pypi.org/project/black/)
|
||||
as an opinionated code formatter, ensuring all committed code is
|
||||
properly formatted.
|
||||
|
||||
First install `black` with:
|
||||
|
||||
pip install --upgrade black
|
||||
|
||||
Have `black` auto-format your code (it shouldn't change any
|
||||
functionality) with:
|
||||
|
||||
@@ -28,6 +28,10 @@ First install them with:
|
||||
`flake8` is a code checking tool. We require code to pass `flake8`
|
||||
before being merged into the codebase.
|
||||
|
||||
Install `flake8` with:
|
||||
|
||||
pip install --upgrade flake8 flake8-comprehensions
|
||||
|
||||
Check all application and test code with:
|
||||
|
||||
flake8 synapse tests
|
||||
@@ -37,6 +41,10 @@ First install them with:
|
||||
`isort` ensures imports are nicely formatted, and can suggest and
|
||||
auto-fix issues such as double-importing.
|
||||
|
||||
Install `isort` with:
|
||||
|
||||
pip install --upgrade isort
|
||||
|
||||
Auto-fix imports with:
|
||||
|
||||
isort -rc synapse tests
|
||||
@@ -128,9 +136,6 @@ Some guidelines follow:
|
||||
will be if no sub-options are enabled).
|
||||
- Lines should be wrapped at 80 characters.
|
||||
- Use two-space indents.
|
||||
- `true` and `false` are spelt thus (as opposed to `True`, etc.)
|
||||
- Use single quotes (`'`) rather than double-quotes (`"`) or backticks
|
||||
(`` ` ``) to refer to configuration options.
|
||||
|
||||
Example:
|
||||
|
||||
|
||||
@@ -1,33 +0,0 @@
|
||||
Deprecation Policy for Platform Dependencies
|
||||
============================================
|
||||
|
||||
Synapse has a number of platform dependencies, including Python and PostgreSQL.
|
||||
This document outlines the policy towards which versions we support, and when we
|
||||
drop support for versions in the future.
|
||||
|
||||
|
||||
Policy
|
||||
------
|
||||
|
||||
Synapse follows the upstream support life cycles for Python and PostgreSQL,
|
||||
i.e. when a version reaches End of Life Synapse will withdraw support for that
|
||||
version in future releases.
|
||||
|
||||
Details on the upstream support life cycles for Python and PostgreSQL are
|
||||
documented at https://endoflife.date/python and
|
||||
https://endoflife.date/postgresql.
|
||||
|
||||
|
||||
Context
|
||||
-------
|
||||
|
||||
It is important for system admins to have a clear understanding of the platform
|
||||
requirements of Synapse and its deprecation policies so that they can
|
||||
effectively plan upgrading their infrastructure ahead of time. This is
|
||||
especially important in contexts where upgrading the infrastructure requires
|
||||
auditing and approval from a security team, or where otherwise upgrading is a
|
||||
long process.
|
||||
|
||||
By following the upstream support life cycles Synapse can ensure that its
|
||||
dependencies continue to get security patches, while not requiring system admins
|
||||
to constantly update their platform dependencies to the latest versions.
|
||||
@@ -31,7 +31,7 @@ easy to run CAS implementation built on top of Django.
|
||||
You should now have a Django project configured to serve CAS authentication with
|
||||
a single user created.
|
||||
|
||||
## Configure Synapse (and Element) to use CAS
|
||||
## Configure Synapse (and Riot) to use CAS
|
||||
|
||||
1. Modify your `homeserver.yaml` to enable CAS and point it to your locally
|
||||
running Django test server:
|
||||
@@ -51,9 +51,9 @@ and that the CAS server is on port 8000, both on localhost.
|
||||
|
||||
## Testing the configuration
|
||||
|
||||
Then in Element:
|
||||
Then in Riot:
|
||||
|
||||
1. Visit the login page with Element pointing at your homeserver.
|
||||
1. Visit the login page with a Riot pointing at your homeserver.
|
||||
2. Click the Single Sign-On button.
|
||||
3. Login using the credentials created with `createsuperuser`.
|
||||
4. You should be logged in.
|
||||
|
||||
@@ -13,12 +13,10 @@
|
||||
can be enabled by adding the \"metrics\" resource to the existing
|
||||
listener as such:
|
||||
|
||||
```yaml
|
||||
resources:
|
||||
- names:
|
||||
- client
|
||||
- metrics
|
||||
```
|
||||
resources:
|
||||
- names:
|
||||
- client
|
||||
- metrics
|
||||
|
||||
This provides a simple way of adding metrics to your Synapse
|
||||
installation, and serves under `/_synapse/metrics`. If you do not
|
||||
@@ -33,13 +31,11 @@
|
||||
|
||||
Add a new listener to homeserver.yaml:
|
||||
|
||||
```yaml
|
||||
listeners:
|
||||
- type: metrics
|
||||
port: 9000
|
||||
bind_addresses:
|
||||
- '0.0.0.0'
|
||||
```
|
||||
listeners:
|
||||
- type: metrics
|
||||
port: 9000
|
||||
bind_addresses:
|
||||
- '0.0.0.0'
|
||||
|
||||
For both options, you will need to ensure that `enable_metrics` is
|
||||
set to `True`.
|
||||
@@ -51,13 +47,10 @@
|
||||
It needs to set the `metrics_path` to a non-default value (under
|
||||
`scrape_configs`):
|
||||
|
||||
```yaml
|
||||
- job_name: "synapse"
|
||||
scrape_interval: 15s
|
||||
metrics_path: "/_synapse/metrics"
|
||||
static_configs:
|
||||
- targets: ["my.server.here:port"]
|
||||
```
|
||||
- job_name: "synapse"
|
||||
metrics_path: "/_synapse/metrics"
|
||||
static_configs:
|
||||
- targets: ["my.server.here:port"]
|
||||
|
||||
where `my.server.here` is the IP address of Synapse, and `port` is
|
||||
the listener port configured with the `metrics` resource.
|
||||
@@ -67,8 +60,7 @@
|
||||
|
||||
1. Restart Prometheus.
|
||||
|
||||
1. Consider using the [grafana dashboard](https://github.com/matrix-org/synapse/tree/master/contrib/grafana/)
|
||||
and required [recording rules](https://github.com/matrix-org/synapse/tree/master/contrib/prometheus/)
|
||||
1. Consider using the [grafana dashboard](https://github.com/matrix-org/synapse/tree/master/contrib/grafana/) and required [recording rules](https://github.com/matrix-org/synapse/tree/master/contrib/prometheus/)
|
||||
|
||||
## Monitoring workers
|
||||
|
||||
@@ -84,9 +76,9 @@ To allow collecting metrics from a worker, you need to add a
|
||||
under `worker_listeners`:
|
||||
|
||||
```yaml
|
||||
- type: metrics
|
||||
bind_address: ''
|
||||
port: 9101
|
||||
- type: metrics
|
||||
bind_address: ''
|
||||
port: 9101
|
||||
```
|
||||
|
||||
The `bind_address` and `port` parameters should be set so that
|
||||
@@ -95,38 +87,6 @@ don't clash with an existing worker.
|
||||
With this example, the worker's metrics would then be available
|
||||
on `http://127.0.0.1:9101`.
|
||||
|
||||
Example Prometheus target for Synapse with workers:
|
||||
|
||||
```yaml
|
||||
- job_name: "synapse"
|
||||
scrape_interval: 15s
|
||||
metrics_path: "/_synapse/metrics"
|
||||
static_configs:
|
||||
- targets: ["my.server.here:port"]
|
||||
labels:
|
||||
instance: "my.server"
|
||||
job: "master"
|
||||
index: 1
|
||||
- targets: ["my.workerserver.here:port"]
|
||||
labels:
|
||||
instance: "my.server"
|
||||
job: "generic_worker"
|
||||
index: 1
|
||||
- targets: ["my.workerserver.here:port"]
|
||||
labels:
|
||||
instance: "my.server"
|
||||
job: "generic_worker"
|
||||
index: 2
|
||||
- targets: ["my.workerserver.here:port"]
|
||||
labels:
|
||||
instance: "my.server"
|
||||
job: "media_repository"
|
||||
index: 1
|
||||
```
|
||||
|
||||
Labels (`instance`, `job`, `index`) can be defined as anything.
|
||||
The labels are used to group graphs in grafana.
|
||||
|
||||
## Renaming of metrics & deprecation of old names in 1.2
|
||||
|
||||
Synapse 1.2 updates the Prometheus metrics to match the naming
|
||||
|
||||
docs/openid.md
@@ -42,41 +42,40 @@ as follows:
|
||||
* For other installation mechanisms, see the documentation provided by the
|
||||
maintainer.
|
||||
|
||||
To enable the OpenID integration, you should then add a section to the `oidc_providers`
|
||||
setting in your configuration file (or uncomment one of the existing examples).
|
||||
See [sample_config.yaml](./sample_config.yaml) for some sample settings, as well as
|
||||
the text below for example configurations for specific providers.
|
||||
To enable the OpenID integration, you should then add an `oidc_config` section
|
||||
to your configuration file (or uncomment the `enabled: true` line in the
|
||||
existing section). See [sample_config.yaml](./sample_config.yaml) for some
|
||||
sample settings, as well as the text below for example configurations for
|
||||
specific providers.
|
||||
|
||||
## Sample configs
|
||||
|
||||
Here are a few configs for providers that should work with Synapse.
|
||||
|
||||
### Microsoft Azure Active Directory
|
||||
Azure AD can act as an OpenID Connect Provider. Register a new application under
|
||||
Azure AD can act as an OpenID Connect Provider. Register a new application under
|
||||
*App registrations* in the Azure AD management console. The RedirectURI for your
|
||||
application should point to your matrix server:
|
||||
`[synapse public baseurl]/_synapse/client/oidc/callback`
|
||||
application should point to your matrix server: `[synapse public baseurl]/_synapse/oidc/callback`
|
||||
|
||||
Go to *Certificates & secrets* and register a new client secret. Make note of your
|
||||
Go to *Certificates & secrets* and register a new client secret. Make note of your
|
||||
Directory (tenant) ID as it will be used in the Azure links.
|
||||
Edit your Synapse config file and change the `oidc_config` section:
|
||||
|
||||
```yaml
|
||||
oidc_providers:
|
||||
- idp_id: microsoft
|
||||
idp_name: Microsoft
|
||||
issuer: "https://login.microsoftonline.com/<tenant id>/v2.0"
|
||||
client_id: "<client id>"
|
||||
client_secret: "<client secret>"
|
||||
scopes: ["openid", "profile"]
|
||||
authorization_endpoint: "https://login.microsoftonline.com/<tenant id>/oauth2/v2.0/authorize"
|
||||
token_endpoint: "https://login.microsoftonline.com/<tenant id>/oauth2/v2.0/token"
|
||||
userinfo_endpoint: "https://graph.microsoft.com/oidc/userinfo"
|
||||
oidc_config:
|
||||
enabled: true
|
||||
issuer: "https://login.microsoftonline.com/<tenant id>/v2.0"
|
||||
client_id: "<client id>"
|
||||
client_secret: "<client secret>"
|
||||
scopes: ["openid", "profile"]
|
||||
authorization_endpoint: "https://login.microsoftonline.com/<tenant id>/oauth2/v2.0/authorize"
|
||||
token_endpoint: "https://login.microsoftonline.com/<tenant id>/oauth2/v2.0/token"
|
||||
userinfo_endpoint: "https://graph.microsoft.com/oidc/userinfo"
|
||||
|
||||
user_mapping_provider:
|
||||
config:
|
||||
localpart_template: "{{ user.preferred_username.split('@')[0] }}"
|
||||
display_name_template: "{{ user.name }}"
|
||||
user_mapping_provider:
|
||||
config:
|
||||
localpart_template: "{{ user.preferred_username.split('@')[0] }}"
|
||||
display_name_template: "{{ user.name }}"
|
||||
```
|
||||
|
||||
### [Dex][dex-idp]
|
||||
@@ -95,7 +94,7 @@ staticClients:
|
||||
- id: synapse
|
||||
secret: secret
|
||||
redirectURIs:
|
||||
- '[synapse public baseurl]/_synapse/client/oidc/callback'
|
||||
- '[synapse public baseurl]/_synapse/oidc/callback'
|
||||
name: 'Synapse'
|
||||
```
|
||||
|
||||
@@ -104,22 +103,21 @@ Run with `dex serve examples/config-dev.yaml`.
|
||||
Synapse config:
|
||||
|
||||
```yaml
|
||||
oidc_providers:
|
||||
- idp_id: dex
|
||||
idp_name: "My Dex server"
|
||||
skip_verification: true # This is needed as Dex is served on an insecure endpoint
|
||||
issuer: "http://127.0.0.1:5556/dex"
|
||||
client_id: "synapse"
|
||||
client_secret: "secret"
|
||||
scopes: ["openid", "profile"]
|
||||
user_mapping_provider:
|
||||
config:
|
||||
localpart_template: "{{ user.name }}"
|
||||
display_name_template: "{{ user.name|capitalize }}"
|
||||
oidc_config:
|
||||
enabled: true
|
||||
skip_verification: true # This is needed as Dex is served on an insecure endpoint
|
||||
issuer: "http://127.0.0.1:5556/dex"
|
||||
client_id: "synapse"
|
||||
client_secret: "secret"
|
||||
scopes: ["openid", "profile"]
|
||||
user_mapping_provider:
|
||||
config:
|
||||
localpart_template: "{{ user.name }}"
|
||||
display_name_template: "{{ user.name|capitalize }}"
|
||||
```
|
||||
### [Keycloak][keycloak-idp]
|
||||
|
||||
[Keycloak][keycloak-idp] is an opensource IdP maintained by Red Hat.
|
||||
[Keycloak][keycloak-idp] is an opensource IdP maintained by Red Hat.
|
||||
|
||||
Follow the [Getting Started Guide](https://www.keycloak.org/getting-started) to install Keycloak and set up a realm.
|
||||
|
||||
@@ -141,7 +139,7 @@ Follow the [Getting Started Guide](https://www.keycloak.org/getting-started) to
|
||||
| Enabled | `On` |
|
||||
| Client Protocol | `openid-connect` |
|
||||
| Access Type | `confidential` |
|
||||
| Valid Redirect URIs | `[synapse public baseurl]/_synapse/client/oidc/callback` |
|
||||
| Valid Redirect URIs | `[synapse public baseurl]/_synapse/oidc/callback` |
|
||||
|
||||
5. Click `Save`
|
||||
6. On the Credentials tab, update the fields:
|
||||
@@ -154,22 +152,17 @@ Follow the [Getting Started Guide](https://www.keycloak.org/getting-started) to
|
||||
8. Copy Secret
|
||||
|
||||
```yaml
|
||||
oidc_providers:
|
||||
- idp_id: keycloak
|
||||
idp_name: "My KeyCloak server"
|
||||
issuer: "https://127.0.0.1:8443/auth/realms/{realm_name}"
|
||||
client_id: "synapse"
|
||||
client_secret: "copy secret generated from above"
|
||||
scopes: ["openid", "profile"]
|
||||
user_mapping_provider:
|
||||
config:
|
||||
localpart_template: "{{ user.preferred_username }}"
|
||||
display_name_template: "{{ user.name }}"
|
||||
oidc_config:
|
||||
enabled: true
|
||||
issuer: "https://127.0.0.1:8443/auth/realms/{realm_name}"
|
||||
client_id: "synapse"
|
||||
client_secret: "copy secret generated from above"
|
||||
scopes: ["openid", "profile"]
|
||||
```
|
||||
### [Auth0][auth0]
|
||||
|
||||
1. Create a regular web application for Synapse
|
||||
2. Set the Allowed Callback URLs to `[synapse public baseurl]/_synapse/client/oidc/callback`
|
||||
2. Set the Allowed Callback URLs to `[synapse public baseurl]/_synapse/oidc/callback`
|
||||
3. Add a rule to add the `preferred_username` claim.
|
||||
<details>
|
||||
<summary>Code sample</summary>
|
||||
@@ -194,17 +187,16 @@ oidc_providers:
|
||||
Synapse config:
|
||||
|
||||
```yaml
|
||||
oidc_providers:
|
||||
- idp_id: auth0
|
||||
idp_name: Auth0
|
||||
issuer: "https://your-tier.eu.auth0.com/" # TO BE FILLED
|
||||
client_id: "your-client-id" # TO BE FILLED
|
||||
client_secret: "your-client-secret" # TO BE FILLED
|
||||
scopes: ["openid", "profile"]
|
||||
user_mapping_provider:
|
||||
config:
|
||||
localpart_template: "{{ user.preferred_username }}"
|
||||
display_name_template: "{{ user.name }}"
|
||||
oidc_config:
|
||||
enabled: true
|
||||
issuer: "https://your-tier.eu.auth0.com/" # TO BE FILLED
|
||||
client_id: "your-client-id" # TO BE FILLED
|
||||
client_secret: "your-client-secret" # TO BE FILLED
|
||||
scopes: ["openid", "profile"]
|
||||
user_mapping_provider:
|
||||
config:
|
||||
localpart_template: "{{ user.preferred_username }}"
|
||||
display_name_template: "{{ user.name }}"
|
||||
```
|
||||
|
||||
### GitHub
|
||||
@@ -218,28 +210,26 @@ login mechanism needs an attribute to uniquely identify users, and that endpoint
|
||||
does not return a `sub` property, an alternative `subject_claim` has to be set.
|
||||
|
||||
1. Create a new OAuth application: https://github.com/settings/applications/new.
|
||||
2. Set the callback URL to `[synapse public baseurl]/_synapse/client/oidc/callback`.
|
||||
2. Set the callback URL to `[synapse public baseurl]/_synapse/oidc/callback`.
|
||||
|
||||
Synapse config:
|
||||
|
||||
```yaml
|
||||
oidc_providers:
|
||||
- idp_id: github
|
||||
idp_name: Github
|
||||
idp_brand: "github" # optional: styling hint for clients
|
||||
discover: false
|
||||
issuer: "https://github.com/"
|
||||
client_id: "your-client-id" # TO BE FILLED
|
||||
client_secret: "your-client-secret" # TO BE FILLED
|
||||
authorization_endpoint: "https://github.com/login/oauth/authorize"
|
||||
token_endpoint: "https://github.com/login/oauth/access_token"
|
||||
userinfo_endpoint: "https://api.github.com/user"
|
||||
scopes: ["read:user"]
|
||||
user_mapping_provider:
|
||||
config:
|
||||
subject_claim: "id"
|
||||
localpart_template: "{{ user.login }}"
|
||||
display_name_template: "{{ user.name }}"
|
||||
oidc_config:
|
||||
enabled: true
|
||||
discover: false
|
||||
issuer: "https://github.com/"
|
||||
client_id: "your-client-id" # TO BE FILLED
|
||||
client_secret: "your-client-secret" # TO BE FILLED
|
||||
authorization_endpoint: "https://github.com/login/oauth/authorize"
|
||||
token_endpoint: "https://github.com/login/oauth/access_token"
|
||||
userinfo_endpoint: "https://api.github.com/user"
|
||||
scopes: ["read:user"]
|
||||
user_mapping_provider:
|
||||
config:
|
||||
subject_claim: "id"
|
||||
localpart_template: "{{ user.login }}"
|
||||
display_name_template: "{{ user.name }}"
|
||||
```
|
||||
|
||||
### [Google][google-idp]
|
||||
@@ -249,200 +239,60 @@ oidc_providers:
|
||||
2. add an "OAuth Client ID" for a Web Application under "Credentials".
|
||||
3. Copy the Client ID and Client Secret, and add the following to your synapse config:
|
||||
```yaml
|
||||
oidc_providers:
|
||||
- idp_id: google
|
||||
idp_name: Google
|
||||
idp_brand: "google" # optional: styling hint for clients
|
||||
issuer: "https://accounts.google.com/"
|
||||
client_id: "your-client-id" # TO BE FILLED
|
||||
client_secret: "your-client-secret" # TO BE FILLED
|
||||
scopes: ["openid", "profile"]
|
||||
user_mapping_provider:
|
||||
config:
|
||||
localpart_template: "{{ user.given_name|lower }}"
|
||||
display_name_template: "{{ user.name }}"
|
||||
oidc_config:
|
||||
enabled: true
|
||||
issuer: "https://accounts.google.com/"
|
||||
client_id: "your-client-id" # TO BE FILLED
|
||||
client_secret: "your-client-secret" # TO BE FILLED
|
||||
scopes: ["openid", "profile"]
|
||||
user_mapping_provider:
|
||||
config:
|
||||
localpart_template: "{{ user.given_name|lower }}"
|
||||
display_name_template: "{{ user.name }}"
|
||||
```
|
||||
4. Back in the Google console, add this Authorized redirect URI: `[synapse
|
||||
public baseurl]/_synapse/client/oidc/callback`.
|
||||
public baseurl]/_synapse/oidc/callback`.
|
||||
|
||||
### Twitch
|
||||
|
||||
1. Setup a developer account on [Twitch](https://dev.twitch.tv/)
|
||||
2. Obtain the OAuth 2.0 credentials by [creating an app](https://dev.twitch.tv/console/apps/)
|
||||
3. Add this OAuth Redirect URL: `[synapse public baseurl]/_synapse/client/oidc/callback`
|
||||
3. Add this OAuth Redirect URL: `[synapse public baseurl]/_synapse/oidc/callback`
|
||||
|
||||
Synapse config:
|
||||
|
||||
```yaml
|
||||
oidc_providers:
|
||||
- idp_id: twitch
|
||||
idp_name: Twitch
|
||||
issuer: "https://id.twitch.tv/oauth2/"
|
||||
client_id: "your-client-id" # TO BE FILLED
|
||||
client_secret: "your-client-secret" # TO BE FILLED
|
||||
client_auth_method: "client_secret_post"
|
||||
user_mapping_provider:
|
||||
config:
|
||||
localpart_template: "{{ user.preferred_username }}"
|
||||
display_name_template: "{{ user.name }}"
|
||||
oidc_config:
|
||||
enabled: true
|
||||
issuer: "https://id.twitch.tv/oauth2/"
|
||||
client_id: "your-client-id" # TO BE FILLED
|
||||
client_secret: "your-client-secret" # TO BE FILLED
|
||||
client_auth_method: "client_secret_post"
|
||||
user_mapping_provider:
|
||||
config:
|
||||
localpart_template: "{{ user.preferred_username }}"
|
||||
display_name_template: "{{ user.name }}"
|
||||
```
|
||||
|
||||
### GitLab
|
||||
|
||||
1. Create a [new application](https://gitlab.com/profile/applications).
|
||||
2. Add the `read_user` and `openid` scopes.
|
||||
3. Add this Callback URL: `[synapse public baseurl]/_synapse/client/oidc/callback`
|
||||
3. Add this Callback URL: `[synapse public baseurl]/_synapse/oidc/callback`
|
||||
|
||||
Synapse config:
|
||||
|
||||
```yaml
|
||||
oidc_providers:
|
||||
- idp_id: gitlab
|
||||
idp_name: Gitlab
|
||||
idp_brand: "gitlab" # optional: styling hint for clients
|
||||
issuer: "https://gitlab.com/"
|
||||
client_id: "your-client-id" # TO BE FILLED
|
||||
client_secret: "your-client-secret" # TO BE FILLED
|
||||
client_auth_method: "client_secret_post"
|
||||
scopes: ["openid", "read_user"]
|
||||
user_profile_method: "userinfo_endpoint"
|
||||
user_mapping_provider:
|
||||
config:
|
||||
localpart_template: '{{ user.nickname }}'
|
||||
display_name_template: '{{ user.name }}'
|
||||
```
|
||||
|
||||
### Facebook
|
||||
|
||||
Like Github, Facebook provides a custom OAuth2 API rather than an OIDC-compliant
one, so it requires a little more configuration.
|
||||
|
||||
0. You will need a Facebook developer account. You can register for one
|
||||
[here](https://developers.facebook.com/async/registration/).
|
||||
1. On the [apps](https://developers.facebook.com/apps/) page of the developer
|
||||
console, "Create App", and choose "Build Connected Experiences".
|
||||
2. Once the app is created, add "Facebook Login" and choose "Web". You don't
|
||||
need to go through the whole form here.
|
||||
3. In the left-hand menu, open "Products"/"Facebook Login"/"Settings".
|
||||
* Add `[synapse public baseurl]/_synapse/client/oidc/callback` as an OAuth Redirect
|
||||
URL.
|
||||
4. In the left-hand menu, open "Settings/Basic". Here you can copy the "App ID"
|
||||
and "App Secret" for use below.
|
||||
|
||||
Synapse config:
|
||||
|
||||
```yaml
|
||||
- idp_id: facebook
|
||||
idp_name: Facebook
|
||||
idp_brand: "facebook" # optional: styling hint for clients
|
||||
discover: false
|
||||
issuer: "https://facebook.com"
|
||||
client_id: "your-client-id" # TO BE FILLED
|
||||
client_secret: "your-client-secret" # TO BE FILLED
|
||||
scopes: ["openid", "email"]
|
||||
authorization_endpoint: https://facebook.com/dialog/oauth
|
||||
token_endpoint: https://graph.facebook.com/v9.0/oauth/access_token
|
||||
user_profile_method: "userinfo_endpoint"
|
||||
userinfo_endpoint: "https://graph.facebook.com/v9.0/me?fields=id,name,email,picture"
|
||||
user_mapping_provider:
|
||||
config:
|
||||
subject_claim: "id"
|
||||
display_name_template: "{{ user.name }}"
|
||||
```
|
||||
|
||||
Relevant documents:
|
||||
* https://developers.facebook.com/docs/facebook-login/manually-build-a-login-flow
|
||||
* Using Facebook's Graph API: https://developers.facebook.com/docs/graph-api/using-graph-api/
|
||||
* Reference to the User endpoint: https://developers.facebook.com/docs/graph-api/reference/user
|
||||
|
||||
### Gitea
|
||||
|
||||
Gitea is, like GitHub, not an OpenID Connect provider, but just an OAuth2 provider.
|
||||
|
||||
The [`/user` API endpoint](https://try.gitea.io/api/swagger#/user/userGetCurrent)
|
||||
can be used to retrieve information on the authenticated user. As the Synapse
|
||||
login mechanism needs an attribute to uniquely identify users, and that endpoint
|
||||
does not return a `sub` property, an alternative `subject_claim` has to be set.
|
||||
|
||||
1. Create a new application.
|
||||
2. Add this Callback URL: `[synapse public baseurl]/_synapse/client/oidc/callback`
|
||||
|
||||
Synapse config:
|
||||
|
||||
```yaml
|
||||
oidc_providers:
|
||||
- idp_id: gitea
|
||||
idp_name: Gitea
|
||||
discover: false
|
||||
issuer: "https://your-gitea.com/"
|
||||
client_id: "your-client-id" # TO BE FILLED
|
||||
client_secret: "your-client-secret" # TO BE FILLED
|
||||
client_auth_method: client_secret_post
|
||||
scopes: [] # Gitea doesn't support Scopes
|
||||
authorization_endpoint: "https://your-gitea.com/login/oauth/authorize"
|
||||
token_endpoint: "https://your-gitea.com/login/oauth/access_token"
|
||||
userinfo_endpoint: "https://your-gitea.com/api/v1/user"
|
||||
user_mapping_provider:
|
||||
config:
|
||||
subject_claim: "id"
|
||||
localpart_template: "{{ user.login }}"
|
||||
display_name_template: "{{ user.full_name }}"
|
||||
```
|
||||
|
||||
### XWiki
|
||||
|
||||
Install the [OpenID Connect Provider](https://extensions.xwiki.org/xwiki/bin/view/Extension/OpenID%20Connect/OpenID%20Connect%20Provider/) extension in your [XWiki](https://www.xwiki.org) instance.
|
||||
|
||||
Synapse config:
|
||||
|
||||
```yaml
|
||||
oidc_providers:
|
||||
- idp_id: xwiki
|
||||
idp_name: "XWiki"
|
||||
issuer: "https://myxwikihost/xwiki/oidc/"
|
||||
client_id: "your-client-id" # TO BE FILLED
|
||||
client_auth_method: none
|
||||
scopes: ["openid", "profile"]
|
||||
user_profile_method: "userinfo_endpoint"
|
||||
user_mapping_provider:
|
||||
config:
|
||||
localpart_template: "{{ user.preferred_username }}"
|
||||
display_name_template: "{{ user.name }}"
|
||||
```
|
||||
|
||||
## Apple
|
||||
|
||||
Configuring "Sign in with Apple" (SiWA) requires an Apple Developer account.
|
||||
|
||||
You will need to create a new "Services ID" for SiWA, and create and download a
|
||||
private key with "SiWA" enabled.
|
||||
|
||||
As well as the private key file, you will need:
|
||||
* Client ID: the "identifier" you gave the "Services ID"
|
||||
* Team ID: a 10-character ID associated with your developer account.
|
||||
* Key ID: the 10-character identifier for the key.
|
||||
|
||||
https://help.apple.com/developer-account/?lang=en#/dev77c875b7e has more
|
||||
documentation on setting up SiWA.
|
||||
|
||||
The Synapse config will look like this:
|
||||
|
||||
```yaml
|
||||
- idp_id: apple
|
||||
idp_name: Apple
|
||||
issuer: "https://appleid.apple.com"
|
||||
client_id: "your-client-id" # Set to the "identifier" for your "ServicesID"
|
||||
client_auth_method: "client_secret_post"
|
||||
client_secret_jwt_key:
|
||||
key_file: "/path/to/AuthKey_KEYIDCODE.p8" # point to your key file
|
||||
jwt_header:
|
||||
alg: ES256
|
||||
kid: "KEYIDCODE" # Set to the 10-char Key ID
|
||||
jwt_payload:
|
||||
iss: TEAMIDCODE # Set to the 10-char Team ID
|
||||
scopes: ["name", "email", "openid"]
|
||||
authorization_endpoint: https://appleid.apple.com/auth/authorize?response_mode=form_post
|
||||
user_mapping_provider:
|
||||
config:
|
||||
email_template: "{{ user.email }}"
|
||||
oidc_config:
|
||||
enabled: true
|
||||
issuer: "https://gitlab.com/"
|
||||
client_id: "your-client-id" # TO BE FILLED
|
||||
client_secret: "your-client-secret" # TO BE FILLED
|
||||
client_auth_method: "client_secret_post"
|
||||
scopes: ["openid", "read_user"]
|
||||
user_profile_method: "userinfo_endpoint"
|
||||
user_mapping_provider:
|
||||
config:
|
||||
localpart_template: '{{ user.nickname }}'
|
||||
display_name_template: '{{ user.name }}'
|
||||
```
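For context, the `client_secret_jwt_key` settings above are used to sign a short-lived JWT which is sent to Apple as the OAuth2 client secret. The following is a minimal sketch of the kind of client secret JWT Apple expects, not Synapse's internal code; it assumes the `PyJWT` and `cryptography` packages are available, and every identifier and path below is a placeholder.

```python
# Sketch of the client secret JWT that "Sign in with Apple" expects.
# Not Synapse's internal code; all values below are placeholders.
import time

import jwt  # PyJWT, with the cryptography package installed for ES256

TEAM_ID = "TEAMIDCODE"          # the 10-char Team ID
KEY_ID = "KEYIDCODE"            # the 10-char Key ID
CLIENT_ID = "your-client-id"    # the "identifier" of the Services ID

with open("/path/to/AuthKey_KEYIDCODE.p8") as f:
    private_key = f.read()

now = int(time.time())
client_secret = jwt.encode(
    {
        "iss": TEAM_ID,                 # matches jwt_payload.iss above
        "iat": now,
        "exp": now + 3600,
        "aud": "https://appleid.apple.com",
        "sub": CLIENT_ID,
    },
    private_key,
    algorithm="ES256",                  # matches jwt_header.alg above
    headers={"kid": KEY_ID},            # matches jwt_header.kid above
)
print(client_secret)
```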
|
||||
|
||||
@@ -26,7 +26,6 @@ Password auth provider classes must provide the following methods:
|
||||
|
||||
It should perform any appropriate sanity checks on the provided
|
||||
configuration, and return an object which is then passed into
|
||||
`__init__`.
|
||||
|
||||
This method should have the `@staticmethod` decoration.
|
||||
|
||||
|
||||
@@ -18,7 +18,7 @@ connect to a postgres database.
|
||||
virtualenv](../INSTALL.md#installing-from-source), you can install
|
||||
the library with:
|
||||
|
||||
~/synapse/env/bin/pip install "matrix-synapse[postgres]"
|
||||
~/synapse/env/bin/pip install matrix-synapse[postgres]
|
||||
|
||||
(substituting the path to your virtualenv for `~/synapse/env`, if
|
||||
you used a different path). You will require the postgres
|
||||
|
||||
@@ -1,235 +0,0 @@
|
||||
# Presence Router Module
|
||||
|
||||
Synapse supports configuring a module that can specify additional users
|
||||
(local or remote) who should receive certain presence updates from local
|
||||
users.
|
||||
|
||||
Note that routing presence via Application Service transactions is not
|
||||
currently supported.
|
||||
|
||||
The presence routing module is implemented as a Python class, which will
|
||||
be imported by the running Synapse.
|
||||
|
||||
## Python Presence Router Class
|
||||
|
||||
The Python class is instantiated with two objects:
|
||||
|
||||
* A configuration object of some type (see below).
|
||||
* An instance of `synapse.module_api.ModuleApi`.
|
||||
|
||||
It then implements methods related to presence routing.
|
||||
|
||||
Note that one method of `ModuleApi` that may be useful is:
|
||||
|
||||
```python
|
||||
async def ModuleApi.send_local_online_presence_to(users: Iterable[str]) -> None
|
||||
```
|
||||
|
||||
which can be given a list of local or remote MXIDs to broadcast known, online user
|
||||
presence to (for those users that the receiving user is considered interested in).
|
||||
It does not include state for users who are currently offline, and it can only be
|
||||
called on workers that support sending federation.
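As a rough illustration (the MXIDs are made up, and `module_api` stands for the `ModuleApi` instance passed to your module), a call might look like:

```python
# Hypothetical call from inside a module: broadcast currently-known online
# presence of local users to these two (made-up) users.
await module_api.send_local_online_presence_to(
    ["@admin:example.org", "@watcher:other-server.example.com"]
)
```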
|
||||
|
||||
### Module structure
|
||||
|
||||
Below is a list of possible methods that can be implemented, and whether they are
|
||||
required.
|
||||
|
||||
#### `parse_config`
|
||||
|
||||
```python
|
||||
def parse_config(config_dict: dict) -> Any
|
||||
```
|
||||
|
||||
**Required.** A static method that is passed a dictionary of config options, and
|
||||
should return a validated config object. This method is described further in
|
||||
[Configuration](#configuration).
|
||||
|
||||
#### `get_users_for_states`
|
||||
|
||||
```python
|
||||
async def get_users_for_states(
|
||||
self,
|
||||
state_updates: Iterable[UserPresenceState],
|
||||
) -> Dict[str, Set[UserPresenceState]]:
|
||||
```
|
||||
|
||||
**Required.** An asynchronous method that is passed an iterable of user presence
|
||||
state. This method can determine whether a given presence update should be sent to certain
|
||||
users. It does this by returning a dictionary with keys representing local or remote
|
||||
Matrix User IDs, and values being a python set
|
||||
of `synapse.handlers.presence.UserPresenceState` instances.
|
||||
|
||||
Synapse will then attempt to send the specified presence updates to each user when
|
||||
possible.
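For illustration only (the MXIDs are made up, and `alice_update`/`carol_update` stand for `UserPresenceState` instances taken from `state_updates`), a returned mapping might look like:

```python
# Hypothetical return value: route Alice's update to Bob, and both updates to
# a monitoring user that should see everything.
destinations = {
    "@bob:example.com": {alice_update},
    "@monitor:example.org": {alice_update, carol_update},
}
```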
|
||||
|
||||
#### `get_interested_users`
|
||||
|
||||
```python
|
||||
async def get_interested_users(self, user_id: str) -> Union[Set[str], str]
|
||||
```
|
||||
|
||||
**Required.** An asynchronous method that is passed a single Matrix User ID. This
|
||||
method is expected to return the users whose presence the passed-in user may be
interested in. Returned users may be local or remote. The presence routed as a result of
|
||||
what this method returns is sent in addition to the updates already sent between users
|
||||
that share a room together. Presence updates are deduplicated.
|
||||
|
||||
This method should return a python set of Matrix User IDs, or the object
|
||||
`synapse.events.presence_router.PresenceRouter.ALL_USERS` to indicate that the passed
|
||||
user should receive presence information for *all* known users.
|
||||
|
||||
For clarity, if the user `@alice:example.org` is passed to this method, and the Set
|
||||
`{"@bob:example.com", "@charlie:somewhere.org"}` is returned, this signifies that Alice
|
||||
should receive presence updates sent by Bob and Charlie, regardless of whether these
|
||||
users share a room.
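Expressed as code (hypothetical values, with `router` standing for an instance of your module):

```python
# Hypothetical: Alice is interested in Bob's and Charlie's presence.
interested = await router.get_interested_users("@alice:example.org")
assert interested == {"@bob:example.com", "@charlie:somewhere.org"}
```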
|
||||
|
||||
### Example
|
||||
|
||||
Below is an example implementation of a presence router class.
|
||||
|
||||
```python
|
||||
from typing import Dict, Iterable, List, Set, Union
|
||||
from synapse.events.presence_router import PresenceRouter
|
||||
from synapse.handlers.presence import UserPresenceState
|
||||
from synapse.module_api import ModuleApi
|
||||
|
||||
class PresenceRouterConfig:
|
||||
def __init__(self):
|
||||
# Config options with their defaults
|
||||
# A list of users to always send all user presence updates to
|
||||
self.always_send_to_users = [] # type: List[str]
|
||||
|
||||
# A list of users to ignore presence updates for. Does not affect
|
||||
# shared-room presence relationships
|
||||
self.blacklisted_users = [] # type: List[str]
|
||||
|
||||
class ExamplePresenceRouter:
|
||||
"""An example implementation of synapse.presence_router.PresenceRouter.
|
||||
Supports routing all presence to a configured set of users, or a subset
|
||||
of presence from certain users to members of certain rooms.
|
||||
|
||||
Args:
|
||||
config: A configuration object.
|
||||
module_api: An instance of Synapse's ModuleApi.
|
||||
"""
|
||||
def __init__(self, config: PresenceRouterConfig, module_api: ModuleApi):
|
||||
self._config = config
|
||||
self._module_api = module_api
|
||||
|
||||
@staticmethod
|
||||
def parse_config(config_dict: dict) -> PresenceRouterConfig:
|
||||
"""Parse a configuration dictionary from the homeserver config, do
|
||||
some validation and return a typed PresenceRouterConfig.
|
||||
|
||||
Args:
|
||||
config_dict: The configuration dictionary.
|
||||
|
||||
Returns:
|
||||
A validated config object.
|
||||
"""
|
||||
# Initialise a typed config object
|
||||
config = PresenceRouterConfig()
|
||||
always_send_to_users = config_dict.get("always_send_to_users")
|
||||
blacklisted_users = config_dict.get("blacklisted_users")
|
||||
|
||||
# Do some validation of config options... otherwise raise a
|
||||
# synapse.config.ConfigError.
|
||||
config.always_send_to_users = always_send_to_users
|
||||
config.blacklisted_users = blacklisted_users
|
||||
|
||||
return config
|
||||
|
||||
async def get_users_for_states(
|
||||
self,
|
||||
state_updates: Iterable[UserPresenceState],
|
||||
) -> Dict[str, Set[UserPresenceState]]:
|
||||
"""Given an iterable of user presence updates, determine where each one
|
||||
needs to go. Returned results will not affect presence updates that are
|
||||
sent between users who share a room.
|
||||
|
||||
Args:
|
||||
state_updates: An iterable of user presence state updates.
|
||||
|
||||
Returns:
|
||||
A dictionary of user_id -> set of UserPresenceState that the user should
|
||||
receive.
|
||||
"""
|
||||
destination_users = {}  # type: Dict[str, Set[UserPresenceState]]
|
||||
|
||||
# Ignore any updates for blacklisted users
|
||||
desired_updates = set()
|
||||
for update in state_updates:
|
||||
if update.state_key not in self._config.blacklisted_users:
|
||||
desired_updates.add(update)
|
||||
|
||||
# Send all presence updates to specific users
|
||||
for user_id in self._config.always_send_to_users:
|
||||
destination_users[user_id] = desired_updates
|
||||
|
||||
return destination_users
|
||||
|
||||
async def get_interested_users(
|
||||
self,
|
||||
user_id: str,
|
||||
) -> Union[Set[str], PresenceRouter.ALL_USERS]:
|
||||
"""
|
||||
Retrieve a list of users that `user_id` is interested in receiving the
|
||||
presence of. This will be in addition to those they share a room with.
|
||||
Optionally, the object PresenceRouter.ALL_USERS can be returned to indicate
|
||||
that this user should receive all incoming local and remote presence updates.
|
||||
|
||||
Note that this method will only be called for local users.
|
||||
|
||||
Args:
|
||||
user_id: A user requesting presence updates.
|
||||
|
||||
Returns:
|
||||
A set of user IDs to return additional presence updates for, or
|
||||
PresenceRouter.ALL_USERS to return presence updates for all other users.
|
||||
"""
|
||||
if user_id in self._config.always_send_to_users:
|
||||
return PresenceRouter.ALL_USERS
|
||||
|
||||
return set()
|
||||
```
|
||||
|
||||
#### A note on `get_users_for_states` and `get_interested_users`
|
||||
|
||||
Both of these methods are effectively two different sides of the same coin. The logic
|
||||
regarding which users should receive updates for other users should be the same
|
||||
between them.
|
||||
|
||||
`get_users_for_states` is called when presence updates come in from either federation
|
||||
or local users, and is used to either direct local presence to remote users, or to
|
||||
wake up the sync streams of local users to collect remote presence.
|
||||
|
||||
In contrast, `get_interested_users` is used to determine the users that presence should
|
||||
be fetched for when a local user is syncing. This presence is then retrieved, before
|
||||
being fed through `get_users_for_states` once again, with only the syncing user's
|
||||
routing information pulled from the resulting dictionary.
|
||||
|
||||
Their routing logic should thus line up, else you may run into unintended behaviour.
|
||||
|
||||
## Configuration
|
||||
|
||||
Once you've crafted your module and installed it into the same Python environment as
|
||||
Synapse, amend your homeserver config file with the following.
|
||||
|
||||
```yaml
|
||||
presence:
|
||||
routing_module:
|
||||
module: my_module.ExamplePresenceRouter
|
||||
config:
|
||||
# Any configuration options for your module. The below is an example
# of setting options for ExamplePresenceRouter.
|
||||
always_send_to_users: ["@presence_gobbler:example.org"]
|
||||
blacklisted_users:
|
||||
- "@alice:example.com"
|
||||
- "@bob:example.com"
|
||||
...
|
||||
```
|
||||
|
||||
The contents of `config` will be passed as a Python dictionary to the static
|
||||
`parse_config` method of your class. The object returned by this method will
|
||||
then be passed to the `__init__` method of your module as `config`.
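Conceptually, the flow is as follows (a sketch, not Synapse's actual module-loading code; `module_api` is the `ModuleApi` instance Synapse provides):

```python
# Sketch of how the homeserver config reaches the module.
config_dict = {
    "always_send_to_users": ["@presence_gobbler:example.org"],
    "blacklisted_users": ["@alice:example.com", "@bob:example.com"],
}
config = ExamplePresenceRouter.parse_config(config_dict)
router = ExamplePresenceRouter(config=config, module_api=module_api)
```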
|
||||
@@ -3,31 +3,30 @@
|
||||
It is recommended to put a reverse proxy such as
|
||||
[nginx](https://nginx.org/en/docs/http/ngx_http_proxy_module.html),
|
||||
[Apache](https://httpd.apache.org/docs/current/mod/mod_proxy_http.html),
|
||||
[Caddy](https://caddyserver.com/docs/quick-starts/reverse-proxy),
|
||||
[HAProxy](https://www.haproxy.org/) or
|
||||
[relayd](https://man.openbsd.org/relayd.8) in front of Synapse. One advantage
|
||||
[Caddy](https://caddyserver.com/docs/quick-starts/reverse-proxy) or
|
||||
[HAProxy](https://www.haproxy.org/) in front of Synapse. One advantage
|
||||
of doing so is that it means that you can expose the default https port
|
||||
(443) to Matrix clients without needing to run Synapse with root
|
||||
privileges.
|
||||
|
||||
You should configure your reverse proxy to forward requests to `/_matrix` or
|
||||
`/_synapse/client` to Synapse, and have it set the `X-Forwarded-For` and
|
||||
`X-Forwarded-Proto` request headers.
|
||||
|
||||
You should remember that Matrix clients and other Matrix servers do not
|
||||
necessarily need to connect to your server via the same server name or
|
||||
port. Indeed, clients will use port 443 by default, whereas servers default to
|
||||
port 8448. Where these are different, we refer to the 'client port' and the
|
||||
'federation port'. See [the Matrix
|
||||
specification](https://matrix.org/docs/spec/server_server/latest#resolving-server-names)
|
||||
for more details of the algorithm used for federation connections, and
|
||||
[delegate.md](<delegate.md>) for instructions on setting up delegation.
|
||||
|
||||
**NOTE**: Your reverse proxy must not `canonicalise` or `normalise`
|
||||
the requested URI in any way (for example, by decoding `%xx` escapes).
|
||||
Beware that Apache *will* canonicalise URIs unless you specify
|
||||
`nocanon`.
|
||||
|
||||
When setting up a reverse proxy, remember that Matrix clients and other
|
||||
Matrix servers do not necessarily need to connect to your server via the
|
||||
same server name or port. Indeed, clients will use port 443 by default,
|
||||
whereas servers default to port 8448. Where these are different, we
|
||||
refer to the 'client port' and the 'federation port'. See [the Matrix
|
||||
specification](https://matrix.org/docs/spec/server_server/latest#resolving-server-names)
|
||||
for more details of the algorithm used for federation connections, and
|
||||
[delegate.md](<delegate.md>) for instructions on setting up delegation.
|
||||
|
||||
Endpoints that are part of the standardised Matrix specification are
|
||||
located under `/_matrix`, whereas endpoints specific to Synapse are
|
||||
located under `/_synapse/client`.
|
||||
|
||||
Let's assume that we expect clients to connect to our server at
|
||||
`https://matrix.example.com`, and other servers to connect at
|
||||
`https://example.com:8448`. The following sections detail the configuration of
|
||||
@@ -41,21 +40,18 @@ the reverse proxy and the homeserver.
|
||||
|
||||
```
|
||||
server {
|
||||
listen 443 ssl http2;
|
||||
listen [::]:443 ssl http2;
|
||||
listen 443 ssl;
|
||||
listen [::]:443 ssl;
|
||||
|
||||
# For the federation port
|
||||
listen 8448 ssl http2 default_server;
|
||||
listen [::]:8448 ssl http2 default_server;
|
||||
listen 8448 ssl default_server;
|
||||
listen [::]:8448 ssl default_server;
|
||||
|
||||
server_name matrix.example.com;
|
||||
|
||||
location ~* ^(\/_matrix|\/_synapse\/client) {
|
||||
proxy_pass http://localhost:8008;
|
||||
proxy_set_header X-Forwarded-For $remote_addr;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_set_header Host $host;
|
||||
|
||||
# Nginx by default only allows file uploads up to 1M in size
|
||||
# Increase client_max_body_size to match max_upload_size defined in homeserver.yaml
|
||||
client_max_body_size 50M;
|
||||
@@ -104,11 +100,9 @@ example.com:8448 {
|
||||
```
|
||||
<VirtualHost *:443>
|
||||
SSLEngine on
|
||||
ServerName matrix.example.com
|
||||
ServerName matrix.example.com;
|
||||
|
||||
RequestHeader set "X-Forwarded-Proto" expr=%{REQUEST_SCHEME}
|
||||
AllowEncodedSlashes NoDecode
|
||||
ProxyPreserveHost on
|
||||
ProxyPass /_matrix http://127.0.0.1:8008/_matrix nocanon
|
||||
ProxyPassReverse /_matrix http://127.0.0.1:8008/_matrix
|
||||
ProxyPass /_synapse/client http://127.0.0.1:8008/_synapse/client nocanon
|
||||
@@ -117,9 +111,8 @@ example.com:8448 {
|
||||
|
||||
<VirtualHost *:8448>
|
||||
SSLEngine on
|
||||
ServerName example.com
|
||||
ServerName example.com;
|
||||
|
||||
RequestHeader set "X-Forwarded-Proto" expr=%{REQUEST_SCHEME}
|
||||
AllowEncodedSlashes NoDecode
|
||||
ProxyPass /_matrix http://127.0.0.1:8008/_matrix nocanon
|
||||
ProxyPassReverse /_matrix http://127.0.0.1:8008/_matrix
|
||||
@@ -136,16 +129,11 @@ example.com:8448 {
|
||||
</IfModule>
|
||||
```
|
||||
|
||||
**NOTE 3**: Missing `ProxyPreserveHost on` can lead to a redirect loop.
|
||||
|
||||
### HAProxy
|
||||
|
||||
```
|
||||
frontend https
|
||||
bind :::443 v4v6 ssl crt /etc/ssl/haproxy/ strict-sni alpn h2,http/1.1
|
||||
http-request set-header X-Forwarded-Proto https if { ssl_fc }
|
||||
http-request set-header X-Forwarded-Proto http if !{ ssl_fc }
|
||||
http-request set-header X-Forwarded-For %[src]
|
||||
|
||||
# Matrix client traffic
|
||||
acl matrix-host hdr(host) -i matrix.example.com
|
||||
@@ -156,62 +144,12 @@ frontend https
|
||||
|
||||
frontend matrix-federation
|
||||
bind :::8448 v4v6 ssl crt /etc/ssl/haproxy/synapse.pem alpn h2,http/1.1
|
||||
http-request set-header X-Forwarded-Proto https if { ssl_fc }
|
||||
http-request set-header X-Forwarded-Proto http if !{ ssl_fc }
|
||||
http-request set-header X-Forwarded-For %[src]
|
||||
|
||||
default_backend matrix
|
||||
|
||||
backend matrix
|
||||
server matrix 127.0.0.1:8008
|
||||
```
|
||||
|
||||
### Relayd
|
||||
|
||||
```
|
||||
table <webserver> { 127.0.0.1 }
|
||||
table <matrixserver> { 127.0.0.1 }
|
||||
|
||||
http protocol "https" {
|
||||
tls { no tlsv1.0, ciphers "HIGH" }
|
||||
tls keypair "example.com"
|
||||
match header set "X-Forwarded-For" value "$REMOTE_ADDR"
|
||||
match header set "X-Forwarded-Proto" value "https"
|
||||
|
||||
# set CORS header for .well-known/matrix/server, .well-known/matrix/client
|
||||
# httpd does not support setting headers, so do it here
|
||||
match request path "/.well-known/matrix/*" tag "matrix-cors"
|
||||
match response tagged "matrix-cors" header set "Access-Control-Allow-Origin" value "*"
|
||||
|
||||
pass quick path "/_matrix/*" forward to <matrixserver>
|
||||
pass quick path "/_synapse/client/*" forward to <matrixserver>
|
||||
|
||||
# pass on non-matrix traffic to webserver
|
||||
pass forward to <webserver>
|
||||
}
|
||||
|
||||
relay "https_traffic" {
|
||||
listen on egress port 443 tls
|
||||
protocol "https"
|
||||
forward to <matrixserver> port 8008 check tcp
|
||||
forward to <webserver> port 8080 check tcp
|
||||
}
|
||||
|
||||
http protocol "matrix" {
|
||||
tls { no tlsv1.0, ciphers "HIGH" }
|
||||
tls keypair "example.com"
|
||||
block
|
||||
pass quick path "/_matrix/*" forward to <matrixserver>
|
||||
pass quick path "/_synapse/client/*" forward to <matrixserver>
|
||||
}
|
||||
|
||||
relay "matrix_federation" {
|
||||
listen on egress port 8448 tls
|
||||
protocol "matrix"
|
||||
forward to <matrixserver> port 8008 check tcp
|
||||
}
|
||||
```
|
||||
|
||||
## Homeserver Configuration
|
||||
|
||||
You will also want to set `bind_addresses: ['127.0.0.1']` and
|
||||
|
||||
@@ -67,12 +67,11 @@ pid_file: DATADIR/homeserver.pid
|
||||
#
|
||||
#web_client_location: https://riot.example.com/
|
||||
|
||||
# The public-facing base URL that clients use to access this Homeserver (not
|
||||
# including _matrix/...). This is the same URL a user might enter into the
|
||||
# 'Custom Homeserver URL' field on their client. If you use Synapse with a
|
||||
# reverse proxy, this should be the URL to reach Synapse via the proxy.
|
||||
# Otherwise, it should be the URL to reach Synapse's client HTTP listener (see
|
||||
# 'listeners' below).
|
||||
# The public-facing base URL that clients use to access this HS
|
||||
# (not including _matrix/...). This is the same URL a user would
|
||||
# enter into the 'custom HS URL' field on their client. If you
|
||||
# use synapse with a reverse proxy, this should be the URL to reach
|
||||
# synapse via the proxy.
|
||||
#
|
||||
#public_baseurl: https://example.com/
|
||||
|
||||
@@ -82,33 +81,15 @@ pid_file: DATADIR/homeserver.pid
|
||||
#
|
||||
#soft_file_limit: 0
|
||||
|
||||
# Presence tracking allows users to see the state (e.g online/offline)
|
||||
# of other local and remote users.
|
||||
# Set to false to disable presence tracking on this homeserver.
|
||||
#
|
||||
presence:
|
||||
# Uncomment to disable presence tracking on this homeserver. This option
|
||||
# replaces the previous top-level 'use_presence' option.
|
||||
#
|
||||
#enabled: false
|
||||
|
||||
# Presence routers are third-party modules that can specify additional logic
|
||||
# to where presence updates from users are routed.
|
||||
#
|
||||
presence_router:
|
||||
# The custom module's class. Uncomment to use a custom presence router module.
|
||||
#
|
||||
#module: "my_custom_router.PresenceRouter"
|
||||
|
||||
# Configuration options of the custom module. Refer to your module's
|
||||
# documentation for available options.
|
||||
#
|
||||
#config:
|
||||
# example_option: 'something'
|
||||
#use_presence: false
|
||||
|
||||
# Whether to require authentication to retrieve profile data (avatars,
|
||||
# display names) of other users through the client API. Defaults to
|
||||
# 'false'. Note that profile data is also available via the federation
|
||||
# API, unless allow_profile_lookup_over_federation is set to false.
|
||||
# API, so this setting is of limited value if federation is enabled on
|
||||
# the server.
|
||||
#
|
||||
#require_auth_for_profile_requests: true
|
||||
|
||||
@@ -119,14 +100,6 @@ presence:
|
||||
#
|
||||
#limit_profile_requests_to_users_who_share_rooms: true
|
||||
|
||||
# Uncomment to prevent a user's profile data from being retrieved and
|
||||
# displayed in a room until they have joined it. By default, a user's
|
||||
# profile data is included in an invite event, regardless of the values
|
||||
# of the above two settings, and whether or not the users share a server.
|
||||
# Defaults to 'true'.
|
||||
#
|
||||
#include_profile_data_on_invite: false
|
||||
|
||||
# If set to 'true', removes the need for authentication to access the server's
|
||||
# public rooms directory through the client API, meaning that anyone can
|
||||
# query the room directory. Defaults to 'false'.
|
||||
@@ -171,51 +144,6 @@ presence:
|
||||
#
|
||||
#enable_search: false
|
||||
|
||||
# Prevent outgoing requests from being sent to the following blacklisted IP address
|
||||
# CIDR ranges. If this option is not specified then it defaults to private IP
|
||||
# address ranges (see the example below).
|
||||
#
|
||||
# The blacklist applies to the outbound requests for federation, identity servers,
|
||||
# push servers, and for checking key validity for third-party invite events.
|
||||
#
|
||||
# (0.0.0.0 and :: are always blacklisted, whether or not they are explicitly
|
||||
# listed here, since they correspond to unroutable addresses.)
|
||||
#
|
||||
# This option replaces federation_ip_range_blacklist in Synapse v1.25.0.
|
||||
#
|
||||
#ip_range_blacklist:
|
||||
# - '127.0.0.0/8'
|
||||
# - '10.0.0.0/8'
|
||||
# - '172.16.0.0/12'
|
||||
# - '192.168.0.0/16'
|
||||
# - '100.64.0.0/10'
|
||||
# - '192.0.0.0/24'
|
||||
# - '169.254.0.0/16'
|
||||
# - '192.88.99.0/24'
|
||||
# - '198.18.0.0/15'
|
||||
# - '192.0.2.0/24'
|
||||
# - '198.51.100.0/24'
|
||||
# - '203.0.113.0/24'
|
||||
# - '224.0.0.0/4'
|
||||
# - '::1/128'
|
||||
# - 'fe80::/10'
|
||||
# - 'fc00::/7'
|
||||
# - '2001:db8::/32'
|
||||
# - 'ff00::/8'
|
||||
# - 'fec0::/10'
|
||||
|
||||
# List of IP address CIDR ranges that should be allowed for federation,
|
||||
# identity servers, push servers, and for checking key validity for
|
||||
# third-party invite events. This is useful for specifying exceptions to
|
||||
# wide-ranging blacklisted target IP ranges - e.g. for communication with
|
||||
# a push server only visible in your network.
|
||||
#
|
||||
# This whitelist overrides ip_range_blacklist and defaults to an empty
|
||||
# list.
|
||||
#
|
||||
#ip_range_whitelist:
|
||||
# - '192.168.1.1'
|
||||
|
||||
# List of ports that Synapse should listen on, their purpose and their
|
||||
# configuration.
|
||||
#
|
||||
@@ -714,6 +642,27 @@ acme:
|
||||
# - nyc.example.com
|
||||
# - syd.example.com
|
||||
|
||||
# Prevent federation requests from being sent to the following
|
||||
# blacklist IP address CIDR ranges. If this option is not specified, or
|
||||
# specified with an empty list, no ip range blacklist will be enforced.
|
||||
#
|
||||
# As of Synapse v1.4.0 this option also affects any outbound requests to identity
|
||||
# servers provided by user input.
|
||||
#
|
||||
# (0.0.0.0 and :: are always blacklisted, whether or not they are explicitly
|
||||
# listed here, since they correspond to unroutable addresses.)
|
||||
#
|
||||
federation_ip_range_blacklist:
|
||||
- '127.0.0.0/8'
|
||||
- '10.0.0.0/8'
|
||||
- '172.16.0.0/12'
|
||||
- '192.168.0.0/16'
|
||||
- '100.64.0.0/10'
|
||||
- '169.254.0.0/16'
|
||||
- '::1/128'
|
||||
- 'fe80::/64'
|
||||
- 'fc00::/7'
|
||||
|
||||
# Report prometheus metrics on the age of PDUs being sent to and received from
|
||||
# the following domains. This can be used to give an idea of "delay" on inbound
|
||||
# and outbound federation, though be aware that any delay can be due to problems
|
||||
@@ -725,12 +674,6 @@ acme:
|
||||
# - matrix.org
|
||||
# - example.com
|
||||
|
||||
# Uncomment to disable profile lookup over federation. By default, the
|
||||
# Federation API allows other homeservers to obtain profile data of any user
|
||||
# on this homeserver. Defaults to 'true'.
|
||||
#
|
||||
#allow_profile_lookup_over_federation: false
|
||||
|
||||
|
||||
## Caching ##
|
||||
|
||||
@@ -856,9 +799,6 @@ log_config: "CONFDIR/SERVERNAME.log.config"
|
||||
# users are joining rooms the server is already in (this is cheap) vs
|
||||
# "remote" for when users are trying to join rooms not on the server (which
|
||||
# can be more expensive)
|
||||
# - one for ratelimiting how often a user or IP can attempt to validate a 3PID.
|
||||
# - two for ratelimiting how often invites can be sent in a room or to a
|
||||
# specific user.
|
||||
#
|
||||
# The defaults are as shown below.
|
||||
#
|
||||
@@ -888,22 +828,11 @@ log_config: "CONFDIR/SERVERNAME.log.config"
|
||||
#rc_joins:
|
||||
# local:
|
||||
# per_second: 0.1
|
||||
# burst_count: 10
|
||||
# burst_count: 3
|
||||
# remote:
|
||||
# per_second: 0.01
|
||||
# burst_count: 10
|
||||
#
|
||||
#rc_3pid_validation:
|
||||
# per_second: 0.003
|
||||
# burst_count: 5
|
||||
#
|
||||
#rc_invites:
|
||||
# per_room:
|
||||
# per_second: 0.3
|
||||
# burst_count: 10
|
||||
# per_user:
|
||||
# per_second: 0.003
|
||||
# burst_count: 5
|
||||
# burst_count: 3
|
||||
|
||||
|
||||
# Ratelimiting settings for incoming federation
|
||||
#
|
||||
@@ -1024,20 +953,10 @@ media_store_path: "DATADIR/media_store"
|
||||
# - '172.16.0.0/12'
|
||||
# - '192.168.0.0/16'
|
||||
# - '100.64.0.0/10'
|
||||
# - '192.0.0.0/24'
|
||||
# - '169.254.0.0/16'
|
||||
# - '192.88.99.0/24'
|
||||
# - '198.18.0.0/15'
|
||||
# - '192.0.2.0/24'
|
||||
# - '198.51.100.0/24'
|
||||
# - '203.0.113.0/24'
|
||||
# - '224.0.0.0/4'
|
||||
# - '::1/128'
|
||||
# - 'fe80::/10'
|
||||
# - 'fe80::/64'
|
||||
# - 'fc00::/7'
|
||||
# - '2001:db8::/32'
|
||||
# - 'ff00::/8'
|
||||
# - 'fec0::/10'
|
||||
|
||||
# List of IP address CIDR ranges that the URL preview spider is allowed
|
||||
# to access even if they are specified in url_preview_ip_range_blacklist.
|
||||
@@ -1265,9 +1184,9 @@ account_validity:
|
||||
#
|
||||
#allowed_local_3pids:
|
||||
# - medium: email
|
||||
# pattern: '^[^@]+@matrix\.org$'
|
||||
# pattern: '.*@matrix\.org'
|
||||
# - medium: email
|
||||
# pattern: '^[^@]+@vector\.im$'
|
||||
# pattern: '.*@vector\.im'
|
||||
# - medium: msisdn
|
||||
# pattern: '\+44'
|
||||
|
||||
@@ -1311,9 +1230,8 @@ account_validity:
|
||||
# email will be globally disabled.
|
||||
#
|
||||
# Additionally, if `msisdn` is not set, registration and password resets via msisdn
|
||||
# will be disabled regardless, and users will not be able to associate an msisdn
|
||||
# identifier to their account. This is due to Synapse currently not supporting
|
||||
# any method of sending SMS messages on its own.
|
||||
# will be disabled regardless. This is due to Synapse currently not supporting any
|
||||
# method of sending SMS messages on its own.
|
||||
#
|
||||
# To enable using an identity server for operations regarding a particular third-party
|
||||
# identifier type, set the value to the URL of that identity server as shown in the
|
||||
@@ -1358,8 +1276,6 @@ account_threepid_delegates:
|
||||
# By default, any room aliases included in this list will be created
|
||||
# as a publicly joinable room when the first user registers for the
|
||||
# homeserver. This behaviour can be customised with the settings below.
|
||||
# If the room already exists, make certain it is a publicly joinable
|
||||
# room. The join rule of the room must be set to 'public'.
|
||||
#
|
||||
#auto_join_rooms:
|
||||
# - "#example:example.com"
|
||||
@@ -1470,31 +1386,14 @@ metrics_flags:
|
||||
|
||||
## API Configuration ##
|
||||
|
||||
# Controls for the state that is shared with users who receive an invite
|
||||
# to a room
|
||||
# A list of event types that will be included in the room_invite_state
|
||||
#
|
||||
room_prejoin_state:
|
||||
# By default, the following state event types are shared with users who
|
||||
# receive invites to the room:
|
||||
#
|
||||
# - m.room.join_rules
|
||||
# - m.room.canonical_alias
|
||||
# - m.room.avatar
|
||||
# - m.room.encryption
|
||||
# - m.room.name
|
||||
#
|
||||
# Uncomment the following to disable these defaults (so that only the event
|
||||
# types listed in 'additional_event_types' are shared). Defaults to 'false'.
|
||||
#
|
||||
#disable_default_event_types: true
|
||||
|
||||
# Additional state event types to share with users when they are invited
|
||||
# to a room.
|
||||
#
|
||||
# By default, this list is empty (so only the default event types are shared).
|
||||
#
|
||||
#additional_event_types:
|
||||
# - org.example.custom.event.type
|
||||
#room_invite_state_types:
|
||||
# - "m.room.join_rules"
|
||||
# - "m.room.canonical_alias"
|
||||
# - "m.room.avatar"
|
||||
# - "m.room.encryption"
|
||||
# - "m.room.name"
|
||||
|
||||
|
||||
# A list of application service config files to use
|
||||
@@ -1623,10 +1522,10 @@ trusted_key_servers:
|
||||
# enable SAML login.
|
||||
#
|
||||
# Once SAML support is enabled, a metadata file will be exposed at
|
||||
# https://<server>:<port>/_synapse/client/saml2/metadata.xml, which you may be able to
|
||||
# https://<server>:<port>/_matrix/saml2/metadata.xml, which you may be able to
|
||||
# use to configure your SAML IdP with. Alternatively, you can manually configure
|
||||
# the IdP to use an ACS location of
|
||||
# https://<server>:<port>/_synapse/client/saml2/authn_response.
|
||||
# https://<server>:<port>/_matrix/saml2/authn_response.
|
||||
#
|
||||
saml2_config:
|
||||
# `sp_config` is the configuration for the pysaml2 Service Provider.
|
||||
@@ -1646,12 +1545,6 @@ saml2_config:
|
||||
# remote:
|
||||
# - url: https://our_idp/metadata.xml
|
||||
|
||||
# Allowed clock difference in seconds between the homeserver and IdP.
|
||||
#
|
||||
# Uncomment the below to increase the accepted time difference from 0 to 3 seconds.
|
||||
#
|
||||
#accepted_time_diff: 3
|
||||
|
||||
# By default, the user has to go to our login page first. If you'd like
|
||||
# to allow IdP-initiated login, set 'allow_unsolicited: true' in a
|
||||
# 'service.sp' section:
|
||||
@@ -1774,197 +1667,141 @@ saml2_config:
|
||||
# - attribute: department
|
||||
# value: "sales"
|
||||
|
||||
# If the metadata XML contains multiple IdP entities then the `idp_entityid`
|
||||
# option must be set to the entity to redirect users to.
|
||||
#
|
||||
# Most deployments only have a single IdP entity and so should omit this
|
||||
# option.
|
||||
#
|
||||
#idp_entityid: 'https://our_idp/entityid'
|
||||
|
||||
|
||||
# List of OpenID Connect (OIDC) / OAuth 2.0 identity providers, for registration
|
||||
# and login.
|
||||
#
|
||||
# Options for each entry include:
|
||||
#
|
||||
# idp_id: a unique identifier for this identity provider. Used internally
|
||||
# by Synapse; should be a single word such as 'github'.
|
||||
#
|
||||
# Note that, if this is changed, users authenticating via that provider
|
||||
# will no longer be recognised as the same user!
|
||||
#
|
||||
# (Use "oidc" here if you are migrating from an old "oidc_config"
|
||||
# configuration.)
|
||||
#
|
||||
# idp_name: A user-facing name for this identity provider, which is used to
|
||||
# offer the user a choice of login mechanisms.
|
||||
#
|
||||
# idp_icon: An optional icon for this identity provider, which is presented
|
||||
# by clients and Synapse's own IdP picker page. If given, must be an
|
||||
# MXC URI of the format mxc://<server-name>/<media-id>. (An easy way to
|
||||
# obtain such an MXC URI is to upload an image to an (unencrypted) room
|
||||
# and then copy the "url" from the source of the event.)
|
||||
#
|
||||
# idp_brand: An optional brand for this identity provider, allowing clients
|
||||
# to style the login flow according to the identity provider in question.
|
||||
# See the spec for possible options here.
|
||||
#
|
||||
# discover: set to 'false' to disable the use of the OIDC discovery mechanism
|
||||
# to discover endpoints. Defaults to true.
|
||||
#
|
||||
# issuer: Required. The OIDC issuer. Used to validate tokens and (if discovery
|
||||
# is enabled) to discover the provider's endpoints.
|
||||
#
|
||||
# client_id: Required. oauth2 client id to use.
|
||||
#
|
||||
# client_secret: oauth2 client secret to use. May be omitted if
|
||||
# client_secret_jwt_key is given, or if client_auth_method is 'none'.
|
||||
#
|
||||
# client_secret_jwt_key: Alternative to client_secret: details of a key used
|
||||
# to create a JSON Web Token to be used as an OAuth2 client secret. If
|
||||
# given, must be a dictionary with the following properties:
|
||||
#
|
||||
# key: a pem-encoded signing key. Must be a suitable key for the
|
||||
# algorithm specified. Required unless 'key_file' is given.
|
||||
#
|
||||
# key_file: the path to file containing a pem-encoded signing key file.
|
||||
# Required unless 'key' is given.
|
||||
#
|
||||
# jwt_header: a dictionary giving properties to include in the JWT
|
||||
# header. Must include the key 'alg', giving the algorithm used to
|
||||
# sign the JWT, such as "ES256", using the JWA identifiers in
|
||||
# RFC7518.
|
||||
#
|
||||
# jwt_payload: an optional dictionary giving properties to include in
|
||||
# the JWT payload. Normally this should include an 'iss' key.
|
||||
#
|
||||
# client_auth_method: auth method to use when exchanging the token. Valid
|
||||
# values are 'client_secret_basic' (default), 'client_secret_post' and
|
||||
# 'none'.
|
||||
#
|
||||
# scopes: list of scopes to request. This should normally include the "openid"
|
||||
# scope. Defaults to ["openid"].
|
||||
#
|
||||
# authorization_endpoint: the oauth2 authorization endpoint. Required if
|
||||
# provider discovery is disabled.
|
||||
#
|
||||
# token_endpoint: the oauth2 token endpoint. Required if provider discovery is
|
||||
# disabled.
|
||||
#
|
||||
# userinfo_endpoint: the OIDC userinfo endpoint. Required if discovery is
|
||||
# disabled and the 'openid' scope is not requested.
|
||||
#
|
||||
# jwks_uri: URI where to fetch the JWKS. Required if discovery is disabled and
|
||||
# the 'openid' scope is used.
|
||||
#
|
||||
# skip_verification: set to 'true' to skip metadata verification. Use this if
|
||||
# you are connecting to a provider that is not OpenID Connect compliant.
|
||||
# Defaults to false. Avoid this in production.
|
||||
#
|
||||
# user_profile_method: Whether to fetch the user profile from the userinfo
|
||||
# endpoint. Valid values are: 'auto' or 'userinfo_endpoint'.
|
||||
#
|
||||
# Defaults to 'auto', which fetches the userinfo endpoint if 'openid' is
|
||||
# included in 'scopes'. Set to 'userinfo_endpoint' to always fetch the
|
||||
# userinfo endpoint.
|
||||
#
|
||||
# allow_existing_users: set to 'true' to allow a user logging in via OIDC to
|
||||
# match a pre-existing account instead of failing. This could be used if
|
||||
# switching from password logins to OIDC. Defaults to false.
|
||||
#
|
||||
# user_mapping_provider: Configuration for how attributes returned from a OIDC
|
||||
# provider are mapped onto a matrix user. This setting has the following
|
||||
# sub-properties:
|
||||
#
|
||||
# module: The class name of a custom mapping module. Default is
|
||||
# 'synapse.handlers.oidc_handler.JinjaOidcMappingProvider'.
|
||||
# See https://github.com/matrix-org/synapse/blob/master/docs/sso_mapping_providers.md#openid-mapping-providers
|
||||
# for information on implementing a custom mapping provider.
|
||||
#
|
||||
# config: Configuration for the mapping provider module. This section will
|
||||
# be passed as a Python dictionary to the user mapping provider
|
||||
# module's `parse_config` method.
|
||||
#
|
||||
# For the default provider, the following settings are available:
|
||||
#
|
||||
# subject_claim: name of the claim containing a unique identifier
|
||||
# for the user. Defaults to 'sub', which OpenID Connect
|
||||
# compliant providers should provide.
|
||||
#
|
||||
# localpart_template: Jinja2 template for the localpart of the MXID.
|
||||
# If this is not set, the user will be prompted to choose their
|
||||
# own username (see 'sso_auth_account_details.html' in the 'sso'
|
||||
# section of this file).
|
||||
#
|
||||
# display_name_template: Jinja2 template for the display name to set
|
||||
# on first login. If unset, no displayname will be set.
|
||||
#
|
||||
# email_template: Jinja2 template for the email address of the user.
|
||||
# If unset, no email address will be added to the account.
|
||||
#
|
||||
# extra_attributes: a map of Jinja2 templates for extra attributes
|
||||
# to send back to the client during login.
|
||||
# Note that these are non-standard and clients will ignore them
|
||||
# without modifications.
|
||||
#
|
||||
# When rendering, the Jinja2 templates are given a 'user' variable,
|
||||
# which is set to the claims returned by the UserInfo Endpoint and/or
|
||||
# in the ID Token.
|
||||
#
|
||||
# It is possible to configure Synapse to only allow logins if certain attributes
|
||||
# match particular values in the OIDC userinfo. The requirements can be listed under
|
||||
# `attribute_requirements` as shown below. All of the listed attributes must
|
||||
# match for the login to be permitted. Additional attributes can be added to
|
||||
# userinfo by expanding the `scopes` section of the OIDC config to retrieve
|
||||
# additional information from the OIDC provider.
|
||||
#
|
||||
# If the OIDC claim is a list, then the attribute must match any value in the list.
|
||||
# Otherwise, it must exactly match the value of the claim. Using the example
|
||||
# below, the `family_name` claim MUST be "Stephensson", but the `groups`
|
||||
# claim MUST contain "admin".
|
||||
#
|
||||
# attribute_requirements:
|
||||
# - attribute: family_name
|
||||
# value: "Stephensson"
|
||||
# - attribute: groups
|
||||
# value: "admin"
|
||||
# Enable OpenID Connect (OIDC) / OAuth 2.0 for registration and login.
|
||||
#
|
||||
# See https://github.com/matrix-org/synapse/blob/master/docs/openid.md
|
||||
# for information on how to configure these options.
|
||||
# for some example configurations.
|
||||
#
|
||||
# For backwards compatibility, it is also possible to configure a single OIDC
|
||||
# provider via an 'oidc_config' setting. This is now deprecated and admins are
|
||||
# advised to migrate to the 'oidc_providers' format. (When doing that migration,
|
||||
# use 'oidc' for the idp_id to ensure that existing users continue to be
|
||||
# recognised.)
|
||||
#
|
||||
oidc_providers:
|
||||
# Generic example
|
||||
oidc_config:
|
||||
# Uncomment the following to enable authorization against an OpenID Connect
|
||||
# server. Defaults to false.
|
||||
#
|
||||
#- idp_id: my_idp
|
||||
# idp_name: "My OpenID provider"
|
||||
# idp_icon: "mxc://example.com/mediaid"
|
||||
# discover: false
|
||||
# issuer: "https://accounts.example.com/"
|
||||
# client_id: "provided-by-your-issuer"
|
||||
# client_secret: "provided-by-your-issuer"
|
||||
# client_auth_method: client_secret_post
|
||||
# scopes: ["openid", "profile"]
|
||||
# authorization_endpoint: "https://accounts.example.com/oauth2/auth"
|
||||
# token_endpoint: "https://accounts.example.com/oauth2/token"
|
||||
# userinfo_endpoint: "https://accounts.example.com/userinfo"
|
||||
# jwks_uri: "https://accounts.example.com/.well-known/jwks.json"
|
||||
# skip_verification: true
|
||||
# user_mapping_provider:
|
||||
# config:
|
||||
# subject_claim: "id"
|
||||
# localpart_template: "{{ user.login }}"
|
||||
# display_name_template: "{{ user.name }}"
|
||||
# email_template: "{{ user.email }}"
|
||||
# attribute_requirements:
|
||||
# - attribute: userGroup
|
||||
# value: "synapseUsers"
|
||||
#enabled: true
|
||||
|
||||
# Uncomment the following to disable use of the OIDC discovery mechanism to
|
||||
# discover endpoints. Defaults to true.
|
||||
#
|
||||
#discover: false
|
||||
|
||||
# the OIDC issuer. Used to validate tokens and (if discovery is enabled) to
|
||||
# discover the provider's endpoints.
|
||||
#
|
||||
# Required if 'enabled' is true.
|
||||
#
|
||||
#issuer: "https://accounts.example.com/"
|
||||
|
||||
# oauth2 client id to use.
|
||||
#
|
||||
# Required if 'enabled' is true.
|
||||
#
|
||||
#client_id: "provided-by-your-issuer"
|
||||
|
||||
# oauth2 client secret to use.
|
||||
#
|
||||
# Required if 'enabled' is true.
|
||||
#
|
||||
#client_secret: "provided-by-your-issuer"
|
||||
|
||||
# auth method to use when exchanging the token.
|
||||
# Valid values are 'client_secret_basic' (default), 'client_secret_post' and
|
||||
# 'none'.
|
||||
#
|
||||
#client_auth_method: client_secret_post
|
||||
|
||||
# list of scopes to request. This should normally include the "openid" scope.
|
||||
# Defaults to ["openid"].
|
||||
#
|
||||
#scopes: ["openid", "profile"]
|
||||
|
||||
# the oauth2 authorization endpoint. Required if provider discovery is disabled.
|
||||
#
|
||||
#authorization_endpoint: "https://accounts.example.com/oauth2/auth"
|
||||
|
||||
# the oauth2 token endpoint. Required if provider discovery is disabled.
|
||||
#
|
||||
#token_endpoint: "https://accounts.example.com/oauth2/token"
|
||||
|
||||
# the OIDC userinfo endpoint. Required if discovery is disabled and the
|
||||
# "openid" scope is not requested.
|
||||
#
|
||||
#userinfo_endpoint: "https://accounts.example.com/userinfo"
|
||||
|
||||
# URI where to fetch the JWKS. Required if discovery is disabled and the
|
||||
# "openid" scope is used.
|
||||
#
|
||||
#jwks_uri: "https://accounts.example.com/.well-known/jwks.json"
|
||||
|
||||
# Uncomment to skip metadata verification. Defaults to false.
|
||||
#
|
||||
# Use this if you are connecting to a provider that is not OpenID Connect
|
||||
# compliant.
|
||||
# Avoid this in production.
|
||||
#
|
||||
#skip_verification: true
|
||||
|
||||
# Whether to fetch the user profile from the userinfo endpoint. Valid
|
||||
# values are: "auto" or "userinfo_endpoint".
|
||||
#
|
||||
# Defaults to "auto", which fetches the userinfo endpoint if "openid" is included
|
||||
# in `scopes`. Uncomment the following to always fetch the userinfo endpoint.
|
||||
#
|
||||
#user_profile_method: "userinfo_endpoint"
|
||||
|
||||
# Uncomment to allow a user logging in via OIDC to match a pre-existing account instead
|
||||
# of failing. This could be used if switching from password logins to OIDC. Defaults to false.
|
||||
#
|
||||
#allow_existing_users: true
|
||||
|
||||
# An external module can be provided here as a custom solution to mapping
|
||||
# attributes returned from a OIDC provider onto a matrix user.
|
||||
#
|
||||
user_mapping_provider:
|
||||
# The custom module's class. Uncomment to use a custom module.
|
||||
# Default is 'synapse.handlers.oidc_handler.JinjaOidcMappingProvider'.
|
||||
#
|
||||
# See https://github.com/matrix-org/synapse/blob/master/docs/sso_mapping_providers.md#openid-mapping-providers
|
||||
# for information on implementing a custom mapping provider.
|
||||
#
|
||||
#module: mapping_provider.OidcMappingProvider
|
||||
|
||||
# Custom configuration values for the module. This section will be passed as
|
||||
# a Python dictionary to the user mapping provider module's `parse_config`
|
||||
# method.
|
||||
#
|
||||
# The examples below are intended for the default provider: they should be
|
||||
# changed if using a custom provider.
|
||||
#
|
||||
config:
|
||||
# name of the claim containing a unique identifier for the user.
|
||||
# Defaults to `sub`, which OpenID Connect compliant providers should provide.
|
||||
#
|
||||
#subject_claim: "sub"
|
||||
|
||||
# Jinja2 template for the localpart of the MXID.
|
||||
#
|
||||
# When rendering, this template is given the following variables:
|
||||
# * user: The claims returned by the UserInfo Endpoint and/or in the ID
|
||||
# Token
|
||||
#
|
||||
# This must be configured if using the default mapping provider.
|
||||
#
|
||||
localpart_template: "{{ user.preferred_username }}"
|
||||
|
||||
# Jinja2 template for the display name to set on first login.
|
||||
#
|
||||
# If unset, no displayname will be set.
|
||||
#
|
||||
#display_name_template: "{{ user.given_name }} {{ user.last_name }}"
|
||||
|
||||
# Jinja2 templates for extra attributes to send back to the client during
|
||||
# login.
|
||||
#
|
||||
# Note that these are non-standard and clients will ignore them without modifications.
|
||||
#
|
||||
#extra_attributes:
|
||||
#birthdate: "{{ user.birthdate }}"
|
||||
|
||||
|
||||
|
||||
# Enable Central Authentication Service (CAS) for registration and login.
|
||||
@@ -1979,6 +1816,10 @@ cas_config:
|
||||
#
|
||||
#server_url: "https://cas-server.com"
|
||||
|
||||
# The public URL of the homeserver.
|
||||
#
|
||||
#service_url: "https://homeserver.domain.com:8448"
|
||||
|
||||
# The attribute of the CAS response to use as the display name.
|
||||
#
|
||||
# If unset, no displayname will be set.
|
||||
@@ -2021,135 +1862,41 @@ sso:
|
||||
# - https://my.custom.client/
|
||||
|
||||
# Directory in which Synapse will try to find the template files below.
|
||||
# If not set, or the files named below are not found within the template
|
||||
# directory, default templates from within the Synapse package will be used.
|
||||
# If not set, default templates from within the Synapse package will be used.
|
||||
#
|
||||
# DO NOT UNCOMMENT THIS SETTING unless you want to customise the templates.
|
||||
# If you *do* uncomment it, you will need to make sure that all the templates
|
||||
# below are in the directory.
|
||||
#
|
||||
# Synapse will look for the following templates in this directory:
|
||||
#
|
||||
# * HTML page to prompt the user to choose an Identity Provider during
|
||||
# login: 'sso_login_idp_picker.html'.
|
||||
#
|
||||
# This is only used if multiple SSO Identity Providers are configured.
|
||||
#
|
||||
# When rendering, this template is given the following variables:
|
||||
# * redirect_url: the URL that the user will be redirected to after
|
||||
# login.
|
||||
#
|
||||
# * server_name: the homeserver's name.
|
||||
#
|
||||
# * providers: a list of available Identity Providers. Each element is
|
||||
# an object with the following attributes:
|
||||
#
|
||||
# * idp_id: unique identifier for the IdP
|
||||
# * idp_name: user-facing name for the IdP
|
||||
# * idp_icon: if specified in the IdP config, an MXC URI for an icon
|
||||
# for the IdP
|
||||
# * idp_brand: if specified in the IdP config, a textual identifier
|
||||
# for the brand of the IdP
|
||||
#
|
||||
# The rendered HTML page should contain a form which submits its results
|
||||
# back as a GET request, with the following query parameters:
|
||||
#
|
||||
# * redirectUrl: the client redirect URI (ie, the `redirect_url` passed
|
||||
# to the template)
|
||||
#
|
||||
# * idp: the 'idp_id' of the chosen IDP.
|
||||
#
|
||||
# * HTML page to prompt new users to enter a userid and confirm other
|
||||
# details: 'sso_auth_account_details.html'. This is only shown if the
|
||||
# SSO implementation (with any user_mapping_provider) does not return
|
||||
# a localpart.
|
||||
#
|
||||
# When rendering, this template is given the following variables:
|
||||
#
|
||||
# * server_name: the homeserver's name.
|
||||
#
|
||||
# * idp: details of the SSO Identity Provider that the user logged in
|
||||
# with: an object with the following attributes:
|
||||
#
|
||||
# * idp_id: unique identifier for the IdP
|
||||
# * idp_name: user-facing name for the IdP
|
||||
# * idp_icon: if specified in the IdP config, an MXC URI for an icon
|
||||
# for the IdP
|
||||
# * idp_brand: if specified in the IdP config, a textual identifier
|
||||
# for the brand of the IdP
|
||||
#
|
||||
# * user_attributes: an object containing details about the user that
|
||||
# we received from the IdP. May have the following attributes:
|
||||
#
|
||||
# * display_name: the user's display_name
|
||||
# * emails: a list of email addresses
|
||||
#
|
||||
# The template should render a form which submits the following fields:
|
||||
#
|
||||
# * username: the localpart of the user's chosen user id
|
||||
#
|
||||
# * HTML page allowing the user to consent to the server's terms and
|
||||
# conditions. This is only shown for new users, and only if
|
||||
# `user_consent.require_at_registration` is set.
|
||||
#
|
||||
# When rendering, this template is given the following variables:
|
||||
#
|
||||
# * server_name: the homeserver's name.
|
||||
#
|
||||
# * user_id: the user's matrix proposed ID.
|
||||
#
|
||||
# * user_profile.display_name: the user's proposed display name, if any.
|
||||
#
|
||||
# * consent_version: the version of the terms that the user will be
|
||||
# shown
|
||||
#
|
||||
# * terms_url: a link to the page showing the terms.
|
||||
#
|
||||
# The template should render a form which submits the following fields:
|
||||
#
|
||||
# * accepted_version: the version of the terms accepted by the user
|
||||
# (ie, 'consent_version' from the input variables).
|
||||
#
|
||||
# * HTML page for a confirmation step before redirecting back to the client
|
||||
# with the login token: 'sso_redirect_confirm.html'.
|
||||
#
|
||||
# When rendering, this template is given the following variables:
|
||||
#
|
||||
# * redirect_url: the URL the user is about to be redirected to.
|
||||
# When rendering, this template is given three variables:
|
||||
# * redirect_url: the URL the user is about to be redirected to. Needs
|
||||
# manual escaping (see
|
||||
# https://jinja.palletsprojects.com/en/2.11.x/templates/#html-escaping).
|
||||
#
|
||||
# * display_url: the same as `redirect_url`, but with the query
|
||||
# parameters stripped. The intention is to have a
|
||||
# human-readable URL to show to users, not to use it as
|
||||
# the final address to redirect to.
|
||||
# the final address to redirect to. Needs manual escaping
|
||||
# (see https://jinja.palletsprojects.com/en/2.11.x/templates/#html-escaping).
|
||||
#
|
||||
# * server_name: the homeserver's name.
|
||||
#
|
||||
# * new_user: a boolean indicating whether this is the user's first time
|
||||
# logging in.
|
||||
#
|
||||
# * user_id: the user's matrix ID.
|
||||
#
|
||||
# * user_profile.avatar_url: an MXC URI for the user's avatar, if any.
|
||||
# None if the user has not set an avatar.
|
||||
#
|
||||
# * user_profile.display_name: the user's display name. None if the user
|
||||
# has not set a display name.
|
||||
#
|
||||
# * HTML page which notifies the user that they are authenticating to confirm
|
||||
# an operation on their account during the user interactive authentication
|
||||
# process: 'sso_auth_confirm.html'.
|
||||
#
|
||||
# When rendering, this template is given the following variables:
|
||||
# * redirect_url: the URL the user is about to be redirected to.
|
||||
# * redirect_url: the URL the user is about to be redirected to. Needs
|
||||
# manual escaping (see
|
||||
# https://jinja.palletsprojects.com/en/2.11.x/templates/#html-escaping).
|
||||
#
|
||||
# * description: the operation which the user is being asked to confirm
|
||||
#
|
||||
# * idp: details of the Identity Provider that we will use to confirm
|
||||
# the user's identity: an object with the following attributes:
|
||||
#
|
||||
# * idp_id: unique identifier for the IdP
|
||||
# * idp_name: user-facing name for the IdP
|
||||
# * idp_icon: if specified in the IdP config, an MXC URI for an icon
|
||||
# for the IdP
|
||||
# * idp_brand: if specified in the IdP config, a textual identifier
|
||||
# for the brand of the IdP
|
||||
#
|
||||
# * HTML page shown after a successful user interactive authentication session:
|
||||
# 'sso_auth_success.html'.
|
||||
#
|
||||
@@ -2158,14 +1905,6 @@ sso:
|
||||
#
|
||||
# This template has no additional variables.
|
||||
#
|
||||
# * HTML page shown after a user-interactive authentication session which
|
||||
# does not map correctly onto the expected user: 'sso_auth_bad_user.html'.
|
||||
#
|
||||
# When rendering, this template is given the following variables:
|
||||
# * server_name: the homeserver's name.
|
||||
# * user_id_to_verify: the MXID of the user that we are trying to
|
||||
# validate.
|
||||
#
|
||||
# * HTML page shown during single sign-on if a deactivated user (according to Synapse's database)
|
||||
# attempts to login: 'sso_account_deactivated.html'.
|
||||
#
|
||||
@@ -2291,21 +2030,6 @@ password_config:
|
||||
#
|
||||
#require_uppercase: true
|
||||
|
||||
ui_auth:
|
||||
# The amount of time to allow a user-interactive authentication session
|
||||
# to be active.
|
||||
#
|
||||
# This defaults to 0, meaning the user is queried for their credentials
|
||||
# before every action, but this can be overridden to allow a single
|
||||
# validation to be re-used. This weakens the protections afforded by
|
||||
# the user-interactive authentication process, by allowing for multiple
|
||||
# (and potentially different) operations to use the same validation session.
|
||||
#
|
||||
# Uncomment below to allow for credential validation to last for 15
|
||||
# seconds.
|
||||
#
|
||||
#session_timeout: "15s"
|
||||
|
||||
|
||||
# Configuration for sending emails from Synapse.
|
||||
#
|
||||
@@ -2371,15 +2095,10 @@ email:
|
||||
#
|
||||
#validation_token_lifetime: 15m
|
||||
|
||||
# The web client location to direct users to during an invite. This is passed
|
||||
# to the identity server as the org.matrix.web_client_location key. Defaults
|
||||
# to unset, giving no guidance to the identity server.
|
||||
#
|
||||
#invite_client_location: https://app.element.io
|
||||
|
||||
# Directory in which Synapse will try to find the template files below.
|
||||
# If not set, or the files named below are not found within the template
|
||||
# directory, default templates from within the Synapse package will be used.
|
||||
# If not set, default templates from within the Synapse package will be used.
|
||||
#
|
||||
# Do not uncomment this setting unless you want to customise the templates.
|
||||
#
|
||||
# Synapse will look for the following templates in this directory:
|
||||
#
|
||||
@@ -2517,35 +2236,20 @@ password_providers:
|
||||
|
||||
|
||||
|
||||
## Push ##
|
||||
|
||||
push:
|
||||
# Clients requesting push notifications can either have the body of
|
||||
# the message sent in the notification poke along with other details
|
||||
# like the sender, or just the event ID and room ID (`event_id_only`).
|
||||
# If clients choose the former, this option controls whether the
|
||||
# notification request includes the content of the event (other details
|
||||
# like the sender are still included). For `event_id_only` push, it
|
||||
# has no effect.
|
||||
#
|
||||
# For modern android devices the notification content will still appear
|
||||
# because it is loaded by the app. iPhone, however will send a
|
||||
# notification saying only that a message arrived and who it came from.
|
||||
#
|
||||
# The default value is "true" to include message details. Uncomment to only
|
||||
# include the event ID and room ID in push notification payloads.
|
||||
#
|
||||
#include_content: false
|
||||
|
||||
# When a push notification is received, an unread count is also sent.
|
||||
# This number can either be calculated as the number of unread messages
|
||||
# for the user, or the number of *rooms* the user has unread messages in.
|
||||
#
|
||||
# The default value is "true", meaning push clients will see the number of
|
||||
# rooms with unread messages in them. Uncomment to instead send the number
|
||||
# of unread messages.
|
||||
#
|
||||
#group_unread_count_by_room: false
|
||||
# Clients requesting push notifications can either have the body of
|
||||
# the message sent in the notification poke along with other details
|
||||
# like the sender, or just the event ID and room ID (`event_id_only`).
|
||||
# If clients choose the former, this option controls whether the
|
||||
# notification request includes the content of the event (other details
|
||||
# like the sender are still included). For `event_id_only` push, it
|
||||
# has no effect.
|
||||
#
|
||||
# For modern android devices the notification content will still appear
|
||||
# because it is loaded by the app. iPhone, however will send a
|
||||
# notification saying only that a message arrived and who it came from.
|
||||
#
|
||||
#push:
|
||||
# include_content: true
|
||||
|
||||
|
||||
# Spam checkers are third-party modules that can block specific actions
|
||||
@@ -2588,41 +2292,25 @@ spam_checker:
|
||||
# If enabled, non server admins can only create groups with local parts
|
||||
# starting with this prefix
|
||||
#
|
||||
#group_creation_prefix: "unofficial_"
|
||||
#group_creation_prefix: "unofficial/"
|
||||
|
||||
|
||||
|
||||
# User Directory configuration
|
||||
#
|
||||
user_directory:
|
||||
# Defines whether users can search the user directory. If false then
|
||||
# empty responses are returned to all queries. Defaults to true.
|
||||
#
|
||||
# Uncomment to disable the user directory.
|
||||
#
|
||||
#enabled: false
|
||||
|
||||
# Defines whether to search all users visible to your HS when searching
|
||||
# the user directory, rather than limiting to users visible in public
|
||||
# rooms. Defaults to false.
|
||||
#
|
||||
# If you set it true, you'll have to rebuild the user_directory search
|
||||
# indexes, see:
|
||||
# https://github.com/matrix-org/synapse/blob/master/docs/user_directory.md
|
||||
#
|
||||
# Uncomment to return search results containing all known users, even if that
|
||||
# user does not share a room with the requester.
|
||||
#
|
||||
#search_all_users: true
|
||||
|
||||
# Defines whether to prefer local users in search query results.
|
||||
# If True, local users are more likely to appear above remote users
|
||||
# when searching the user directory. Defaults to false.
|
||||
#
|
||||
# Uncomment to prefer local over remote users in user directory search
|
||||
# results.
|
||||
#
|
||||
#prefer_local_users: true
|
||||
# 'enabled' defines whether users can search the user directory. If
|
||||
# false then empty responses are returned to all queries. Defaults to
|
||||
# true.
|
||||
#
|
||||
# 'search_all_users' defines whether to search all users visible to your HS
|
||||
# when searching the user directory, rather than limiting to users visible
|
||||
# in public rooms. Defaults to false. If you set it True, you'll have to
|
||||
# rebuild the user_directory search indexes, see
|
||||
# https://github.com/matrix-org/synapse/blob/master/docs/user_directory.md
|
||||
#
|
||||
#user_directory:
|
||||
# enabled: true
|
||||
# search_all_users: false
|
||||
|
||||
|
||||
# User Consent configuration
|
||||
@@ -2677,20 +2365,19 @@ user_directory:
|
||||
|
||||
|
||||
|
||||
# Settings for local room and user statistics collection. See
|
||||
# docs/room_and_user_statistics.md.
|
||||
# Local statistics collection. Used in populating the room directory.
|
||||
#
|
||||
stats:
|
||||
# Uncomment the following to disable room and user statistics. Note that doing
|
||||
# so may cause certain features (such as the room directory) not to work
|
||||
# correctly.
|
||||
#
|
||||
#enabled: false
|
||||
|
||||
# The size of each timeslice in the room_stats_historical and
|
||||
# user_stats_historical tables, as a time period. Defaults to "1d".
|
||||
#
|
||||
#bucket_size: 1h
|
||||
# 'bucket_size' controls how large each statistics timeslice is. It can
|
||||
# be defined in a human readable short form -- e.g. "1d", "1y".
|
||||
#
|
||||
# 'retention' controls how long historical statistics will be kept for.
|
||||
# It can be defined in a human readable short form -- e.g. "1d", "1y".
|
||||
#
|
||||
#
|
||||
#stats:
|
||||
# enabled: true
|
||||
# bucket_size: 1d
|
||||
# retention: 1y
|
||||
|
||||
|
||||
# Server Notices room configuration
|
||||
@@ -2870,13 +2557,6 @@ opentracing:
|
||||
#
|
||||
#run_background_tasks_on: worker1
|
||||
|
||||
# A shared secret used by the replication APIs to authenticate HTTP requests
|
||||
# from workers.
|
||||
#
|
||||
# By default this is unused and traffic is not authenticated.
|
||||
#
|
||||
#worker_replication_secret: ""
|
||||
|
||||
|
||||
# Configuration for Redis when using workers. This *must* be enabled when
|
||||
# using workers (unless using old style direct TCP configuration).
|
||||
|
||||
@@ -14,7 +14,6 @@ The Python class is instantiated with two objects:
|
||||
* An instance of `synapse.module_api.ModuleApi`.
|
||||
|
||||
It then implements methods which return a boolean to alter behavior in Synapse.
|
||||
All the methods must be defined.
|
||||
|
||||
There's a generic method for checking every event (`check_event_for_spam`), as
|
||||
well as some specific methods:
|
||||
@@ -23,63 +22,38 @@ well as some specific methods:
|
||||
* `user_may_create_room`
|
||||
* `user_may_create_room_alias`
|
||||
* `user_may_publish_room`
|
||||
* `check_username_for_spam`
|
||||
* `check_registration_for_spam`
|
||||
* `check_media_file_for_spam`
|
||||
|
||||
The details of each of these methods (as well as their inputs and outputs)
|
||||
The details of the each of these methods (as well as their inputs and outputs)
|
||||
are documented in the `synapse.events.spamcheck.SpamChecker` class.
|
||||
|
||||
The `ModuleApi` class provides a way for the custom spam checker class to
|
||||
call back into the homeserver internals.
|
||||
|
||||
Additionally, a `parse_config` method is mandatory and receives the plugin config
|
||||
dictionary. After parsing, it must return an object which will be
|
||||
passed to `__init__` later.
|
||||
|
||||
### Example
|
||||
|
||||
```python
|
||||
from synapse.spam_checker_api import RegistrationBehaviour
|
||||
|
||||
class ExampleSpamChecker:
|
||||
def __init__(self, config, api):
|
||||
self.config = config
|
||||
self.api = api
|
||||
|
||||
@staticmethod
|
||||
def parse_config(config):
|
||||
return config
|
||||
|
||||
async def check_event_for_spam(self, foo):
|
||||
def check_event_for_spam(self, foo):
|
||||
return False # allow all events
|
||||
|
||||
async def user_may_invite(self, inviter_userid, invitee_userid, room_id):
|
||||
def user_may_invite(self, inviter_userid, invitee_userid, room_id):
|
||||
return True # allow all invites
|
||||
|
||||
async def user_may_create_room(self, userid):
|
||||
def user_may_create_room(self, userid):
|
||||
return True # allow all room creations
|
||||
|
||||
async def user_may_create_room_alias(self, userid, room_alias):
|
||||
def user_may_create_room_alias(self, userid, room_alias):
|
||||
return True # allow all room aliases
|
||||
|
||||
async def user_may_publish_room(self, userid, room_id):
|
||||
def user_may_publish_room(self, userid, room_id):
|
||||
return True # allow publishing of all rooms
|
||||
|
||||
async def check_username_for_spam(self, user_profile):
|
||||
def check_username_for_spam(self, user_profile):
|
||||
return False # allow all usernames
|
||||
|
||||
async def check_registration_for_spam(
|
||||
self,
|
||||
email_threepid,
|
||||
username,
|
||||
request_info,
|
||||
auth_provider_id,
|
||||
):
|
||||
return RegistrationBehaviour.ALLOW # allow all registrations
|
||||
|
||||
async def check_media_file_for_spam(self, file_wrapper, file_info):
|
||||
return False # allow all media
|
||||
```
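
For a slightly less trivial illustration of `parse_config`, here is a sketch that validates an invented `blocked_words` option and uses it when scanning events. It is not a drop-in module: as noted above, the remaining callbacks from the full example must still be defined, and the exact shape of the `event` object is an assumption here.

```python
class WordListSpamChecker:
    """Purely illustrative: rejects events whose body contains a blocked word."""

    def __init__(self, config, api):
        self.config = config
        self.api = api

    @staticmethod
    def parse_config(config):
        # Validate the (invented) `blocked_words` option at startup and return
        # the object that will later be passed to __init__.
        words = config.get("blocked_words", [])
        if not all(isinstance(w, str) for w in words):
            raise Exception("blocked_words must be a list of strings")
        return {"blocked_words": [w.lower() for w in words]}

    async def check_event_for_spam(self, event):
        # `event.content` is assumed to be the usual event content dict;
        # returning True marks the event as spam.
        body = event.content.get("body", "")
        return any(w in body.lower() for w in self.config["blocked_words"])
```
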
## Configuration
|
||||
|
||||
@@ -15,21 +15,8 @@ where SAML mapping providers come into play.
|
||||
SSO mapping providers are currently supported for OpenID and SAML SSO
configurations. Please see the details below for how to implement your own.

It is up to the mapping provider whether the user should be assigned a predefined
Matrix ID based on the SSO attributes, or if the user should be allowed to
choose their own username.

In the first case - where users are automatically allocated a Matrix ID - it is
the responsibility of the mapping provider to normalise the SSO attributes and
map them to a valid Matrix ID. The [specification for Matrix
IDs](https://matrix.org/docs/spec/appendices#user-identifiers) has some
information about what is considered valid.
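
As a rough illustration of the kind of normalisation involved (this helper is hypothetical and not part of Synapse's API):

```python
import string

# Characters allowed in a Matrix user ID localpart, per the appendices above.
_ALLOWED = set(string.ascii_lowercase + string.digits + "._=-/")


def normalise_localpart(sso_username: str) -> str:
    """Map an arbitrary SSO username onto something resembling a valid localpart."""
    lowered = sso_username.lower()
    # Drop anything outside the allowed character set; a real provider might
    # instead transliterate or otherwise encode such characters.
    return "".join(c for c in lowered if c in _ALLOWED)


# normalise_localpart("Jöhn Doe") == "jhndoe"
```
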
If the mapping provider does not assign a Matrix ID, then Synapse will
|
||||
automatically serve an HTML page allowing the user to pick their own username.
|
||||
|
||||
External mapping providers are provided to Synapse in the form of an external
|
||||
Python module. You can retrieve this module from [PyPI](https://pypi.org) or elsewhere,
|
||||
Python module. You can retrieve this module from [PyPi](https://pypi.org) or elsewhere,
|
||||
but it must be importable via Synapse (e.g. it must be in the same virtualenv
|
||||
as Synapse). The Synapse config is then modified to point to the mapping provider
|
||||
(and optionally provide additional configuration for it).
|
||||
@@ -69,26 +56,16 @@ A custom mapping provider must specify the following methods:
|
||||
information from.
|
||||
- This method must return a string, which is the unique identifier for the
|
||||
user. Commonly the ``sub`` claim of the response.
|
||||
* `map_user_attributes(self, userinfo, token, failures)`
|
||||
* `map_user_attributes(self, userinfo, token)`
|
||||
- This method must be async.
|
||||
- Arguments:
|
||||
- `userinfo` - A `authlib.oidc.core.claims.UserInfo` object to extract user
|
||||
information from.
|
||||
- `token` - A dictionary which includes information necessary to make
|
||||
further requests to the OpenID provider.
|
||||
- `failures` - An `int` that represents the amount of times the returned
|
||||
mxid localpart mapping has failed. This should be used
|
||||
to create a deduplicated mxid localpart which should be
|
||||
returned instead. For example, if this method returns
|
||||
`john.doe` as the value of `localpart` in the returned
|
||||
dict, and that is already taken on the homeserver, this
|
||||
method will be called again with the same parameters but
|
||||
with failures=1. The method should then return a different
|
||||
`localpart` value, such as `john.doe1` (a short sketch follows this list).
|
||||
- Returns a dictionary with two keys:
|
||||
- `localpart`: A string, used to generate the Matrix ID. If this is
|
||||
`None`, the user is prompted to pick their own username.
|
||||
- `displayname`: An optional string, the display name for the user.
|
||||
- localpart: A required string, used to generate the Matrix ID.
|
||||
- displayname: An optional string, the display name for the user.
|
||||
* `get_extra_attributes(self, userinfo, token)`
|
||||
- This method must be async.
|
||||
- Arguments:
|
||||
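
To make the de-duplication behaviour described above concrete, here is a minimal and entirely illustrative OpenID mapping provider sketch; the claim names used (`preferred_username`, `name`) are assumptions about the IdP, not requirements.

```python
class ExampleOidcMappingProvider:
    def __init__(self, parsed_config):
        self._config = parsed_config

    @staticmethod
    def parse_config(config):
        return config

    def get_remote_user_id(self, userinfo):
        # The `sub` claim is commonly used as the stable remote identifier.
        return userinfo["sub"]

    async def map_user_attributes(self, userinfo, token, failures):
        base = (userinfo.get("preferred_username") or "").lower()
        # On a collision Synapse calls this again with failures=1, 2, ...;
        # appending the count yields e.g. "john.doe1".
        localpart = f"{base}{failures}" if failures else base
        return {
            "localpart": localpart or None,  # None -> user picks their own
            "displayname": userinfo.get("name"),
        }

    async def get_extra_attributes(self, userinfo, token):
        return {}
```
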
@@ -123,13 +100,11 @@ comment these options out and use those specified by the module instead.
|
||||
|
||||
A custom mapping provider must specify the following methods:
|
||||
|
||||
* `__init__(self, parsed_config, module_api)`
|
||||
* `__init__(self, parsed_config)`
|
||||
- Arguments:
|
||||
- `parsed_config` - A configuration object that is the return value of the
|
||||
`parse_config` method. You should set any configuration options needed by
|
||||
the module here.
|
||||
- `module_api` - a `synapse.module_api.ModuleApi` object which provides the
|
||||
stable API available for extension modules.
|
||||
* `parse_config(config)`
|
||||
- This method should have the `@staticmethod` decoration.
|
||||
- Arguments:
|
||||
@@ -172,20 +147,12 @@ A custom mapping provider must specify the following methods:
|
||||
redirected to.
|
||||
- This method must return a dictionary, which will then be used by Synapse
|
||||
to build a new user. The following keys are allowed:
|
||||
* `mxid_localpart` - The mxid localpart of the new user. If this is
|
||||
`None`, the user is prompted to pick their own username.
|
||||
* `mxid_localpart` - Required. The mxid localpart of the new user.
|
||||
* `displayname` - The displayname of the new user. If not provided, will default to
|
||||
the value of `mxid_localpart`.
|
||||
* `emails` - A list of emails for the new user. If not provided, will
|
||||
default to an empty list.
|
||||
|
||||
Alternatively it can raise a `synapse.api.errors.RedirectException` to
|
||||
redirect the user to another page. This is useful to prompt the user for
|
||||
additional information, e.g. if you want them to provide their own username.
|
||||
It is the responsibility of the mapping provider to either redirect back
|
||||
to `client_redirect_url` (including any additional information) or to
|
||||
complete registration using methods from the `ModuleApi`.
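
A correspondingly minimal SAML provider sketch, again purely illustrative: the SAML attribute names are invented, error handling is reduced to the redirect case mentioned above, and the username-picker path is the one referenced elsewhere in this changeset.

```python
from synapse.api.errors import RedirectException


class ExampleSamlMappingProvider:
    def __init__(self, parsed_config, module_api):
        self._config = parsed_config
        self._module_api = module_api

    @staticmethod
    def parse_config(config):
        return config

    def saml_response_to_user_attributes(self, saml_response, failures, client_redirect_url):
        ava = saml_response.ava  # attribute/value assertions from pysaml2
        uid = ava.get("uid", [None])[0]
        if not uid:
            # Send the user to the username-picker instead of failing outright.
            raise RedirectException(b"/_synapse/client/pick_username")
        return {
            "mxid_localpart": uid.lower() + (str(failures) if failures else ""),
            "displayname": ava.get("displayName", [None])[0],
            "emails": ava.get("email", []),
        }
```
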
### Default SAML Mapping Provider
|
||||
|
||||
Synapse has a built-in SAML mapping provider if a custom provider isn't
|
||||
|
||||
@@ -31,7 +31,7 @@ There is no need for a separate configuration file for the master process.
|
||||
1. Adjust synapse configuration files as above.
|
||||
1. Copy the `*.service` and `*.target` files in [system](system) to
|
||||
`/etc/systemd/system`.
|
||||
1. Run `systemctl daemon-reload` to tell systemd to load the new unit files.
|
||||
1. Run `systemctl deamon-reload` to tell systemd to load the new unit files.
|
||||
1. Run `systemctl enable matrix-synapse.service`. This will configure the
|
||||
synapse master process to be started as part of the `matrix-synapse.target`
|
||||
target.
|
||||
|
||||
@@ -4,7 +4,6 @@ AssertPathExists=/etc/matrix-synapse/workers/%i.yaml
|
||||
|
||||
# This service should be restarted when the synapse target is restarted.
|
||||
PartOf=matrix-synapse.target
|
||||
ReloadPropagatedFrom=matrix-synapse.target
|
||||
|
||||
# if this is started at the same time as the main, let the main process start
|
||||
# first, to initialise the database schema.
|
||||
|
||||
@@ -3,7 +3,6 @@ Description=Synapse master
|
||||
|
||||
# This service should be restarted when the synapse target is restarted.
|
||||
PartOf=matrix-synapse.target
|
||||
ReloadPropagatedFrom=matrix-synapse.target
|
||||
|
||||
[Service]
|
||||
Type=notify
|
||||
|
||||
@@ -216,6 +216,14 @@ Asks the server for the current position of all streams.
|
||||
|
||||
This is used when a worker is shutting down.
|
||||
|
||||
#### FEDERATION_ACK (C)
|
||||
|
||||
Acknowledge receipt of some federation data
|
||||
|
||||
#### REMOVE_PUSHER (C)
|
||||
|
||||
Inform the server a pusher should be removed
|
||||
|
||||
### REMOTE_SERVER_UP (S, C)
|
||||
|
||||
Inform other processes that a remote server may have come back online.
|
||||
|
||||
@@ -42,10 +42,10 @@ This will install and start a systemd service called `coturn`.
|
||||
|
||||
./configure
|
||||
|
||||
You may need to install `libevent2`: if so, you should do so in
|
||||
the way recommended by your operating system. You can ignore
|
||||
warnings about lack of database support: a database is unnecessary
|
||||
for this purpose.
|
||||
> You may need to install `libevent2`: if so, you should do so in
|
||||
> the way recommended by your operating system. You can ignore
|
||||
> warnings about lack of database support: a database is unnecessary
|
||||
> for this purpose.
|
||||
|
||||
1. Build and install it:
|
||||
|
||||
@@ -66,19 +66,6 @@ This will install and start a systemd service called `coturn`.
|
||||
|
||||
pwgen -s 64 1
|
||||
|
||||
A `realm` must be specified, but its value is somewhat arbitrary. (It is
|
||||
sent to clients as part of the authentication flow.) It is conventional to
|
||||
set it to be your server name.
|
||||
|
||||
1. You will most likely want to configure coturn to write logs somewhere. The
|
||||
easiest way is normally to send them to the syslog:
|
||||
|
||||
syslog
|
||||
|
||||
(in which case, the logs will be available via `journalctl -u coturn` on a
|
||||
systemd system). Alternatively, coturn can be configured to write to a
|
||||
logfile - check the example config file supplied with coturn.
|
||||
|
||||
1. Consider your security settings. TURN lets users request a relay which will
|
||||
connect to arbitrary IP addresses and ports. The following configuration is
|
||||
suggested as a minimum starting point:
|
||||
@@ -109,31 +96,11 @@ This will install and start a systemd service called `coturn`.
|
||||
# TLS private key file
|
||||
pkey=/path/to/privkey.pem
|
||||
|
||||
In this case, replace the `turn:` schemes in the `turn_uri` settings below
|
||||
with `turns:`.
|
||||
|
||||
We recommend that you only try to set up TLS/DTLS once you have set up a
|
||||
basic installation and got it working.
|
||||
|
||||
1. Ensure your firewall allows traffic into the TURN server on the ports
|
||||
you've configured it to listen on (By default: 3478 and 5349 for TURN
|
||||
you've configured it to listen on (By default: 3478 and 5349 for the TURN(s)
|
||||
traffic (remember to allow both TCP and UDP traffic), and ports 49152-65535
|
||||
for the UDP relay.)
|
||||
|
||||
1. We do not recommend running a TURN server behind NAT, and are not aware of
|
||||
anyone doing so successfully.
|
||||
|
||||
If you want to try it anyway, you will at least need to tell coturn its
|
||||
external IP address:
|
||||
|
||||
external-ip=192.88.99.1
|
||||
|
||||
... and your NAT gateway must forward all of the relayed ports directly
|
||||
(eg, port 56789 on the external IP must be always be forwarded to port
|
||||
56789 on the internal IP).
|
||||
|
||||
If you get this working, let us know!
|
||||
|
||||
1. (Re)start the turn server:
|
||||
|
||||
* If you used the Debian package (or have set up a systemd unit yourself):
|
||||
@@ -170,10 +137,9 @@ Your home server configuration file needs the following extra keys:
|
||||
without having gone through a CAPTCHA or similar to register a
|
||||
real account.
|
||||
|
||||
As an example, here is the relevant section of the config file for `matrix.org`. The
|
||||
`turn_uris` are appropriate for TURN servers listening on the default ports, with no TLS.
|
||||
As an example, here is the relevant section of the config file for matrix.org:
|
||||
|
||||
turn_uris: [ "turn:turn.matrix.org?transport=udp", "turn:turn.matrix.org?transport=tcp" ]
|
||||
turn_uris: [ "turn:turn.matrix.org:3478?transport=udp", "turn:turn.matrix.org:3478?transport=tcp" ]
|
||||
turn_shared_secret: "n0t4ctuAllymatr1Xd0TorgSshar3d5ecret4obvIousreAsons"
|
||||
turn_user_lifetime: 86400000
|
||||
turn_allow_guests: True
|
||||
@@ -187,94 +153,7 @@ After updating the homeserver configuration, you must restart synapse:
|
||||
```
|
||||
* If you use systemd:
|
||||
```
|
||||
systemctl restart matrix-synapse.service
|
||||
systemctl restart synapse.service
|
||||
```
|
||||
... and then reload any clients (or wait an hour for them to refresh their
|
||||
settings).
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
The normal symptoms of a misconfigured TURN server are that calls between
|
||||
devices on different networks ring, but get stuck at "call
|
||||
connecting". Unfortunately, troubleshooting this can be tricky.
|
||||
|
||||
Here are a few things to try:
|
||||
|
||||
* Check that your TURN server is not behind NAT. As above, we're not aware of
|
||||
anyone who has successfully set this up.
|
||||
|
||||
* Check that you have opened your firewall to allow TCP and UDP traffic to the
|
||||
TURN ports (normally 3478 and 5479).
|
||||
|
||||
* Check that you have opened your firewall to allow UDP traffic to the UDP
|
||||
relay ports (49152-65535 by default).
|
||||
|
||||
* Some WebRTC implementations (notably, that of Google Chrome) appear to get
|
||||
confused by TURN servers which are reachable over IPv6 (this appears to be
|
||||
an unexpected side-effect of its handling of multiple IP addresses as
|
||||
defined by
|
||||
[`draft-ietf-rtcweb-ip-handling`](https://tools.ietf.org/html/draft-ietf-rtcweb-ip-handling-12)).
|
||||
|
||||
Try removing any AAAA records for your TURN server, so that it is only
|
||||
reachable over IPv4.
|
||||
|
||||
* Enable more verbose logging in coturn via the `verbose` setting:
|
||||
|
||||
```
|
||||
verbose
|
||||
```
|
||||
|
||||
... and then see if there are any clues in its logs.
|
||||
|
||||
* If you are using a browser-based client under Chrome, check
|
||||
`chrome://webrtc-internals/` for insights into the internals of the
|
||||
negotiation. On Firefox, check the "Connection Log" on `about:webrtc`.
|
||||
|
||||
(Understanding the output is beyond the scope of this document!)
|
||||
|
||||
* You can test your Matrix homeserver TURN setup with https://test.voip.librepush.net/.
|
||||
Note that this test is not fully reliable yet, so don't be discouraged if
|
||||
the test fails.
|
||||
[Here](https://github.com/matrix-org/voip-tester) is the github repo of the
|
||||
source of the tester, where you can file bug reports.
|
||||
|
||||
* There is a WebRTC test tool at
|
||||
https://webrtc.github.io/samples/src/content/peerconnection/trickle-ice/. To
|
||||
use it, you will need a username/password for your TURN server. You can
|
||||
either:
|
||||
|
||||
* look for the `GET /_matrix/client/r0/voip/turnServer` request made by a
|
||||
matrix client to your homeserver in your browser's network inspector. In
|
||||
the response you should see `username` and `password`. Or:
|
||||
|
||||
* Use the following shell commands:
|
||||
|
||||
```sh
|
||||
secret=staticAuthSecretHere
|
||||
|
||||
u=$((`date +%s` + 3600)):test
|
||||
p=$(echo -n $u | openssl dgst -hmac $secret -sha1 -binary | base64)
|
||||
echo -e "username: $u\npassword: $p"
|
||||
```
|
||||
|
||||
Or:
|
||||
|
||||
* Temporarily configure coturn to accept a static username/password. To do
|
||||
this, comment out `use-auth-secret` and `static-auth-secret` and add the
|
||||
following:
|
||||
|
||||
```
|
||||
lt-cred-mech
|
||||
user=username:password
|
||||
```
|
||||
|
||||
**Note**: these settings will not take effect unless `use-auth-secret`
|
||||
and `static-auth-secret` are disabled.
|
||||
|
||||
Restart coturn after changing the configuration file.
|
||||
|
||||
Remember to restore the original settings to go back to testing with
|
||||
Matrix clients!
|
||||
|
||||
If the TURN server is working correctly, you should see at least one `relay`
|
||||
entry in the results.
|
||||
..and your Home Server now supports VoIP relaying!
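
For reference, the `openssl` one-liner shown earlier for generating test credentials implements the standard TURN REST credential scheme; a rough Python equivalent (the helper name is invented) looks like this:

```python
import base64
import hashlib
import hmac
import time


def turn_rest_credentials(shared_secret: str, user: str = "test", ttl: int = 3600):
    # username = "<unix expiry>:<user>";
    # password = base64(HMAC-SHA1(static-auth-secret, username))
    username = "%d:%s" % (int(time.time()) + ttl, user)
    digest = hmac.new(shared_secret.encode(), username.encode(), hashlib.sha1).digest()
    return username, base64.b64encode(digest).decode()


# username, password = turn_rest_credentials("staticAuthSecretHere")
```
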
@@ -16,9 +16,6 @@ workers only work with PostgreSQL-based Synapse deployments. SQLite should only
|
||||
be used for demo purposes and any admin considering workers should already be
|
||||
running PostgreSQL.
|
||||
|
||||
See also https://matrix.org/blog/2020/11/03/how-we-fixed-synapses-scalability
|
||||
for a higher level overview.
|
||||
|
||||
## Main process/worker communication
|
||||
|
||||
The processes communicate with each other via a Synapse-specific protocol called
|
||||
@@ -40,9 +37,6 @@ which relays replication commands between processes. This can give a significant
|
||||
cpu saving on the main process and will be a prerequisite for upcoming
|
||||
performance improvements.
|
||||
|
||||
If Redis support is enabled Synapse will use it as a shared cache, as well as a
|
||||
pub/sub mechanism.
|
||||
|
||||
See the [Architectural diagram](#architectural-diagram) section at the end for
|
||||
a visualisation of what this looks like.
|
||||
|
||||
@@ -62,7 +56,7 @@ The appropriate dependencies must also be installed for Synapse. If using a
|
||||
virtualenv, these can be installed with:
|
||||
|
||||
```sh
|
||||
pip install "matrix-synapse[redis]"
|
||||
pip install matrix-synapse[redis]
|
||||
```
|
||||
|
||||
Note that these dependencies are included when synapse is installed with `pip
|
||||
@@ -95,8 +89,7 @@ shared configuration file.
|
||||
Normally, only a couple of changes are needed to make an existing configuration
|
||||
file suitable for use with workers. First, you need to enable an "HTTP replication
|
||||
listener" for the main process; and secondly, you need to enable redis-based
|
||||
replication. Optionally, a shared secret can be used to authenticate HTTP
|
||||
traffic between workers. For example:
|
||||
replication. For example:
|
||||
|
||||
|
||||
```yaml
|
||||
@@ -110,9 +103,6 @@ listeners:
|
||||
resources:
|
||||
- names: [replication]
|
||||
|
||||
# Add a random shared secret to authenticate traffic.
|
||||
worker_replication_secret: ""
|
||||
|
||||
redis:
|
||||
enabled: true
|
||||
```
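
Any sufficiently long random string works for `worker_replication_secret`; for example, one can be generated with:

```python
import secrets

# 64 hex characters of cryptographically secure randomness.
print(secrets.token_hex(32))
```
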
@@ -220,7 +210,6 @@ expressions:
|
||||
^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/members$
|
||||
^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/state$
|
||||
^/_matrix/client/(api/v1|r0|unstable)/account/3pid$
|
||||
^/_matrix/client/(api/v1|r0|unstable)/devices$
|
||||
^/_matrix/client/(api/v1|r0|unstable)/keys/query$
|
||||
^/_matrix/client/(api/v1|r0|unstable)/keys/changes$
|
||||
^/_matrix/client/versions$
|
||||
@@ -228,13 +217,14 @@ expressions:
|
||||
^/_matrix/client/(api/v1|r0|unstable)/joined_groups$
|
||||
^/_matrix/client/(api/v1|r0|unstable)/publicised_groups$
|
||||
^/_matrix/client/(api/v1|r0|unstable)/publicised_groups/
|
||||
^/_synapse/client/password_reset/email/submit_token$
|
||||
|
||||
# Registration/login requests
|
||||
^/_matrix/client/(api/v1|r0|unstable)/login$
|
||||
^/_matrix/client/(r0|unstable)/register$
|
||||
^/_matrix/client/(r0|unstable)/auth/.*/fallback/web$
|
||||
|
||||
# Event sending requests
|
||||
^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/redact
|
||||
^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/send
|
||||
^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/state/
|
||||
^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$
|
||||
@@ -257,30 +247,25 @@ Additionally, the following endpoints should be included if Synapse is configure
|
||||
to use SSO (you only need to include the ones for whichever SSO provider you're
|
||||
using):
|
||||
|
||||
# for all SSO providers
|
||||
^/_matrix/client/(api/v1|r0|unstable)/login/sso/redirect
|
||||
^/_synapse/client/pick_idp$
|
||||
^/_synapse/client/pick_username
|
||||
^/_synapse/client/new_user_consent$
|
||||
^/_synapse/client/sso_register$
|
||||
|
||||
# OpenID Connect requests.
|
||||
^/_synapse/client/oidc/callback$
|
||||
^/_matrix/client/(api/v1|r0|unstable)/login/sso/redirect$
|
||||
^/_synapse/oidc/callback$
|
||||
|
||||
# SAML requests.
|
||||
^/_synapse/client/saml2/authn_response$
|
||||
^/_matrix/client/(api/v1|r0|unstable)/login/sso/redirect$
|
||||
^/_matrix/saml2/authn_response$
|
||||
|
||||
# CAS requests.
|
||||
^/_matrix/client/(api/v1|r0|unstable)/login/(cas|sso)/redirect$
|
||||
^/_matrix/client/(api/v1|r0|unstable)/login/cas/ticket$
|
||||
|
||||
Ensure that all SSO logins go to a single process.
|
||||
For multiple workers not handling the SSO endpoints properly, see
|
||||
[#7530](https://github.com/matrix-org/synapse/issues/7530) and
|
||||
[#9427](https://github.com/matrix-org/synapse/issues/9427).
|
||||
|
||||
Note that a HTTP listener with `client` and `federation` resources must be
|
||||
configured in the `worker_listeners` option in the worker config.
|
||||
|
||||
Ensure that all SSO logins go to a single process (usually the main process).
|
||||
For multiple workers not handling the SSO endpoints properly, see
|
||||
[#7530](https://github.com/matrix-org/synapse/issues/7530).
|
||||
|
||||
#### Load balancing
|
||||
|
||||
It is possible to run multiple instances of this worker app, with incoming requests
|
||||
@@ -373,15 +358,7 @@ Handles sending push notifications to sygnal and email. Doesn't handle any
|
||||
REST endpoints itself, but you should set `start_pushers: False` in the
|
||||
shared configuration file to stop the main synapse sending push notifications.
|
||||
|
||||
To run multiple instances at once the `pusher_instances` option should list all
|
||||
pusher instances by their worker name, e.g.:
|
||||
|
||||
```yaml
|
||||
pusher_instances:
|
||||
- pusher_worker1
|
||||
- pusher_worker2
|
||||
```
|
||||
|
||||
Note this worker cannot be load-balanced: only one instance should be active.
|
||||
|
||||
### `synapse.app.appservice`
|
||||
|
||||
|
||||
mypy.ini
@@ -1,85 +1,80 @@
|
||||
[mypy]
|
||||
namespace_packages = True
|
||||
plugins = mypy_zope:plugin, scripts-dev/mypy_synapse_plugin.py
|
||||
follow_imports = normal
|
||||
follow_imports = silent
|
||||
check_untyped_defs = True
|
||||
show_error_codes = True
|
||||
show_traceback = True
|
||||
mypy_path = stubs
|
||||
warn_unreachable = True
|
||||
local_partial_types = True
|
||||
no_implicit_optional = True
|
||||
|
||||
# To find all folders that pass mypy you run:
|
||||
#
|
||||
# find synapse/* -type d -not -name __pycache__ -exec bash -c "mypy '{}' > /dev/null" \; -print
|
||||
|
||||
files =
|
||||
scripts-dev/sign_json,
|
||||
synapse/api,
|
||||
synapse/appservice,
|
||||
synapse/config,
|
||||
synapse/crypto,
|
||||
synapse/event_auth.py,
|
||||
synapse/events/builder.py,
|
||||
synapse/events/spamcheck.py,
|
||||
synapse/events/third_party_rules.py,
|
||||
synapse/events/validator.py,
|
||||
synapse/events/spamcheck.py,
|
||||
synapse/federation,
|
||||
synapse/groups,
|
||||
synapse/handlers,
|
||||
synapse/http/client.py,
|
||||
synapse/http/federation/matrix_federation_agent.py,
|
||||
synapse/handlers/_base.py,
|
||||
synapse/handlers/account_data.py,
|
||||
synapse/handlers/account_validity.py,
|
||||
synapse/handlers/appservice.py,
|
||||
synapse/handlers/auth.py,
|
||||
synapse/handlers/cas_handler.py,
|
||||
synapse/handlers/deactivate_account.py,
|
||||
synapse/handlers/device.py,
|
||||
synapse/handlers/devicemessage.py,
|
||||
synapse/handlers/directory.py,
|
||||
synapse/handlers/events.py,
|
||||
synapse/handlers/federation.py,
|
||||
synapse/handlers/identity.py,
|
||||
synapse/handlers/initial_sync.py,
|
||||
synapse/handlers/message.py,
|
||||
synapse/handlers/oidc_handler.py,
|
||||
synapse/handlers/pagination.py,
|
||||
synapse/handlers/password_policy.py,
|
||||
synapse/handlers/presence.py,
|
||||
synapse/handlers/profile.py,
|
||||
synapse/handlers/read_marker.py,
|
||||
synapse/handlers/room.py,
|
||||
synapse/handlers/room_member.py,
|
||||
synapse/handlers/room_member_worker.py,
|
||||
synapse/handlers/saml_handler.py,
|
||||
synapse/handlers/sync.py,
|
||||
synapse/handlers/ui_auth,
|
||||
synapse/http/federation/well_known_resolver.py,
|
||||
synapse/http/matrixfederationclient.py,
|
||||
synapse/http/server.py,
|
||||
synapse/http/site.py,
|
||||
synapse/logging,
|
||||
synapse/metrics,
|
||||
synapse/module_api,
|
||||
synapse/notifier.py,
|
||||
synapse/push,
|
||||
synapse/push/pusherpool.py,
|
||||
synapse/push/push_rule_evaluator.py,
|
||||
synapse/replication,
|
||||
synapse/rest,
|
||||
synapse/secrets.py,
|
||||
synapse/server.py,
|
||||
synapse/server_notices,
|
||||
synapse/spam_checker_api,
|
||||
synapse/state,
|
||||
synapse/storage/__init__.py,
|
||||
synapse/storage/_base.py,
|
||||
synapse/storage/background_updates.py,
|
||||
synapse/storage/databases/main/appservice.py,
|
||||
synapse/storage/databases/main/events.py,
|
||||
synapse/storage/databases/main/keys.py,
|
||||
synapse/storage/databases/main/pusher.py,
|
||||
synapse/storage/databases/main/registration.py,
|
||||
synapse/storage/databases/main/stream.py,
|
||||
synapse/storage/databases/main/ui_auth.py,
|
||||
synapse/storage/database.py,
|
||||
synapse/storage/engines,
|
||||
synapse/storage/keys.py,
|
||||
synapse/storage/persist_events.py,
|
||||
synapse/storage/prepare_database.py,
|
||||
synapse/storage/purge_events.py,
|
||||
synapse/storage/push_rule.py,
|
||||
synapse/storage/relations.py,
|
||||
synapse/storage/roommember.py,
|
||||
synapse/storage/state.py,
|
||||
synapse/storage/types.py,
|
||||
synapse/storage/util,
|
||||
synapse/streams,
|
||||
synapse/types.py,
|
||||
synapse/util/async_helpers.py,
|
||||
synapse/util/caches,
|
||||
synapse/util/metrics.py,
|
||||
synapse/util/macaroons.py,
|
||||
synapse/util/stringutils.py,
|
||||
synapse/visibility.py,
|
||||
tests/replication,
|
||||
tests/test_utils,
|
||||
tests/handlers/test_password_providers.py,
|
||||
tests/rest/client/v1/test_login.py,
|
||||
tests/rest/client/v2_alpha/test_auth.py,
|
||||
tests/util/test_stream_change_cache.py
|
||||
|
||||
@@ -107,13 +102,10 @@ ignore_missing_imports = True
|
||||
[mypy-h11]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-msgpack]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-opentracing]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-OpenSSL.*]
|
||||
[mypy-OpenSSL]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-netaddr]
|
||||
@@ -122,6 +114,9 @@ ignore_missing_imports = True
|
||||
[mypy-saml2.*]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-unpaddedbase64]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-canonicaljson]
|
||||
ignore_missing_imports = True
|
||||
|
||||
@@ -166,9 +161,3 @@ ignore_missing_imports = True
|
||||
|
||||
[mypy-hiredis]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-josepy.*]
|
||||
ignore_missing_imports = True
|
||||
|
||||
[mypy-txacme.*]
|
||||
ignore_missing_imports = True
|
||||
|
||||
@@ -35,7 +35,7 @@
|
||||
showcontent = true
|
||||
|
||||
[tool.black]
|
||||
target-version = ['py36']
|
||||
target-version = ['py35']
|
||||
exclude = '''
|
||||
|
||||
(
|
||||
|
||||
@@ -18,9 +18,11 @@ import threading
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
|
||||
DISTS = (
|
||||
"debian:stretch",
|
||||
"debian:buster",
|
||||
"debian:bullseye",
|
||||
"debian:sid",
|
||||
"ubuntu:xenial",
|
||||
"ubuntu:bionic",
|
||||
"ubuntu:focal",
|
||||
"ubuntu:groovy",
|
||||
@@ -41,7 +43,7 @@ class Builder(object):
|
||||
self._lock = threading.Lock()
|
||||
self._failed = False
|
||||
|
||||
def run_build(self, dist, skip_tests=False):
|
||||
def run_build(self, dist):
|
||||
"""Build deb for a single distribution"""
|
||||
|
||||
if self._failed:
|
||||
@@ -49,13 +51,13 @@ class Builder(object):
|
||||
raise Exception("failed")
|
||||
|
||||
try:
|
||||
self._inner_build(dist, skip_tests)
|
||||
self._inner_build(dist)
|
||||
except Exception as e:
|
||||
print("build of %s failed: %s" % (dist, e), file=sys.stderr)
|
||||
self._failed = True
|
||||
raise
|
||||
|
||||
def _inner_build(self, dist, skip_tests=False):
|
||||
def _inner_build(self, dist):
|
||||
projdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
|
||||
os.chdir(projdir)
|
||||
|
||||
@@ -99,7 +101,6 @@ class Builder(object):
|
||||
"--volume=" + debsdir + ":/debs",
|
||||
"-e", "TARGET_USERID=%i" % (os.getuid(), ),
|
||||
"-e", "TARGET_GROUPID=%i" % (os.getgid(), ),
|
||||
"-e", "DEB_BUILD_OPTIONS=%s" % ("nocheck" if skip_tests else ""),
|
||||
"dh-venv-builder:" + tag,
|
||||
], stdout=stdout, stderr=subprocess.STDOUT)
|
||||
|
||||
@@ -123,7 +124,7 @@ class Builder(object):
|
||||
self.active_containers.remove(c)
|
||||
|
||||
|
||||
def run_builds(dists, jobs=1, skip_tests=False):
|
||||
def run_builds(dists, jobs=1):
|
||||
builder = Builder(redirect_stdout=(jobs > 1))
|
||||
|
||||
def sig(signum, _frame):
|
||||
@@ -132,7 +133,7 @@ def run_builds(dists, jobs=1, skip_tests=False):
|
||||
signal.signal(signal.SIGINT, sig)
|
||||
|
||||
with ThreadPoolExecutor(max_workers=jobs) as e:
|
||||
res = e.map(lambda dist: builder.run_build(dist, skip_tests), dists)
|
||||
res = e.map(builder.run_build, dists)
|
||||
|
||||
# make sure we consume the iterable so that exceptions are raised.
|
||||
for r in res:
|
||||
@@ -147,13 +148,9 @@ if __name__ == '__main__':
|
||||
'-j', '--jobs', type=int, default=1,
|
||||
help='specify the number of builds to run in parallel',
|
||||
)
|
||||
parser.add_argument(
|
||||
'--no-check', action='store_true',
|
||||
help='skip running tests after building',
|
||||
)
|
||||
parser.add_argument(
|
||||
'dist', nargs='*', default=DISTS,
|
||||
help='a list of distributions to build for. Default: %(default)s',
|
||||
)
|
||||
args = parser.parse_args()
|
||||
run_builds(dists=args.dist, jobs=args.jobs, skip_tests=args.no_check)
|
||||
run_builds(dists=args.dist, jobs=args.jobs)
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
#!/usr/bin/env bash
|
||||
#!/bin/bash
|
||||
#
|
||||
# A script which checks that an appropriate news file has been added on this
|
||||
# branch.
|
||||
|
||||
@@ -1,49 +1,22 @@
|
||||
#!/usr/bin/env bash
|
||||
#! /bin/bash -eu
|
||||
# This script is designed for developers who want to test their code
|
||||
# against Complement.
|
||||
#
|
||||
# It makes a Synapse image which represents the current checkout,
|
||||
# builds a synapse-complement image on top, then runs tests with it.
|
||||
#
|
||||
# By default the script will fetch the latest Complement master branch and
|
||||
# run tests with that. This can be overridden to use a custom Complement
|
||||
# checkout by setting the COMPLEMENT_DIR environment variable to the
|
||||
# filepath of a local Complement checkout.
|
||||
#
|
||||
# A regular expression of test method names can be supplied as the first
|
||||
# argument to the script. Complement will then only run those tests. If
|
||||
# no regex is supplied, all tests are run. For example;
|
||||
#
|
||||
# ./complement.sh "TestOutboundFederation(Profile|Send)"
|
||||
#
|
||||
# then downloads Complement and runs it with that image.
|
||||
|
||||
# Exit if a line returns a non-zero exit code
|
||||
set -e
|
||||
|
||||
# Change to the repository root
|
||||
cd "$(dirname $0)/.."
|
||||
|
||||
# Check for a user-specified Complement checkout
|
||||
if [[ -z "$COMPLEMENT_DIR" ]]; then
|
||||
echo "COMPLEMENT_DIR not set. Fetching the latest Complement checkout..."
|
||||
wget -Nq https://github.com/matrix-org/complement/archive/master.tar.gz
|
||||
tar -xzf master.tar.gz
|
||||
COMPLEMENT_DIR=complement-master
|
||||
echo "Checkout available at 'complement-master'"
|
||||
fi
|
||||
|
||||
# Build the base Synapse image from the local checkout
|
||||
docker build -t matrixdotorg/synapse -f docker/Dockerfile .
|
||||
# Build the Synapse monolith image from Complement, based on the above image we just built
|
||||
docker build -t complement-synapse -f "$COMPLEMENT_DIR/dockerfiles/Synapse.Dockerfile" "$COMPLEMENT_DIR/dockerfiles"
|
||||
docker build -t matrixdotorg/synapse:latest -f docker/Dockerfile .
|
||||
|
||||
cd "$COMPLEMENT_DIR"
|
||||
# Download Complement
|
||||
wget -N https://github.com/matrix-org/complement/archive/master.tar.gz
|
||||
tar -xzf master.tar.gz
|
||||
cd complement-master
|
||||
|
||||
EXTRA_COMPLEMENT_ARGS=""
|
||||
if [[ -n "$1" ]]; then
|
||||
# A test name regex has been set, supply it to Complement
|
||||
EXTRA_COMPLEMENT_ARGS+="-run $1 "
|
||||
fi
|
||||
# Build the Synapse image from Complement, based on the above image we just built
|
||||
docker build -t complement-synapse -f dockerfiles/Synapse.Dockerfile ./dockerfiles
|
||||
|
||||
# Run the tests!
|
||||
COMPLEMENT_BASE_IMAGE=complement-synapse go test -v -tags synapse_blacklist,msc2946,msc3083 -count=1 $EXTRA_COMPLEMENT_ARGS ./tests
|
||||
# Run the tests on the resulting image!
|
||||
COMPLEMENT_BASE_IMAGE=complement-synapse go test -v -count=1 ./tests
|
||||
|
||||
@@ -1,15 +1,10 @@
|
||||
#!/usr/bin/env bash
|
||||
#!/bin/bash
|
||||
# Find linting errors in Synapse's default config file.
|
||||
# Exits with 0 if there are no problems, or another code otherwise.
|
||||
|
||||
# cd to the root of the repository
|
||||
cd `dirname $0`/..
|
||||
|
||||
# Restore backup of sample config upon script exit
|
||||
trap "mv docs/sample_config.yaml.bak docs/sample_config.yaml" EXIT
|
||||
|
||||
# Fix non-lowercase true/false values
|
||||
sed -i.bak -E "s/: +True/: true/g; s/: +False/: false/g;" docs/sample_config.yaml
|
||||
rm docs/sample_config.yaml.bak
|
||||
|
||||
# Check if anything changed
|
||||
diff docs/sample_config.yaml docs/sample_config.yaml.bak
|
||||
git diff --exit-code docs/sample_config.yaml
|
||||
|
||||
@@ -22,8 +22,8 @@ import sys
|
||||
from typing import Any, Optional
|
||||
from urllib import parse as urlparse
|
||||
|
||||
import nacl.signing
|
||||
import requests
|
||||
import signedjson.key
|
||||
import signedjson.types
|
||||
import srvlookup
|
||||
import yaml
|
||||
@@ -44,6 +44,18 @@ def encode_base64(input_bytes):
|
||||
return output_string
|
||||
|
||||
|
||||
def decode_base64(input_string):
|
||||
"""Decode a base64 string to bytes inferring padding from the length of the
|
||||
string."""
|
||||
|
||||
input_bytes = input_string.encode("ascii")
|
||||
input_len = len(input_bytes)
|
||||
padding = b"=" * (3 - ((input_len + 3) % 4))
|
||||
output_len = 3 * ((input_len + 2) // 4) + (input_len + 2) % 4 - 2
|
||||
output_bytes = base64.b64decode(input_bytes + padding)
|
||||
return output_bytes[:output_len]
|
||||
|
||||
|
||||
def encode_canonical_json(value):
|
||||
return json.dumps(
|
||||
value,
|
||||
@@ -76,6 +88,42 @@ def sign_json(
|
||||
return json_object
|
||||
|
||||
|
||||
NACL_ED25519 = "ed25519"
|
||||
|
||||
|
||||
def decode_signing_key_base64(algorithm, version, key_base64):
|
||||
"""Decode a base64 encoded signing key
|
||||
Args:
|
||||
algorithm (str): The algorithm the key is for (currently "ed25519").
|
||||
version (str): Identifies this key out of the keys for this entity.
|
||||
key_base64 (str): Base64 encoded bytes of the key.
|
||||
Returns:
|
||||
A SigningKey object.
|
||||
"""
|
||||
if algorithm == NACL_ED25519:
|
||||
key_bytes = decode_base64(key_base64)
|
||||
key = nacl.signing.SigningKey(key_bytes)
|
||||
key.version = version
|
||||
key.alg = NACL_ED25519
|
||||
return key
|
||||
else:
|
||||
raise ValueError("Unsupported algorithm %s" % (algorithm,))
|
||||
|
||||
|
||||
def read_signing_keys(stream):
|
||||
"""Reads a list of keys from a stream
|
||||
Args:
|
||||
stream : A stream to iterate for keys.
|
||||
Returns:
|
||||
list of SigningKey objects.
|
||||
"""
|
||||
keys = []
|
||||
for line in stream:
|
||||
algorithm, version, key_base64 = line.split()
|
||||
keys.append(decode_signing_key_base64(algorithm, version, key_base64))
|
||||
return keys
|
||||
|
||||
|
||||
def request(
|
||||
method: Optional[str],
|
||||
origin_name: str,
|
||||
@@ -175,28 +223,23 @@ def main():
|
||||
parser.add_argument("--body", help="Data to send as the body of the HTTP request")
|
||||
|
||||
parser.add_argument(
|
||||
"path", help="request path, including the '/_matrix/federation/...' prefix."
|
||||
"path", help="request path. We will add '/_matrix/federation/v1/' to this."
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
args.signing_key = None
|
||||
if args.signing_key_path:
|
||||
with open(args.signing_key_path) as f:
|
||||
args.signing_key = f.readline()
|
||||
|
||||
if not args.server_name or not args.signing_key:
|
||||
if not args.server_name or not args.signing_key_path:
|
||||
read_args_from_config(args)
|
||||
|
||||
algorithm, version, key_base64 = args.signing_key.split()
|
||||
key = signedjson.key.decode_signing_key_base64(algorithm, version, key_base64)
|
||||
with open(args.signing_key_path) as f:
|
||||
key = read_signing_keys(f)[0]
|
||||
|
||||
result = request(
|
||||
args.method,
|
||||
args.server_name,
|
||||
key,
|
||||
args.destination,
|
||||
args.path,
|
||||
"/_matrix/federation/v1/" + args.path,
|
||||
content=args.body,
|
||||
)
|
||||
|
||||
@@ -212,16 +255,10 @@ def main():
|
||||
def read_args_from_config(args):
|
||||
with open(args.config, "r") as fh:
|
||||
config = yaml.safe_load(fh)
|
||||
|
||||
if not args.server_name:
|
||||
args.server_name = config["server_name"]
|
||||
|
||||
if not args.signing_key:
|
||||
if "signing_key" in config:
|
||||
args.signing_key = config["signing_key"]
|
||||
else:
|
||||
with open(config["signing_key_path"]) as f:
|
||||
args.signing_key = f.readline()
|
||||
if not args.signing_key_path:
|
||||
args.signing_key_path = config["signing_key_path"]
|
||||
|
||||
|
||||
class MatrixConnectionAdapter(HTTPAdapter):
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
#!/usr/bin/env bash
|
||||
#!/bin/bash
|
||||
#
|
||||
# Update/check the docs/sample_config.yaml
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
#!/usr/bin/env bash
|
||||
#!/bin/bash
|
||||
#
|
||||
# Runs linting scripts over the local Synapse checkout
|
||||
# isort - sorts import statements
|
||||
@@ -80,8 +80,7 @@ else
|
||||
# then lint everything!
|
||||
if [[ -z ${files+x} ]]; then
|
||||
# Lint all source code files and directories
|
||||
# Note: this list aims to mirror the one in tox.ini
|
||||
files=("synapse" "docker" "tests" "scripts-dev" "scripts" "contrib" "synctl" "setup.py" "synmark" "stubs" ".buildkite")
|
||||
files=("synapse" "tests" "scripts-dev" "scripts" "contrib" "synctl" "setup.py" "synmark")
|
||||
fi
|
||||
fi
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
#!/usr/bin/env bash
|
||||
#!/bin/bash
|
||||
#
|
||||
# This script generates SQL files for creating a brand new Synapse DB with the latest
|
||||
# schema, on both SQLite3 and Postgres.
|
||||
@@ -162,23 +162,12 @@ else
|
||||
fi
|
||||
|
||||
# Delete schema_version, applied_schema_deltas and applied_module_schemas tables
|
||||
# Also delete any shadow tables from fts4
|
||||
# This needs to be done after synapse_port_db is run
|
||||
echo "Dropping unwanted db tables..."
|
||||
SQL="
|
||||
DROP TABLE schema_version;
|
||||
DROP TABLE applied_schema_deltas;
|
||||
DROP TABLE applied_module_schemas;
|
||||
DROP TABLE event_search_content;
|
||||
DROP TABLE event_search_segments;
|
||||
DROP TABLE event_search_segdir;
|
||||
DROP TABLE event_search_docsize;
|
||||
DROP TABLE event_search_stat;
|
||||
DROP TABLE user_directory_search_content;
|
||||
DROP TABLE user_directory_search_segments;
|
||||
DROP TABLE user_directory_search_segdir;
|
||||
DROP TABLE user_directory_search_docsize;
|
||||
DROP TABLE user_directory_search_stat;
|
||||
"
|
||||
sqlite3 "$SQLITE_DB" <<< "$SQL"
|
||||
psql $POSTGRES_DB_NAME -U "$POSTGRES_USERNAME" -w <<< "$SQL"
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2020 The Matrix.org Foundation C.I.C.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
@@ -30,8 +31,6 @@ class SynapsePlugin(Plugin):
|
||||
) -> Optional[Callable[[MethodSigContext], CallableType]]:
|
||||
if fullname.startswith(
|
||||
"synapse.util.caches.descriptors._CachedFunction.__call__"
|
||||
) or fullname.startswith(
|
||||
"synapse.util.caches.descriptors._LruCachedFunction.__call__"
|
||||
):
|
||||
return cached_function_method_signature
|
||||
return None
|
||||
@@ -86,9 +85,7 @@ def cached_function_method_signature(ctx: MethodSigContext) -> CallableType:
|
||||
arg_kinds.append(ARG_NAMED_OPT) # Arg is an optional kwarg.
|
||||
|
||||
signature = signature.copy_modified(
|
||||
arg_types=arg_types,
|
||||
arg_names=arg_names,
|
||||
arg_kinds=arg_kinds,
|
||||
arg_types=arg_types, arg_names=arg_names, arg_kinds=arg_kinds,
|
||||
)
|
||||
|
||||
return signature
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
#!/usr/bin/env bash
|
||||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
|
||||
@@ -6,4 +6,4 @@ set -e
|
||||
# next PR number.
|
||||
CURRENT_NUMBER=`curl -s "https://api.github.com/repos/matrix-org/synapse/issues?state=all&per_page=1" | jq -r ".[0].number"`
|
||||
CURRENT_NUMBER=$((CURRENT_NUMBER+1))
|
||||
echo $CURRENT_NUMBER
|
||||
echo $CURRENT_NUMBER
|
||||
@@ -1,244 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2020 The Matrix.org Foundation C.I.C.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""An interactive script for doing a release. See `run()` below.
|
||||
"""
|
||||
|
||||
import subprocess
|
||||
import sys
|
||||
from typing import Optional
|
||||
|
||||
import click
|
||||
import git
|
||||
from packaging import version
|
||||
from redbaron import RedBaron
|
||||
|
||||
|
||||
@click.command()
|
||||
def run():
|
||||
"""An interactive script to walk through the initial stages of creating a
|
||||
release, including creating release branch, updating changelog and pushing to
|
||||
GitHub.
|
||||
|
||||
Requires the dev dependencies be installed, which can be done via:
|
||||
|
||||
pip install -e .[dev]
|
||||
|
||||
"""
|
||||
|
||||
# Make sure we're in a git repo.
|
||||
try:
|
||||
repo = git.Repo()
|
||||
except git.InvalidGitRepositoryError:
|
||||
raise click.ClickException("Not in Synapse repo.")
|
||||
|
||||
if repo.is_dirty():
|
||||
raise click.ClickException("Uncommitted changes exist.")
|
||||
|
||||
click.secho("Updating git repo...")
|
||||
repo.remote().fetch()
|
||||
|
||||
# Parse the AST and load the `__version__` node so that we can edit it
|
||||
# later.
|
||||
with open("synapse/__init__.py") as f:
|
||||
red = RedBaron(f.read())
|
||||
|
||||
version_node = None
|
||||
for node in red:
|
||||
if node.type != "assignment":
|
||||
continue
|
||||
|
||||
if node.target.type != "name":
|
||||
continue
|
||||
|
||||
if node.target.value != "__version__":
|
||||
continue
|
||||
|
||||
version_node = node
|
||||
break
|
||||
|
||||
if not version_node:
|
||||
print("Failed to find '__version__' definition in synapse/__init__.py")
|
||||
sys.exit(1)
|
||||
|
||||
# Parse the current version.
|
||||
current_version = version.parse(version_node.value.value.strip('"'))
|
||||
assert isinstance(current_version, version.Version)
|
||||
|
||||
# Figure out what sort of release we're doing and calculate the new version.
|
||||
rc = click.confirm("RC", default=True)
|
||||
if current_version.pre:
|
||||
# If the current version is an RC we don't need to bump any of the
|
||||
# version numbers (other than the RC number).
|
||||
base_version = "{}.{}.{}".format(
|
||||
current_version.major,
|
||||
current_version.minor,
|
||||
current_version.micro,
|
||||
)
|
||||
|
||||
if rc:
|
||||
new_version = "{}.{}.{}rc{}".format(
|
||||
current_version.major,
|
||||
current_version.minor,
|
||||
current_version.micro,
|
||||
current_version.pre[1] + 1,
|
||||
)
|
||||
else:
|
||||
new_version = base_version
|
||||
else:
|
||||
# If this is a new release cycle then we need to know if its a major
|
||||
# version bump or a hotfix.
|
||||
release_type = click.prompt(
|
||||
"Release type",
|
||||
type=click.Choice(("major", "hotfix")),
|
||||
show_choices=True,
|
||||
default="major",
|
||||
)
|
||||
|
||||
if release_type == "major":
|
||||
base_version = new_version = "{}.{}.{}".format(
|
||||
current_version.major,
|
||||
current_version.minor + 1,
|
||||
0,
|
||||
)
|
||||
if rc:
|
||||
new_version = "{}.{}.{}rc1".format(
|
||||
current_version.major,
|
||||
current_version.minor + 1,
|
||||
0,
|
||||
)
|
||||
|
||||
else:
|
||||
base_version = new_version = "{}.{}.{}".format(
|
||||
current_version.major,
|
||||
current_version.minor,
|
||||
current_version.micro + 1,
|
||||
)
|
||||
if rc:
|
||||
new_version = "{}.{}.{}rc1".format(
|
||||
current_version.major,
|
||||
current_version.minor,
|
||||
current_version.micro + 1,
|
||||
)

    # Confirm the calculated version is OK.
    if not click.confirm(f"Create new version: {new_version}?", default=True):
        click.get_current_context().abort()

    # Switch to the release branch.
    release_branch_name = f"release-v{base_version}"
    release_branch = find_ref(repo, release_branch_name)
    if release_branch:
        if release_branch.is_remote():
            # If the release branch only exists on the remote we check it out
            # locally.
            repo.git.checkout(release_branch_name)
            release_branch = repo.active_branch
    else:
        # If a branch doesn't exist we create one. We ask which branch it
        # should be based on, defaulting to sensible values depending on the
        # release type.
        if current_version.is_prerelease:
            default = release_branch_name
        elif release_type == "major":
            default = "develop"
        else:
            default = "master"

        branch_name = click.prompt(
            "Which branch should the release be based on?", default=default
        )

        base_branch = find_ref(repo, branch_name)
        if not base_branch:
            print(f"Could not find base branch {branch_name}!")
            click.get_current_context().abort()

        # Check out the base branch and ensure it's up to date
        repo.head.reference = base_branch
        repo.head.reset(index=True, working_tree=True)
        if not base_branch.is_remote():
            update_branch(repo)

        # Create the new release branch
        release_branch = repo.create_head(release_branch_name, commit=base_branch)

    # Switch to the release branch and ensure it's up to date.
    repo.git.checkout(release_branch_name)
    update_branch(repo)

    # Update the `__version__` variable and write it back to the file.
    version_node.value = '"' + new_version + '"'
    with open("synapse/__init__.py", "w") as f:
        f.write(red.dumps())

    # Generate changelogs
    subprocess.run("python3 -m towncrier", shell=True)

    # Generate Debian changelogs if it's not an RC.
    if not rc:
        subprocess.run(
            f'dch -M -v {new_version} "New synapse release {new_version}."', shell=True
        )
        subprocess.run('dch -M -r -D stable ""', shell=True)
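        # (`dch` comes from the Debian devscripts package: the first call adds a
        # new debian/changelog entry for this version, the second finalises that
        # entry for the "stable" distribution.)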

    # Show the user the changes and ask if they want to edit the change log.
    repo.git.add("-u")
    subprocess.run("git diff --cached", shell=True)

    if click.confirm("Edit changelog?", default=False):
        click.edit(filename="CHANGES.md")

    # Commit the changes.
    repo.git.add("-u")
    repo.git.commit(f"-m {new_version}")

    # We give the option to bail here in case the user wants to make sure things
    # are OK before pushing.
    if not click.confirm("Push branch to github?", default=True):
        print("")
        print("Run when ready to push:")
        print("")
        print(f"\tgit push -u {repo.remote().name} {repo.active_branch.name}")
        print("")
        sys.exit(0)

    # Otherwise, push and open the changelog in the browser.
    repo.git.push("-u", repo.remote().name, repo.active_branch.name)

    click.launch(
        f"https://github.com/matrix-org/synapse/blob/{repo.active_branch.name}/CHANGES.md"
    )


def find_ref(repo: git.Repo, ref_name: str) -> Optional[git.HEAD]:
    """Find the branch/ref, looking first locally then in the remote."""
    if ref_name in repo.refs:
        return repo.refs[ref_name]
    elif ref_name in repo.remote().refs:
        return repo.remote().refs[ref_name]
    else:
        return None


def update_branch(repo: git.Repo):
    """Ensure branch is up to date if it has a remote"""
    if repo.active_branch.tracking_branch():
        repo.git.merge(repo.active_branch.tracking_branch().name)


if __name__ == "__main__":
    run()
@@ -1,126 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import sys
from json import JSONDecodeError

import yaml
from signedjson.key import read_signing_keys
from signedjson.sign import sign_json

from synapse.util import json_encoder


def main():
    parser = argparse.ArgumentParser(
        description="""Adds a signature to a JSON object.

Example usage:

    $ scripts-dev/sign_json.py -N test -k localhost.signing.key "{}"
    {"signatures":{"test":{"ed25519:a_ZnZh":"LmPnml6iM0iR..."}}}
""",
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )

    parser.add_argument(
        "-N",
        "--server-name",
        help="Name to give as the local homeserver. If unspecified, will be "
        "read from the config file.",
    )

    parser.add_argument(
        "-k",
        "--signing-key-path",
        help="Path to the file containing the private ed25519 key to sign the "
        "request with.",
    )

    parser.add_argument(
        "-c",
        "--config",
        default="homeserver.yaml",
        help=(
            "Path to synapse config file, from which the server name and/or signing "
            "key path will be read. Ignored if --server-name and --signing-key-path "
            "are both given."
        ),
    )

    input_args = parser.add_mutually_exclusive_group()

    input_args.add_argument("input_data", nargs="?", help="Raw JSON to be signed.")

    input_args.add_argument(
        "-i",
        "--input",
        type=argparse.FileType("r"),
        default=sys.stdin,
        help=(
            "A file from which to read the JSON to be signed. If neither --input nor "
            "input_data are given, JSON will be read from stdin."
        ),
    )

    parser.add_argument(
        "-o",
        "--output",
        type=argparse.FileType("w"),
        default=sys.stdout,
        help="Where to write the signed JSON. Defaults to stdout.",
    )

    args = parser.parse_args()

    if not args.server_name or not args.signing_key_path:
        read_args_from_config(args)

    with open(args.signing_key_path) as f:
        key = read_signing_keys(f)[0]
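        # (A Synapse signing key file holds one key per line, in the form
        # "ed25519 <key_id> <base64 seed>"; only the first key in the file is
        # used here.)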

    json_to_sign = args.input_data
    if json_to_sign is None:
        json_to_sign = args.input.read()

    try:
        obj = json.loads(json_to_sign)
    except JSONDecodeError as e:
        print("Unable to parse input as JSON: %s" % e, file=sys.stderr)
        sys.exit(1)

    if not isinstance(obj, dict):
        print("Input json was not an object", file=sys.stderr)
        sys.exit(1)

    sign_json(obj, args.server_name, key)
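    # sign_json() mutates `obj` in place, adding a "signatures" mapping keyed by
    # the server name (as in the example output in the description above).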
    for c in json_encoder.iterencode(obj):
        args.output.write(c)
    args.output.write("\n")


def read_args_from_config(args: argparse.Namespace) -> None:
    with open(args.config, "r") as fh:
        config = yaml.safe_load(fh)
        if not args.server_name:
            args.server_name = config["server_name"]
        if not args.signing_key_path:
            args.signing_key_path = config["signing_key_path"]


if __name__ == "__main__":
    main()
@@ -1,4 +1,5 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");

@@ -1,4 +1,5 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");