Compare commits
1 commit
squah/leav...release-v1

| Author | SHA1 | Date |
|---|---|---|
|  | 936f461148 |  |
.buildkite/.env (new file, 13 lines)
@@ -0,0 +1,13 @@
+CI
+BUILDKITE
+BUILDKITE_BUILD_NUMBER
+BUILDKITE_BRANCH
+BUILDKITE_BUILD_NUMBER
+BUILDKITE_JOB_ID
+BUILDKITE_BUILD_URL
+BUILDKITE_PROJECT_SLUG
+BUILDKITE_COMMIT
+BUILDKITE_PULL_REQUEST
+BUILDKITE_TAG
+CODECOV_TOKEN
+TRIAL_FLAGS
.buildkite/merge_base_branch.sh (new executable file, 35 lines)
@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+
+set -e
+
+if [[ "$BUILDKITE_BRANCH" =~ ^(develop|master|dinsic|shhs|release-.*)$ ]]; then
+    echo "Not merging forward, as this is a release branch"
+    exit 0
+fi
+
+if [[ -z $BUILDKITE_PULL_REQUEST_BASE_BRANCH ]]; then
+    echo "Not a pull request, or hasn't had a PR opened yet..."
+
+    # It probably hasn't had a PR opened yet. Since all PRs land on develop, we
+    # can probably assume it's based on it and will be merged into it.
+    GITBASE="develop"
+else
+    # Get the reference, using the GitHub API
+    GITBASE=$BUILDKITE_PULL_REQUEST_BASE_BRANCH
+fi
+
+echo "--- merge_base_branch $GITBASE"
+
+# Show what we are before
+git --no-pager show -s
+
+# Set up username so it can do a merge
+git config --global user.email bot@matrix.org
+git config --global user.name "A robot"
+
+# Fetch and merge. If it doesn't work, it will raise due to set -e.
+git fetch -u origin $GITBASE
+git merge --no-edit --no-commit origin/$GITBASE
+
+# Show what we are after.
+git --no-pager show -s
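The script above is driven entirely by Buildkite's environment. A minimal sketch of a local dry run, assuming illustrative branch names and that it is invoked from inside a git checkout:

```sh
# Emulate the Buildkite environment variables the script reads.
# Branch names here are illustrative, not from the diff.
BUILDKITE_BRANCH="my-feature" \
BUILDKITE_PULL_REQUEST_BASE_BRANCH="develop" \
.buildkite/merge_base_branch.sh
```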
.ci/postgres-config.yaml → .buildkite/postgres-config.yaml
@@ -3,7 +3,7 @@
 # CI's Docker setup at the point where this file is considered.
 server_name: "localhost:8800"
 
-signing_key_path: ".ci/test.signing.key"
+signing_key_path: ".buildkite/test.signing.key"
 
 report_stats: false
 
@@ -11,7 +11,7 @@ database:
   name: "psycopg2"
   args:
     user: postgres
-    host: localhost
+    host: postgres
     password: postgres
     database: synapse
 
.ci/scripts/postgres_exec.py → .buildkite/scripts/postgres_exec.py
@@ -23,7 +23,7 @@ import psycopg2
 # We use "postgres" as a database because it's bound to exist and the "synapse" one
 # doesn't exist yet.
 db_conn = psycopg2.connect(
-    user="postgres", host="localhost", password="postgres", dbname="postgres"
+    user="postgres", host="postgres", password="postgres", dbname="postgres"
 )
 db_conn.autocommit = True
 cur = db_conn.cursor()
.ci/scripts/test_old_deps.sh → .buildkite/scripts/test_old_deps.sh
@@ -1,6 +1,6 @@
 #!/usr/bin/env bash
 
-# this script is run by GitHub Actions in a plain `bionic` container; it installs the
+# this script is run by buildkite in a plain `bionic` container; it installs the
 # minimal requirements for tox and hands over to the py3-old tox environment.
 
 set -ex
.ci/scripts/test_synapse_port_db.sh → .buildkite/scripts/test_synapse_port_db.sh
@@ -7,7 +7,7 @@
 
 set -xe
-cd "$(dirname "$0")/../.."
+cd `dirname $0`/../..
 
 echo "--- Install dependencies"
 
@@ -20,22 +20,22 @@ pip install -e .
 echo "--- Generate the signing key"
 
 # Generate the server's signing key.
-python -m synapse.app.homeserver --generate-keys -c .ci/sqlite-config.yaml
+python -m synapse.app.homeserver --generate-keys -c .buildkite/sqlite-config.yaml
 
 echo "--- Prepare test database"
 
 # Make sure the SQLite3 database is using the latest schema and has no pending background update.
-scripts/update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
+scripts-dev/update_database --database-config .buildkite/sqlite-config.yaml
 
 # Create the PostgreSQL database.
-.ci/scripts/postgres_exec.py "CREATE DATABASE synapse"
+./.buildkite/scripts/postgres_exec.py "CREATE DATABASE synapse"
 
 echo "+++ Run synapse_port_db against test database"
-coverage run scripts/synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
+coverage run scripts/synapse_port_db --sqlite-database .buildkite/test_db.db --postgres-config .buildkite/postgres-config.yaml
 
 # We should be able to run twice against the same database.
 echo "+++ Run synapse_port_db a second time"
-coverage run scripts/synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
+coverage run scripts/synapse_port_db --sqlite-database .buildkite/test_db.db --postgres-config .buildkite/postgres-config.yaml
 
 #####
 
@@ -44,14 +44,14 @@ coverage run scripts/synapse_port_db --sqlite-database .ci/test_db.db --postgres
 echo "--- Prepare empty SQLite database"
 
 # we do this by deleting the sqlite db, and then doing the same again.
-rm .ci/test_db.db
+rm .buildkite/test_db.db
 
-scripts/update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
+scripts-dev/update_database --database-config .buildkite/sqlite-config.yaml
 
 # re-create the PostgreSQL database.
-.ci/scripts/postgres_exec.py \
+./.buildkite/scripts/postgres_exec.py \
   "DROP DATABASE synapse" \
   "CREATE DATABASE synapse"
 
 echo "+++ Run synapse_port_db against empty database"
-coverage run scripts/synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
+coverage run scripts/synapse_port_db --sqlite-database .buildkite/test_db.db --postgres-config .buildkite/postgres-config.yaml
.ci/sqlite-config.yaml → .buildkite/sqlite-config.yaml
@@ -3,14 +3,14 @@
 # schema and run background updates on it.
 server_name: "localhost:8800"
 
-signing_key_path: ".ci/test.signing.key"
+signing_key_path: ".buildkite/test.signing.key"
 
 report_stats: false
 
 database:
   name: "sqlite3"
   args:
-    database: ".ci/test_db.db"
+    database: ".buildkite/test_db.db"
 
 # Suppress the key server warning.
 trusted_key_servers: []
.buildkite/worker-blacklist (new file, 10 lines)
@@ -0,0 +1,10 @@
+# This file serves as a blacklist for SyTest tests that we expect will fail in
+# Synapse when run under worker mode. For more details, see sytest-blacklist.
+
+Can re-join room if re-invited
+
+# new failures as of https://github.com/matrix-org/sytest/pull/732
+Device list doesn't change if remote server is down
+
+# https://buildkite.com/matrix-dot-org/synapse/builds/6134#6f67bf47-e234-474d-80e8-c6e1868b15c5
+Server correctly handles incoming m.device_list_update
.ci/patch_for_twisted_trunk.sh (deleted, 8 lines)
@@ -1,8 +0,0 @@
-#!/bin/sh
-
-# replaces the dependency on Twisted in `python_dependencies` with trunk.
-
-set -e
-cd "$(dirname "$0")"/..
-
-sed -i -e 's#"Twisted.*"#"Twisted @ git+https://github.com/twisted/twisted"#' synapse/python_dependencies.py
.ci/scripts/test_export_data_command.sh (deleted, 57 lines)
@@ -1,57 +0,0 @@
-#!/usr/bin/env bash
-
-# Test for the export-data admin command against sqlite and postgres
-
-set -xe
-cd "$(dirname "$0")/../.."
-
-echo "--- Install dependencies"
-
-# Install dependencies for this test.
-pip install psycopg2
-
-# Install Synapse itself. This won't update any libraries.
-pip install -e .
-
-echo "--- Generate the signing key"
-
-# Generate the server's signing key.
-python -m synapse.app.homeserver --generate-keys -c .ci/sqlite-config.yaml
-
-echo "--- Prepare test database"
-
-# Make sure the SQLite3 database is using the latest schema and has no pending background update.
-scripts/update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
-
-# Run the export-data command on the sqlite test database
-python -m synapse.app.admin_cmd -c .ci/sqlite-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \
---output-directory /tmp/export_data
-
-# Test that the output directory exists and contains the rooms directory
-dir="/tmp/export_data/rooms"
-if [ -d "$dir" ]; then
-    echo "Command successful, this test passes"
-else
-    echo "No output directories found, the command fails against a sqlite database."
-    exit 1
-fi
-
-# Create the PostgreSQL database.
-.ci/scripts/postgres_exec.py "CREATE DATABASE synapse"
-
-# Port the SQLite databse to postgres so we can check command works against postgres
-echo "+++ Port SQLite3 databse to postgres"
-scripts/synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
-
-# Run the export-data command on postgres database
-python -m synapse.app.admin_cmd -c .ci/postgres-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \
---output-directory /tmp/export_data2
-
-# Test that the output directory exists and contains the rooms directory
-dir2="/tmp/export_data2/rooms"
-if [ -d "$dir2" ]; then
-    echo "Command successful, this test passes"
-else
-    echo "No output directories found, the command fails against a postgres database."
-    exit 1
-fi
.ci/twisted_trunk_build_failed_issue_template.md (deleted, 4 lines)
@@ -1,4 +0,0 @@
----
-title: CI run against Twisted trunk is failing
----
-See https://github.com/{{env.GITHUB_REPOSITORY}}/actions/runs/{{env.GITHUB_RUN_ID}}
.ci/worker-blacklist (deleted, 2 lines)
@@ -1,2 +0,0 @@
-# This file serves as a blacklist for SyTest tests that we expect will fail in
-# Synapse when run under worker mode. For more details, see sytest-blacklist.
.circleci/config.yml (new file, 78 lines)
@@ -0,0 +1,78 @@
+version: 2.1
+jobs:
+  dockerhubuploadrelease:
+    docker:
+      - image: docker:git
+    steps:
+      - checkout
+      - docker_prepare
+      - run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
+      # for release builds, we want to get the amd64 image out asap, so first
+      # we do an amd64-only build, before following up with a multiarch build.
+      - docker_build:
+          tag: -t matrixdotorg/synapse:${CIRCLE_TAG}
+          platforms: linux/amd64
+      - docker_build:
+          tag: -t matrixdotorg/synapse:${CIRCLE_TAG}
+          platforms: linux/amd64,linux/arm64
+
+  dockerhubuploadlatest:
+    docker:
+      - image: docker:git
+    steps:
+      - checkout
+      - docker_prepare
+      - run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
+      # for `latest`, we don't want the arm images to disappear, so don't update the tag
+      # until all of the platforms are built.
+      - docker_build:
+          tag: -t matrixdotorg/synapse:latest
+          platforms: linux/amd64,linux/arm64
+
+workflows:
+  build:
+    jobs:
+      - dockerhubuploadrelease:
+          filters:
+            tags:
+              only: /v[0-9].[0-9]+.[0-9]+.*/
+            branches:
+              ignore: /.*/
+      - dockerhubuploadlatest:
+          filters:
+            branches:
+              only: [ master, main ]
+
+commands:
+  docker_prepare:
+    description: Sets up a remote docker server, downloads the buildx cli plugin, and enables multiarch images
+    parameters:
+      buildx_version:
+        type: string
+        default: "v0.4.1"
+    steps:
+      - setup_remote_docker:
+          # 19.03.13 was the most recent available on circleci at the time of
+          # writing.
+          version: 19.03.13
+      - run: apk add --no-cache curl
+      - run: mkdir -vp ~/.docker/cli-plugins/ ~/dockercache
+      - run: curl --silent -L "https://github.com/docker/buildx/releases/download/<< parameters.buildx_version >>/buildx-<< parameters.buildx_version >>.linux-amd64" > ~/.docker/cli-plugins/docker-buildx
+      - run: chmod a+x ~/.docker/cli-plugins/docker-buildx
+      # install qemu links in /proc/sys/fs/binfmt_misc on the docker instance running the circleci job
+      - run: docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
+      # create a context named `builder` for the builds
+      - run: docker context create builder
+      # create a buildx builder using the new context, and set it as the default
+      - run: docker buildx create builder --use
+
+  docker_build:
+    description: Builds and pushed images to dockerhub using buildx
+    parameters:
+      platforms:
+        type: string
+        default: linux/amd64
+      tag:
+        type: string
+    steps:
+      - run: docker buildx build -f docker/Dockerfile --push --platform << parameters.platforms >> --label gitsha1=${CIRCLE_SHA1} << parameters.tag >> --progress=plain .
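For reference, the `docker_build` command above expands to a single `docker buildx build` invocation; outside CircleCI an equivalent call would look roughly like this sketch (the tag, platform list and label value are illustrative, not from the diff):

```sh
# Rough equivalent of the docker_build command with its parameters filled in.
docker buildx build -f docker/Dockerfile --push \
    --platform linux/amd64,linux/arm64 \
    --label gitsha1=0123abc \
    -t matrixdotorg/synapse:latest \
    --progress=plain .
```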
.github/CODEOWNERS (vendored, 2 lines deleted)
@@ -1,2 +0,0 @@
-# Automatically request reviews from the synapse-core team when a pull request comes in.
-* @matrix-org/synapse-core
.github/PULL_REQUEST_TEMPLATE.md (vendored, 9 lines changed)
@@ -1,13 +1,12 @@
 ### Pull Request Checklist
 
-<!-- Please read https://matrix-org.github.io/synapse/latest/development/contributing_guide.html before submitting your pull request -->
+<!-- Please read CONTRIBUTING.md before submitting your pull request -->
 
 * [ ] Pull request is based on the develop branch
-* [ ] Pull request includes a [changelog file](https://matrix-org.github.io/synapse/latest/development/contributing_guide.html#changelog). The entry should:
+* [ ] Pull request includes a [changelog file](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.md#changelog). The entry should:
   - Be a short description of your change which makes sense to users. "Fixed a bug that prevented receiving messages from other servers." instead of "Moved X method from `EventStore` to `EventWorkerStore`.".
   - Use markdown where necessary, mostly for `code blocks`.
   - End with either a period (.) or an exclamation mark (!).
   - Start with a capital letter.
-* [ ] Pull request includes a [sign off](https://matrix-org.github.io/synapse/latest/development/contributing_guide.html#sign-off)
-* [ ] [Code style](https://matrix-org.github.io/synapse/latest/code_style.html) is correct
-  (run the [linters](https://matrix-org.github.io/synapse/latest/development/contributing_guide.html#run-the-linters))
+* [ ] Pull request includes a [sign off](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.md#sign-off)
+* [ ] Code style is correct (run the [linters](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.md#code-style))
.github/workflows/docker.yml (vendored, 75 lines deleted)
@@ -1,75 +0,0 @@
-# GitHub actions workflow which builds and publishes the docker images.
-
-name: Build docker images
-
-on:
-  push:
-    tags: ["v*"]
-    branches: [ master, main, develop ]
-  workflow_dispatch:
-
-permissions:
-  contents: read
-
-jobs:
-  build:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Set up QEMU
-        id: qemu
-        uses: docker/setup-qemu-action@v1
-        with:
-          platforms: arm64
-
-      - name: Set up Docker Buildx
-        id: buildx
-        uses: docker/setup-buildx-action@v1
-
-      - name: Inspect builder
-        run: docker buildx inspect
-
-      - name: Log in to DockerHub
-        uses: docker/login-action@v1
-        with:
-          username: ${{ secrets.DOCKERHUB_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_TOKEN }}
-
-      - name: Calculate docker image tag
-        id: set-tag
-        run: |
-          case "${GITHUB_REF}" in
-              refs/heads/develop)
-                  tag=develop
-                  ;;
-              refs/heads/master|refs/heads/main)
-                  tag=latest
-                  ;;
-              refs/tags/*)
-                  tag=${GITHUB_REF#refs/tags/}
-                  ;;
-              *)
-                  tag=${GITHUB_SHA}
-                  ;;
-          esac
-          echo "::set-output name=tag::$tag"
-
-      # for release builds, we want to get the amd64 image out asap, so first
-      # we do an amd64-only build, before following up with a multiarch build.
-      - name: Build and push amd64
-        uses: docker/build-push-action@v2
-        if: "${{ startsWith(github.ref, 'refs/tags/v') }}"
-        with:
-          push: true
-          labels: "gitsha1=${{ github.sha }}"
-          tags: "matrixdotorg/synapse:${{ steps.set-tag.outputs.tag }}"
-          file: "docker/Dockerfile"
-          platforms: linux/amd64
-
-      - name: Build and push all platforms
-        uses: docker/build-push-action@v2
-        with:
-          push: true
-          labels: "gitsha1=${{ github.sha }}"
-          tags: "matrixdotorg/synapse:${{ steps.set-tag.outputs.tag }}"
-          file: "docker/Dockerfile"
-          platforms: linux/amd64,linux/arm64
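The removed `Calculate docker image tag` step derives the image tag from `GITHUB_REF` with a shell parameter expansion. A standalone check of that expansion, using an illustrative ref value:

```sh
# Illustrative ref; the expansion strips the leading "refs/tags/" prefix,
# mirroring the set-tag step above.
GITHUB_REF=refs/tags/v1.44.0
tag=${GITHUB_REF#refs/tags/}
echo "$tag"   # prints: v1.44.0
```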
.github/workflows/docs.yaml (vendored, 1 line changed)
@@ -61,5 +61,6 @@ jobs:
         uses: peaceiris/actions-gh-pages@068dc23d9710f1ba62e86896f84735d869951305 # v3.8.0
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
+          keep_files: true
           publish_dir: ./book
           destination_dir: ./${{ steps.vars.outputs.branch-version }}
.github/workflows/release-artifacts.yml (vendored, 64 lines changed)
@@ -3,37 +3,28 @@
 name: Build release artifacts
 
 on:
-  # we build on PRs and develop to (hopefully) get early warning
-  # of things breaking (but only build one set of debs)
-  pull_request:
   push:
-    branches: ["develop"]
+    # we build on develop and release branches to (hopefully) get early warning
+    # of things breaking
+    branches: ["develop", "release-*"]
 
-    # we do the full build on tags.
+    # we also rebuild on tags, so that we can be sure of picking the artifacts
+    # from the right tag.
     tags: ["v*"]
 
-concurrency:
-  group: ${{ github.workflow }}-${{ github.ref }}
-  cancel-in-progress: true
-
 permissions:
   contents: write
 
 jobs:
   # first get the list of distros to build for.
   get-distros:
     name: "Calculate list of debian distros"
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v2
       - uses: actions/setup-python@v2
       - id: set-distros
         run: |
-          # if we're running from a tag, get the full list of distros; otherwise just use debian:sid
-          dists='["debian:sid"]'
-          if [[ $GITHUB_REF == refs/tags/* ]]; then
-            dists=$(scripts-dev/build_debian_packages --show-dists-json)
-          fi
-          echo "::set-output name=distros::$dists"
+          echo "::set-output name=distros::$(scripts-dev/build_debian_packages --show-dists-json)"
     # map the step outputs to job outputs
     outputs:
       distros: ${{ steps.set-distros.outputs.distros }}
@@ -48,43 +39,12 @@
         distro: ${{ fromJson(needs.get-distros.outputs.distros) }}
 
     steps:
-      - name: Checkout
-        uses: actions/checkout@v2
+      - uses: actions/checkout@v2
         with:
           path: src
-
-      - name: Set up Docker Buildx
-        id: buildx
-        uses: docker/setup-buildx-action@v1
-        with:
-          install: true
-
-      - name: Set up docker layer caching
-        uses: actions/cache@v2
-        with:
-          path: /tmp/.buildx-cache
-          key: ${{ runner.os }}-buildx-${{ github.sha }}
-          restore-keys: |
-            ${{ runner.os }}-buildx-
-
-      - name: Set up python
-        uses: actions/setup-python@v2
-
-      - name: Build the packages
-        # see https://github.com/docker/build-push-action/issues/252
-        # for the cache magic here
-        run: |
-          ./src/scripts-dev/build_debian_packages \
-            --docker-build-arg=--cache-from=type=local,src=/tmp/.buildx-cache \
-            --docker-build-arg=--cache-to=type=local,mode=max,dest=/tmp/.buildx-cache-new \
-            --docker-build-arg=--progress=plain \
-            --docker-build-arg=--load \
-            "${{ matrix.distro }}"
-          rm -rf /tmp/.buildx-cache
-          mv /tmp/.buildx-cache-new /tmp/.buildx-cache
-
-      - name: Upload debs as artifacts
-        uses: actions/upload-artifact@v2
+      - uses: actions/setup-python@v2
+      - run: ./src/scripts-dev/build_debian_packages "${{ matrix.distro }}"
+      - uses: actions/upload-artifact@v2
         with:
           name: debs
          path: debs/*
@@ -106,7 +66,7 @@
   # if it's a tag, create a release and attach the artifacts to it
   attach-assets:
     name: "Attach assets to release"
-    if: ${{ !failure() && !cancelled() && startsWith(github.ref, 'refs/tags/') }}
+    if: startsWith(github.ref, 'refs/tags/')
     needs:
       - build-debs
       - build-sdist
 
.github/workflows/tests.yml (vendored, 141 lines changed)
@@ -5,10 +5,6 @@ on:
     branches: ["develop", "release-*"]
   pull_request:
 
-concurrency:
-  group: ${{ github.workflow }}-${{ github.ref }}
-  cancel-in-progress: true
-
 jobs:
   lint:
     runs-on: ubuntu-latest
@@ -38,15 +34,20 @@
     if: ${{ github.base_ref == 'develop' || contains(github.base_ref, 'release-') }}
     runs-on: ubuntu-latest
     steps:
+      # Note: This and the script can be simplified once we drop Buildkite. See:
+      #   https://github.com/actions/checkout/issues/266#issuecomment-638346893
+      #   https://github.com/actions/checkout/issues/416
       - uses: actions/checkout@v2
         with:
           ref: ${{ github.event.pull_request.head.sha }}
           fetch-depth: 0
       - uses: actions/setup-python@v2
       - run: pip install tox
+      - name: Patch Buildkite-specific test script
+        run: |
+          sed -i -e 's/\$BUILDKITE_PULL_REQUEST/${{ github.event.number }}/' \
+            scripts-dev/check-newsfragment
       - run: scripts-dev/check-newsfragment
-        env:
-          PULL_REQUEST_NUMBER: ${{ github.event.number }}
 
   lint-sdist:
     runs-on: ubuntu-latest
@@ -64,37 +65,34 @@
 
   # Dummy step to gate other tests on without repeating the whole list
   linting-done:
-    if: ${{ !cancelled() }} # Run this even if prior jobs were skipped
+    if: ${{ always() }} # Run this even if prior jobs were skipped
     needs: [lint, lint-crlf, lint-newsfile, lint-sdist]
     runs-on: ubuntu-latest
     steps:
       - run: "true"
 
   trial:
-    if: ${{ !cancelled() && !failure() }} # Allow previous steps to be skipped, but not fail
+    if: ${{ !failure() }} # Allow previous steps to be skipped, but not fail
     needs: linting-done
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        python-version: ["3.6", "3.7", "3.8", "3.9", "3.10"]
+        python-version: ["3.6", "3.7", "3.8", "3.9"]
         database: ["sqlite"]
-        toxenv: ["py"]
         include:
           # Newest Python without optional deps
-          - python-version: "3.10"
-            toxenv: "py-noextras"
+          - python-version: "3.9"
+            toxenv: "py-noextras,combine"
 
           # Oldest Python with PostgreSQL
           - python-version: "3.6"
            database: "postgres"
            postgres-version: "9.6"
-            toxenv: "py"
 
-          # Newest Python with newest PostgreSQL
-          - python-version: "3.10"
+          # Newest Python with PostgreSQL
+          - python-version: "3.9"
            database: "postgres"
-            postgres-version: "14"
-            toxenv: "py"
+            postgres-version: "13"
 
     steps:
       - uses: actions/checkout@v2
@@ -114,7 +112,7 @@
         if: ${{ matrix.postgres-version }}
         timeout-minutes: 2
         run: until pg_isready -h localhost; do sleep 1; done
-      - run: tox -e ${{ matrix.toxenv }}
+      - run: tox -e py,combine
        env:
          TRIAL_FLAGS: "--jobs=2"
          SYNAPSE_POSTGRES: ${{ matrix.database == 'postgres' || '' }}
@@ -122,8 +120,6 @@
          SYNAPSE_POSTGRES_USER: postgres
          SYNAPSE_POSTGRES_PASSWORD: postgres
       - name: Dump logs
-        # Logs are most useful when the command fails, always include them.
-        if: ${{ always() }}
        # Note: Dumps to workflow logs instead of using actions/upload-artifact
        #       This keeps logs colocated with failing jobs
        #       It also ignores find's exit code; this is a best effort affair
@@ -135,7 +131,7 @@
          || true
 
   trial-olddeps:
-    if: ${{ !cancelled() && !failure() }} # Allow previous steps to be skipped, but not fail
+    if: ${{ !failure() }} # Allow previous steps to be skipped, but not fail
     needs: linting-done
     runs-on: ubuntu-latest
     steps:
@@ -144,12 +140,10 @@
        uses: docker://ubuntu:bionic # For old python and sqlite
        with:
          workdir: /github/workspace
-          entrypoint: .ci/scripts/test_old_deps.sh
+          entrypoint: .buildkite/scripts/test_old_deps.sh
        env:
          TRIAL_FLAGS: "--jobs=2"
       - name: Dump logs
-        # Logs are most useful when the command fails, always include them.
-        if: ${{ always() }}
        # Note: Dumps to workflow logs instead of using actions/upload-artifact
        #       This keeps logs colocated with failing jobs
        #       It also ignores find's exit code; this is a best effort affair
@@ -162,7 +156,7 @@
 
   trial-pypy:
     # Very slow; only run if the branch name includes 'pypy'
-    if: ${{ contains(github.ref, 'pypy') && !failure() && !cancelled() }}
+    if: ${{ contains(github.ref, 'pypy') && !failure() }}
     needs: linting-done
     runs-on: ubuntu-latest
     strategy:
@@ -176,12 +170,10 @@
        with:
          python-version: ${{ matrix.python-version }}
       - run: pip install tox
-      - run: tox -e py
+      - run: tox -e py,combine
        env:
          TRIAL_FLAGS: "--jobs=2"
       - name: Dump logs
-        # Logs are most useful when the command fails, always include them.
-        if: ${{ always() }}
        # Note: Dumps to workflow logs instead of using actions/upload-artifact
        #       This keeps logs colocated with failing jobs
        #       It also ignores find's exit code; this is a best effort affair
@@ -193,7 +185,7 @@
          || true
 
   sytest:
-    if: ${{ !failure() && !cancelled() }}
+    if: ${{ !failure() }}
     needs: linting-done
     runs-on: ubuntu-latest
     container:
@@ -201,13 +193,12 @@
      volumes:
        - ${{ github.workspace }}:/src
      env:
-        SYTEST_BRANCH: ${{ github.head_ref }}
+        BUILDKITE_BRANCH: ${{ github.head_ref }}
        POSTGRES: ${{ matrix.postgres && 1}}
        MULTI_POSTGRES: ${{ (matrix.postgres == 'multi-postgres') && 1}}
        WORKERS: ${{ matrix.workers && 1 }}
        REDIS: ${{ matrix.redis && 1 }}
        BLACKLIST: ${{ matrix.workers && 'synapse-blacklist-with-workers' }}
-        TOP: ${{ github.workspace }}
 
     strategy:
       fail-fast: false
@@ -237,7 +228,7 @@
     steps:
       - uses: actions/checkout@v2
       - name: Prepare test blacklist
-        run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers
+        run: cat sytest-blacklist .buildkite/worker-blacklist > synapse-blacklist-with-workers
       - name: Run SyTest
         run: /bootstrap.sh synapse
         working-directory: /src
@@ -253,49 +244,18 @@
            /logs/results.tap
            /logs/**/*.log*
 
-  export-data:
-    if: ${{ !failure() && !cancelled() }} # Allow previous steps to be skipped, but not fail
-    needs: [linting-done, portdb]
-    runs-on: ubuntu-latest
-    env:
-      TOP: ${{ github.workspace }}
-
-    services:
-      postgres:
-        image: postgres
-        ports:
-          - 5432:5432
-        env:
-          POSTGRES_PASSWORD: "postgres"
-          POSTGRES_INITDB_ARGS: "--lc-collate C --lc-ctype C --encoding UTF8"
-        options: >-
-          --health-cmd pg_isready
-          --health-interval 10s
-          --health-timeout 5s
-          --health-retries 5
-
-    steps:
-      - uses: actions/checkout@v2
-      - run: sudo apt-get -qq install xmlsec1
-      - uses: actions/setup-python@v2
-        with:
-          python-version: "3.9"
-      - run: .ci/scripts/test_export_data_command.sh
-
   portdb:
-    if: ${{ !failure() && !cancelled() }} # Allow previous steps to be skipped, but not fail
+    if: ${{ !failure() }} # Allow previous steps to be skipped, but not fail
     needs: linting-done
     runs-on: ubuntu-latest
    env:
      TOP: ${{ github.workspace }}
    strategy:
      matrix:
        include:
          - python-version: "3.6"
            postgres-version: "9.6"
 
-          - python-version: "3.10"
-            postgres-version: "14"
+          - python-version: "3.9"
+            postgres-version: "13"
 
    services:
      postgres:
@@ -317,10 +277,16 @@
       - uses: actions/setup-python@v2
         with:
           python-version: ${{ matrix.python-version }}
-      - run: .ci/scripts/test_synapse_port_db.sh
+      - name: Patch Buildkite-specific test scripts
+        run: |
+          sed -i -e 's/host="postgres"/host="localhost"/' .buildkite/scripts/postgres_exec.py
+          sed -i -e 's/host: postgres/host: localhost/' .buildkite/postgres-config.yaml
+          sed -i -e 's|/src/||' .buildkite/{sqlite,postgres}-config.yaml
+          sed -i -e 's/\$TOP/\$GITHUB_WORKSPACE/' .coveragerc
+      - run: .buildkite/scripts/test_synapse_port_db.sh
 
   complement:
-    if: ${{ !failure() && !cancelled() }}
+    if: ${{ !failure() }}
     needs: linting-done
     runs-on: ubuntu-latest
     container:
@@ -374,44 +340,7 @@
        working-directory: complement/dockerfiles
 
      # Run Complement
-      - run: go test -v -tags synapse_blacklist,msc2403 ./tests/...
+      - run: go test -v -tags synapse_blacklist,msc2403,msc2946,msc3083 ./tests
        env:
          COMPLEMENT_BASE_IMAGE: complement-synapse:latest
        working-directory: complement
-
-  # a job which marks all the other jobs as complete, thus allowing PRs to be merged.
-  tests-done:
-    if: ${{ always() }}
-    needs:
-      - lint
-      - lint-crlf
-      - lint-newsfile
-      - lint-sdist
-      - trial
-      - trial-olddeps
-      - sytest
-      - portdb
-      - complement
-    runs-on: ubuntu-latest
-    steps:
-      - name: Set build result
-        env:
-          NEEDS_CONTEXT: ${{ toJSON(needs) }}
-        # the `jq` incantation dumps out a series of "<job> <result>" lines.
-        # we set it to an intermediate variable to avoid a pipe, which makes it
-        # hard to set $rc.
-        run: |
-          rc=0
-          results=$(jq -r 'to_entries[] | [.key,.value.result] | join(" ")' <<< $NEEDS_CONTEXT)
-          while read job result ; do
-            # The newsfile lint may be skipped on non PR builds
-            if [ $result == "skipped" ] && [ $job == "lint-newsfile" ]; then
-              continue
-            fi
-
-            if [ "$result" != "success" ]; then
-              echo "::set-failed ::Job $job returned $result"
-              rc=1
-            fi
-          done <<< $results
-          exit $rc
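The `jq` incantation in the removed `tests-done` job flattens the JSON `needs` context into `<job> <result>` lines. A minimal sketch with a hand-written stand-in for `toJSON(needs)`:

```sh
# Hand-written stand-in for the toJSON(needs) context; job names illustrative.
NEEDS_CONTEXT='{"lint":{"result":"success"},"trial":{"result":"failure"}}'
jq -r 'to_entries[] | [.key,.value.result] | join(" ")' <<< "$NEEDS_CONTEXT"
# prints:
#   lint success
#   trial failure
```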
.github/workflows/twisted_trunk.yml (vendored, 92 lines deleted)
@@ -1,92 +0,0 @@
-name: Twisted Trunk
-
-on:
-  schedule:
-    - cron: 0 8 * * *
-
-  workflow_dispatch:
-
-jobs:
-  mypy:
-    runs-on: ubuntu-latest
-
-    steps:
-      - uses: actions/checkout@v2
-      - uses: actions/setup-python@v2
-      - run: .ci/patch_for_twisted_trunk.sh
-      - run: pip install tox
-      - run: tox -e mypy
-
-  trial:
-    runs-on: ubuntu-latest
-
-    steps:
-      - uses: actions/checkout@v2
-      - run: sudo apt-get -qq install xmlsec1
-      - uses: actions/setup-python@v2
-        with:
-          python-version: 3.6
-      - run: .ci/patch_for_twisted_trunk.sh
-      - run: pip install tox
-      - run: tox -e py
-        env:
-          TRIAL_FLAGS: "--jobs=2"
-
-      - name: Dump logs
-        # Logs are most useful when the command fails, always include them.
-        if: ${{ always() }}
-        # Note: Dumps to workflow logs instead of using actions/upload-artifact
-        #       This keeps logs colocated with failing jobs
-        #       It also ignores find's exit code; this is a best effort affair
-        run: >-
-          find _trial_temp -name '*.log'
-          -exec echo "::group::{}" \;
-          -exec cat {} \;
-          -exec echo "::endgroup::" \;
-          || true
-
-  sytest:
-    runs-on: ubuntu-latest
-    container:
-      image: matrixdotorg/sytest-synapse:buster
-      volumes:
-        - ${{ github.workspace }}:/src
-
-    steps:
-      - uses: actions/checkout@v2
-      - name: Patch dependencies
-        run: .ci/patch_for_twisted_trunk.sh
-        working-directory: /src
-      - name: Run SyTest
-        run: /bootstrap.sh synapse
-        working-directory: /src
-      - name: Summarise results.tap
-        if: ${{ always() }}
-        run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
-      - name: Upload SyTest logs
-        uses: actions/upload-artifact@v2
-        if: ${{ always() }}
-        with:
-          name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
-          path: |
-            /logs/results.tap
-            /logs/**/*.log*
-
-  # open an issue if the build fails, so we know about it.
-  open-issue:
-    if: failure()
-    needs:
-      - mypy
-      - trial
-      - sytest
-
-    runs-on: ubuntu-latest
-
-    steps:
-      - uses: actions/checkout@v2
-      - uses: JasonEtco/create-an-issue@5d9504915f79f9cc6d791934b8ef34f2353dd74d # v2.5.0, 2020-12-06
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          update_existing: true
-          filename: .ci/twisted_trunk_build_failed_issue_template.md
.gitignore (vendored, 1 line changed)
@@ -40,7 +40,6 @@ __pycache__/
 /.coverage*
 /.mypy_cache/
 /.tox
-/.tox-pg-container
 /build/
 /coverage.*
 /dist/
CHANGES.md (1149 lines changed)
File diff suppressed because it is too large.
CONTRIBUTING.md (405 lines changed)
@@ -1,3 +1,404 @@
-# Welcome to Synapse
+Welcome to Synapse
 
-Please see the [contributors' guide](https://matrix-org.github.io/synapse/latest/development/contributing_guide.html) in our rendered documentation.
+This document aims to get you started with contributing to this repo!
+
+- [1. Who can contribute to Synapse?](#1-who-can-contribute-to-synapse)
+- [2. What do I need?](#2-what-do-i-need)
+- [3. Get the source.](#3-get-the-source)
+- [4. Install the dependencies](#4-install-the-dependencies)
+  * [Under Unix (macOS, Linux, BSD, ...)](#under-unix-macos-linux-bsd-)
+  * [Under Windows](#under-windows)
+- [5. Get in touch.](#5-get-in-touch)
+- [6. Pick an issue.](#6-pick-an-issue)
+- [7. Turn coffee and documentation into code and documentation!](#7-turn-coffee-and-documentation-into-code-and-documentation)
+- [8. Test, test, test!](#8-test-test-test)
+  * [Run the linters.](#run-the-linters)
+  * [Run the unit tests.](#run-the-unit-tests)
+  * [Run the integration tests.](#run-the-integration-tests)
+- [9. Submit your patch.](#9-submit-your-patch)
+  * [Changelog](#changelog)
+    + [How do I know what to call the changelog file before I create the PR?](#how-do-i-know-what-to-call-the-changelog-file-before-i-create-the-pr)
+    + [Debian changelog](#debian-changelog)
+  * [Sign off](#sign-off)
+- [10. Turn feedback into better code.](#10-turn-feedback-into-better-code)
+- [11. Find a new issue.](#11-find-a-new-issue)
+- [Notes for maintainers on merging PRs etc](#notes-for-maintainers-on-merging-prs-etc)
+- [Conclusion](#conclusion)
+
+# 1. Who can contribute to Synapse?
+
+Everyone is welcome to contribute code to [matrix.org
+projects](https://github.com/matrix-org), provided that they are willing to
+license their contributions under the same license as the project itself. We
+follow a simple 'inbound=outbound' model for contributions: the act of
+submitting an 'inbound' contribution means that the contributor agrees to
+license the code under the same terms as the project's overall 'outbound'
+license - in our case, this is almost always Apache Software License v2 (see
+[LICENSE](LICENSE)).
+
+# 2. What do I need?
+
+The code of Synapse is written in Python 3. To do pretty much anything, you'll need [a recent version of Python 3](https://wiki.python.org/moin/BeginnersGuide/Download).
+
+The source code of Synapse is hosted on GitHub. You will also need [a recent version of git](https://github.com/git-guides/install-git).
+
+For some tests, you will need [a recent version of Docker](https://docs.docker.com/get-docker/).
+
+
+# 3. Get the source.
+
+The preferred and easiest way to contribute changes is to fork the relevant
+project on GitHub, and then [create a pull request](
+https://help.github.com/articles/using-pull-requests/) to ask us to pull your
+changes into our repo.
+
+Please base your changes on the `develop` branch.
+
+```sh
+git clone git@github.com:YOUR_GITHUB_USER_NAME/synapse.git
+git checkout develop
+```
+
+If you need help getting started with git, this is beyond the scope of the document, but you
+can find many good git tutorials on the web.
+
+# 4. Install the dependencies
+
+## Under Unix (macOS, Linux, BSD, ...)
+
+Once you have installed Python 3 and added the source, please open a terminal and
+setup a *virtualenv*, as follows:
+
+```sh
+cd path/where/you/have/cloned/the/repository
+python3 -m venv ./env
+source ./env/bin/activate
+pip install -e ".[all,lint,mypy,test]"
+pip install tox
+```
+
+This will install the developer dependencies for the project.
+
+## Under Windows
+
+TBD
+
+
+# 5. Get in touch.
+
+Join our developer community on Matrix: #synapse-dev:matrix.org !
+
+
+# 6. Pick an issue.
+
+Fix your favorite problem or perhaps find a [Good First Issue](https://github.com/matrix-org/synapse/issues?q=is%3Aopen+is%3Aissue+label%3A%22Good+First+Issue%22)
+to work on.
+
+
+# 7. Turn coffee and documentation into code and documentation!
+
+Synapse's code style is documented [here](docs/code_style.md). Please follow
+it, including the conventions for the [sample configuration
+file](docs/code_style.md#configuration-file-format).
+
+There is a growing amount of documentation located in the [docs](docs)
+directory. This documentation is intended primarily for sysadmins running their
+own Synapse instance, as well as developers interacting externally with
+Synapse. [docs/dev](docs/dev) exists primarily to house documentation for
+Synapse developers. [docs/admin_api](docs/admin_api) houses documentation
+regarding Synapse's Admin API, which is used mostly by sysadmins and external
+service developers.
+
+If you add new files added to either of these folders, please use [GitHub-Flavoured
+Markdown](https://guides.github.com/features/mastering-markdown/).
+
+Some documentation also exists in [Synapse's GitHub
+Wiki](https://github.com/matrix-org/synapse/wiki), although this is primarily
+contributed to by community authors.
+
+
+# 8. Test, test, test!
+<a name="test-test-test"></a>
+
+While you're developing and before submitting a patch, you'll
+want to test your code.
+
+## Run the linters.
+
+The linters look at your code and do two things:
+
+- ensure that your code follows the coding style adopted by the project;
+- catch a number of errors in your code.
+
+They're pretty fast, don't hesitate!
+
+```sh
+source ./env/bin/activate
+./scripts-dev/lint.sh
+```
+
+Note that this script *will modify your files* to fix styling errors.
+Make sure that you have saved all your files.
+
+If you wish to restrict the linters to only the files changed since the last commit
+(much faster!), you can instead run:
+
+```sh
+source ./env/bin/activate
+./scripts-dev/lint.sh -d
+```
+
+Or if you know exactly which files you wish to lint, you can instead run:
+
+```sh
+source ./env/bin/activate
+./scripts-dev/lint.sh path/to/file1.py path/to/file2.py path/to/folder
+```
+
+## Run the unit tests.
+
+The unit tests run parts of Synapse, including your changes, to see if anything
+was broken. They are slower than the linters but will typically catch more errors.
+
+```sh
+source ./env/bin/activate
+trial tests
+```
+
+If you wish to only run *some* unit tests, you may specify
+another module instead of `tests` - or a test class or a method:
+
+```sh
+source ./env/bin/activate
+trial tests.rest.admin.test_room tests.handlers.test_admin.ExfiltrateData.test_invite
+```
+
+If your tests fail, you may wish to look at the logs (the default log level is `ERROR`):
+
+```sh
+less _trial_temp/test.log
+```
+
+To increase the log level for the tests, set `SYNAPSE_TEST_LOG_LEVEL`:
+
+```sh
+SYNAPSE_TEST_LOG_LEVEL=DEBUG trial tests
+```
+
+
+## Run the integration tests.
+
+The integration tests are a more comprehensive suite of tests. They
+run a full version of Synapse, including your changes, to check if
+anything was broken. They are slower than the unit tests but will
+typically catch more errors.
+
+The following command will let you run the integration test with the most common
+configuration:
+
+```sh
+$ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:py37
+```
+
+This configuration should generally cover your needs. For more details about other configurations, see [documentation in the SyTest repo](https://github.com/matrix-org/sytest/blob/develop/docker/README.md).
+
+
+# 9. Submit your patch.
+
+Once you're happy with your patch, it's time to prepare a Pull Request.
+
+To prepare a Pull Request, please:
+
+1. verify that [all the tests pass](#test-test-test), including the coding style;
+2. [sign off](#sign-off) your contribution;
+3. `git push` your commit to your fork of Synapse;
+4. on GitHub, [create the Pull Request](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request);
+5. add a [changelog entry](#changelog) and push it to your Pull Request;
+6. for most contributors, that's all - however, if you are a member of the organization `matrix-org`, on GitHub, please request a review from `matrix.org / Synapse Core`.
+
+
+## Changelog
+
+All changes, even minor ones, need a corresponding changelog / newsfragment
+entry. These are managed by [Towncrier](https://github.com/hawkowl/towncrier).
+
+To create a changelog entry, make a new file in the `changelog.d` directory named
+in the format of `PRnumber.type`. The type can be one of the following:
+
+* `feature`
+* `bugfix`
+* `docker` (for updates to the Docker image)
+* `doc` (for updates to the documentation)
+* `removal` (also used for deprecations)
+* `misc` (for internal-only changes)
+
+This file will become part of our [changelog](
+https://github.com/matrix-org/synapse/blob/master/CHANGES.md) at the next
+release, so the content of the file should be a short description of your
+change in the same style as the rest of the changelog. The file can contain Markdown
+formatting, and should end with a full stop (.) or an exclamation mark (!) for
+consistency.
+
+Adding credits to the changelog is encouraged, we value your
+contributions and would like to have you shouted out in the release notes!
+
+For example, a fix in PR #1234 would have its changelog entry in
+`changelog.d/1234.bugfix`, and contain content like:
+
+> The security levels of Florbs are now validated when received
+> via the `/federation/florb` endpoint. Contributed by Jane Matrix.
+
+If there are multiple pull requests involved in a single bugfix/feature/etc,
+then the content for each `changelog.d` file should be the same. Towncrier will
+merge the matching files together into a single changelog entry when we come to
+release.
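As a worked example of the convention just described, the entry for the hypothetical PR #1234 from the Florb example above could be created like so:

```sh
# Create the changelog entry for the hypothetical PR #1234.
echo "The security levels of Florbs are now validated when received via the \`/federation/florb\` endpoint. Contributed by Jane Matrix." > changelog.d/1234.bugfix
```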
+
+### How do I know what to call the changelog file before I create the PR?
+
+Obviously, you don't know if you should call your newsfile
+`1234.bugfix` or `5678.bugfix` until you create the PR, which leads to a
+chicken-and-egg problem.
+
+There are two options for solving this:
+
+1. Open the PR without a changelog file, see what number you got, and *then*
+   add the changelog file to your branch (see [Updating your pull
+   request](#updating-your-pull-request)), or:
+
+1. Look at the [list of all
+   issues/PRs](https://github.com/matrix-org/synapse/issues?q=), add one to the
+   highest number you see, and quickly open the PR before somebody else claims
+   your number.
+
+[This
+script](https://github.com/richvdh/scripts/blob/master/next_github_number.sh)
+might be helpful if you find yourself doing this a lot.
+
+Sorry, we know it's a bit fiddly, but it's *really* helpful for us when we come
+to put together a release!
+
+### Debian changelog
+
+Changes which affect the debian packaging files (in `debian`) are an
+exception to the rule that all changes require a `changelog.d` file.
+
+In this case, you will need to add an entry to the debian changelog for the
+next release. For this, run the following command:
+
+```
+dch
+```
+
+This will make up a new version number (if there isn't already an unreleased
+version in flight), and open an editor where you can add a new changelog entry.
+(Our release process will ensure that the version number and maintainer name is
+corrected for the release.)
+
+If your change affects both the debian packaging *and* files outside the debian
+directory, you will need both a regular newsfragment *and* an entry in the
+debian changelog. (Though typically such changes should be submitted as two
+separate pull requests.)
+
+## Sign off
+
+In order to have a concrete record that your contribution is intentional
+and you agree to license it under the same terms as the project's license, we've adopted the
+same lightweight approach that the Linux Kernel
+[submitting patches process](
+https://www.kernel.org/doc/html/latest/process/submitting-patches.html#sign-your-work-the-developer-s-certificate-of-origin>),
+[Docker](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), and many other
+projects use: the DCO (Developer Certificate of Origin:
+http://developercertificate.org/). This is a simple declaration that you wrote
+the contribution or otherwise have the right to contribute it to Matrix:
+
+```
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+    have the right to submit it under the open source license
+    indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+    of my knowledge, is covered under an appropriate open source
+    license and I have the right under that license to submit that
+    work with modifications, whether created in whole or in part
+    by me, under the same open source license (unless I am
+    permitted to submit under a different license), as indicated
+    in the file; or
+
+(c) The contribution was provided directly to me by some other
+    person who certified (a), (b) or (c) and I have not modified
+    it.
+
+(d) I understand and agree that this project and the contribution
+    are public and that a record of the contribution (including all
+    personal information I submit with it, including my sign-off) is
+    maintained indefinitely and may be redistributed consistent with
+    this project or the open source license(s) involved.
+```
+
+If you agree to this for your contribution, then all that's needed is to
+include the line in your commit or pull request comment:
+
+```
+Signed-off-by: Your Name <your@email.example.org>
+```
+
+We accept contributions under a legally identifiable name, such as
+your name on government documentation or common-law names (names
+claimed by legitimate usage or repute). Unfortunately, we cannot
+accept anonymous contributions at this time.
+
+Git allows you to add this signoff automatically when using the `-s`
+flag to `git commit`, which uses the name and email set in your
+`user.name` and `user.email` git configs.
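For instance, a sign-off can be added at commit time (the commit message here is illustrative):

```sh
# -s appends a Signed-off-by line using user.name / user.email from git config.
git commit -s -m "Fix a bug that prevented receiving messages from other servers."
```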
+
+
+# 10. Turn feedback into better code.
+
+Once the Pull Request is opened, you will see a few things:
+
+1. our automated CI (Continuous Integration) pipeline will run (again) the linters, the unit tests, the integration tests and more;
+2. one or more of the developers will take a look at your Pull Request and offer feedback.
+
+From this point, you should:
+
+1. Look at the results of the CI pipeline.
+   - If there is any error, fix the error.
+2. If a developer has requested changes, make these changes and let us know if it is ready for a developer to review again.
+3. Create a new commit with the changes.
+   - Please do NOT overwrite the history. New commits make the reviewer's life easier.
+   - Push this commits to your Pull Request.
+4. Back to 1.
+
+Once both the CI and the developers are happy, the patch will be merged into Synapse and released shortly!
+
+# 11. Find a new issue.
+
+By now, you know the drill!
+
+# Notes for maintainers on merging PRs etc
+
+There are some notes for those with commit access to the project on how we
+manage git [here](docs/dev/git.md).
+
+# Conclusion
+
+That's it! Matrix is a very open and collaborative project as you might expect
+given our obsession with open communication. If we're going to successfully
+matrix together all the fragmented communication technologies out there we are
+reliant on contributions and collaboration from the community to do so. So
+please get involved - and we hope you have as much fun hacking on Matrix as we
+do!
MANIFEST.in
@@ -8,7 +8,6 @@ include demo/demo.tls.dh
 include demo/*.py
 include demo/*.sh
 
-include synapse/py.typed
 recursive-include synapse/storage *.sql
 recursive-include synapse/storage *.sql.postgres
 recursive-include synapse/storage *.sql.sqlite
@@ -45,9 +44,9 @@ include book.toml
 include pyproject.toml
 recursive-include changelog.d *
 
+prune .buildkite
 prune .circleci
 prune .github
-prune .ci
 prune contrib
 prune debian
 prune demo/etc
48
README.rst
48
README.rst
@@ -1,6 +1,6 @@
|
||||
=========================================================================
|
||||
Synapse |support| |development| |documentation| |license| |pypi| |python|
|
||||
=========================================================================
=========================================================
Synapse |support| |development| |license| |pypi| |python|
=========================================================

.. contents::

@@ -55,8 +55,11 @@ solutions. The hope is for Matrix to act as the building blocks for a new
generation of fully open and interoperable messaging and VoIP apps for the
internet.

Synapse is a Matrix "homeserver" implementation developed by the matrix.org core
team, written in Python 3/Twisted.
Synapse is a reference "homeserver" implementation of Matrix from the core
development team at matrix.org, written in Python/Twisted. It is intended to
showcase the concept of Matrix and let folks see the spec in the context of a
codebase and let you run your own homeserver and generally help bootstrap the
ecosystem.

In Matrix, every user runs one or more Matrix clients, which connect through to
a Matrix homeserver. The homeserver stores all their personal chat history and
@@ -82,14 +85,9 @@ For support installing or managing Synapse, please join |room|_ (from a matrix.o
account if necessary) and ask questions there. We do not use GitHub issues for
support requests, only for bug reports and feature requests.

Synapse's documentation is `nicely rendered on GitHub Pages <https://matrix-org.github.io/synapse>`_,
with its source available in |docs|_.

.. |room| replace:: ``#synapse:matrix.org``
.. _room: https://matrix.to/#/#synapse:matrix.org

.. |docs| replace:: ``docs``
.. _docs: docs

Synapse Installation
====================
@@ -265,27 +263,11 @@ Then update the ``users`` table in the database::

Synapse Development
===================

The best place to get started is our
`guide for contributors <https://matrix-org.github.io/synapse/latest/development/contributing_guide.html>`_.
This is part of our larger `documentation <https://matrix-org.github.io/synapse/latest>`_, which includes
information for synapse developers as well as synapse administrators.

Developers might be particularly interested in:

* `Synapse's database schema <https://matrix-org.github.io/synapse/latest/development/database_schema.html>`_,
* `notes on Synapse's implementation details <https://matrix-org.github.io/synapse/latest/development/internal_documentation/index.html>`_, and
* `how we use git <https://matrix-org.github.io/synapse/latest/development/git.html>`_.

Alongside all that, join our developer community on Matrix:
`#synapse-dev:matrix.org <https://matrix.to/#/#synapse-dev:matrix.org>`_, featuring real humans!


Quick start
-----------
Join our developer community on Matrix: `#synapse-dev:matrix.org <https://matrix.to/#/#synapse-dev:matrix.org>`_

Before setting up a development environment for synapse, make sure you have the
system dependencies (such as the python header files) installed - see
`Platform-specific prerequisites <https://matrix-org.github.io/synapse/latest/setup/installation.html#platform-specific-prerequisites>`_.
`Installing from source <https://matrix-org.github.io/synapse/latest/setup/installation.html#installing-from-source>`_.

To check out a synapse for development, clone the git repo into a working
directory of your choice::

@@ -298,7 +280,7 @@ to install using pip and a virtualenv::

    python3 -m venv ./env
    source ./env/bin/activate
    pip install -e ".[all,dev]"
    pip install -e ".[all,test]"

This will run a process of downloading and installing all the needed
dependencies into a virtual env. If any dependencies fail to install,
@@ -326,7 +308,7 @@ If you just want to start a single instance of the app and run it directly::


Running the unit tests
----------------------
======================

After getting up and running, you may wish to run Synapse's unit tests to
check that everything is installed correctly::
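The hunk above stops short of the command itself; as a hedged sketch, a run
from the activated virtualenv looks like the following, where the narrower
module path is purely an illustrative assumption::

    source ./env/bin/activate
    trial tests                          # whole unit-test suite
    trial tests.handlers.test_presence   # one module; path is an assumption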
@@ -345,7 +327,7 @@ to see the logging output, see the `CONTRIBUTING doc <CONTRIBUTING.md#run-the-un


Running the Integration Tests
-----------------------------
=============================

Synapse is accompanied by `SyTest <https://github.com/matrix-org/sytest>`_,
a Matrix homeserver integration testing suite, which uses HTTP requests to
@@ -463,10 +445,6 @@ This is normally caused by a misconfiguration in your reverse-proxy. See
   :alt: (discuss development on #synapse-dev:matrix.org)
   :target: https://matrix.to/#/#synapse-dev:matrix.org

.. |documentation| image:: https://img.shields.io/badge/documentation-%E2%9C%93-success
   :alt: (Rendered documentation on GitHub Pages)
   :target: https://matrix-org.github.io/synapse/latest/

.. |license| image:: https://img.shields.io/github/license/matrix-org/synapse
   :alt: (check license in LICENSE file)
   :target: LICENSE
@@ -1,7 +1,7 @@
Upgrading Synapse
=================

This document has moved to the `Synapse documentation website <https://matrix-org.github.io/synapse/latest/upgrade>`_.
This document has moved to the `Synapse documentation website <https://matrix-org.github.io/synapse/latest/upgrading>`_.
Please update your links.

The markdown source is available in `docs/upgrade.md <docs/upgrade.md>`_.
12 book.toml
@@ -34,6 +34,14 @@ additional-css = [
    "docs/website_files/table-of-contents.css",
    "docs/website_files/remove-nav-buttons.css",
    "docs/website_files/indent-section-headers.css",
    "docs/website_files/version-picker.css",
]
additional-js = ["docs/website_files/table-of-contents.js"]
theme = "docs/website_files/theme"
additional-js = [
    "docs/website_files/table-of-contents.js",
    "docs/website_files/version-picker.js",
    "docs/website_files/version.js",
]
theme = "docs/website_files/theme"

[preprocessor.schema_versions]
command = "./scripts-dev/schema_versions.py"
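mdBook is what consumes this file; a hedged sketch of rendering the docs
locally, assuming `mdbook` is installed and you run it from the repository
root:

```bash
mdbook build          # renders the book configured above into ./book
mdbook serve --open   # or preview it with live reload
```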
@@ -1 +0,0 @@
Send and handle cross-signing messages using the stable prefix.
@@ -1 +0,0 @@
A test helper (`wait_for_background_updates`) no longer depends on classes defining a `store` property.
@@ -1 +0,0 @@
Add an admin API endpoint to force a local user to leave all non-public rooms in a space.
@@ -56,7 +56,7 @@ services:
      - POSTGRES_USER=synapse
      - POSTGRES_PASSWORD=changeme
      # ensure the database gets created correctly
      # https://matrix-org.github.io/synapse/latest/postgres.html#set-up-database
      # https://github.com/matrix-org/synapse/blob/master/docs/postgres.md#set-up-database
      - POSTGRES_INITDB_ARGS=--encoding=UTF-8 --lc-collate=C --lc-ctype=C
    volumes:
      # You may store the database tables in a local folder..
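Those `POSTGRES_INITDB_ARGS` exist because Synapse insists on a UTF-8,
C-collation database; a hedged equivalent when provisioning the database by
hand (the role and database names are assumptions about your setup) is:

```bash
# Create the synapse database with the collation Synapse expects;
# "synapse" as owner and name is an assumption, not fixed by this file.
sudo -u postgres psql -c "CREATE DATABASE synapse \
    ENCODING 'UTF8' LC_COLLATE='C' LC_CTYPE='C' \
    template=template0 OWNER synapse;"
```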
@@ -1,6 +1,6 @@
# Using the Synapse Grafana dashboard

0. Set up Prometheus and Grafana. Out of scope for this readme. Useful documentation about using Grafana with Prometheus: http://docs.grafana.org/features/datasources/prometheus/
1. Have your Prometheus scrape your Synapse. https://matrix-org.github.io/synapse/latest/metrics-howto.html
1. Have your Prometheus scrape your Synapse. https://github.com/matrix-org/synapse/blob/master/docs/metrics-howto.md
2. Import dashboard into Grafana. Download `synapse.json`. Import it to Grafana and select the correct Prometheus datasource. http://docs.grafana.org/reference/export_import/
3. Set up required recording rules. [contrib/prometheus](../prometheus)
3. Set up required recording rules. https://github.com/matrix-org/synapse/tree/master/contrib/prometheus
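For step 1, a minimal sketch of the scrape job to append — the config file
location, port and job name are all assumptions about your deployment:

```bash
# Append a Synapse scrape job to an existing Prometheus config.
cat >> /etc/prometheus/prometheus.yml <<'EOF'
  - job_name: synapse
    metrics_path: /_synapse/metrics
    static_configs:
      - targets: ['localhost:9092']
EOF
```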
@@ -54,7 +54,7 @@
"gnetId": null,
"graphTooltip": 0,
"id": null,
"iteration": 1628606819564,
"iteration": 1621258266004,
"links": [
{
"asDropdown": false,
@@ -307,6 +307,7 @@
],
"thresholds": [
{
"$$hashKey": "object:283",
"colorMode": "warning",
"fill": false,
"line": true,
@@ -315,6 +316,7 @@
"yaxis": "left"
},
{
"$$hashKey": "object:284",
"colorMode": "critical",
"fill": false,
"line": true,
@@ -342,6 +344,7 @@
},
"yaxes": [
{
"$$hashKey": "object:255",
"decimals": null,
"format": "s",
"label": "",
@@ -351,6 +354,7 @@
"show": true
},
{
"$$hashKey": "object:256",
"format": "hertz",
"label": "",
"logBase": 1,
@@ -425,6 +429,7 @@
],
"thresholds": [
{
"$$hashKey": "object:566",
"colorMode": "critical",
"fill": true,
"line": true,
@@ -452,6 +457,7 @@
},
"yaxes": [
{
"$$hashKey": "object:538",
"decimals": null,
"format": "percentunit",
"label": null,
@@ -461,6 +467,7 @@
"show": true
},
{
"$$hashKey": "object:539",
"format": "short",
"label": null,
"logBase": 1,
@@ -566,6 +573,7 @@
},
"yaxes": [
{
"$$hashKey": "object:1560",
"format": "bytes",
"logBase": 1,
"max": null,
@@ -573,6 +581,7 @@
"show": true
},
{
"$$hashKey": "object:1561",
"format": "short",
"logBase": 1,
"max": null,
@@ -632,6 +641,7 @@
"renderer": "flot",
"seriesOverrides": [
{
"$$hashKey": "object:639",
"alias": "/max$/",
"color": "#890F02",
"fill": 0,
@@ -683,6 +693,7 @@
},
"yaxes": [
{
"$$hashKey": "object:650",
"decimals": null,
"format": "none",
"label": "",
@@ -692,6 +703,7 @@
"show": true
},
{
"$$hashKey": "object:651",
"decimals": null,
"format": "short",
"label": null,
@@ -771,9 +783,11 @@
"renderer": "flot",
"seriesOverrides": [
{
"$$hashKey": "object:1240",
"alias": "/user/"
},
{
"$$hashKey": "object:1241",
"alias": "/system/"
}
],
@@ -803,6 +817,7 @@
],
"thresholds": [
{
"$$hashKey": "object:1278",
"colorMode": "custom",
"fillColor": "rgba(255, 255, 255, 1)",
"line": true,
@@ -812,6 +827,7 @@
"yaxis": "left"
},
{
"$$hashKey": "object:1279",
"colorMode": "custom",
"fillColor": "rgba(255, 255, 255, 1)",
"line": true,
@@ -821,6 +837,7 @@
"yaxis": "left"
},
{
"$$hashKey": "object:1498",
"colorMode": "critical",
"fill": true,
"line": true,
@@ -848,6 +865,7 @@
},
"yaxes": [
{
"$$hashKey": "object:1250",
"decimals": null,
"format": "percentunit",
"label": "",
@@ -857,6 +875,7 @@
"show": true
},
{
"$$hashKey": "object:1251",
"format": "short",
"logBase": 1,
"max": null,
@@ -1408,6 +1427,7 @@
},
"yaxes": [
{
"$$hashKey": "object:572",
"format": "percentunit",
"label": null,
"logBase": 1,
@@ -1416,6 +1436,7 @@
"show": true
},
{
"$$hashKey": "object:573",
"format": "short",
"label": null,
"logBase": 1,
@@ -1699,6 +1720,7 @@
},
"yaxes": [
{
"$$hashKey": "object:102",
"format": "hertz",
"logBase": 1,
"max": null,
@@ -1706,6 +1728,7 @@
"show": true
},
{
"$$hashKey": "object:103",
"format": "short",
"logBase": 1,
"max": null,
@@ -3402,7 +3425,7 @@
"h": 9,
"w": 12,
"x": 0,
"y": 6
"y": 33
},
"hiddenSeries": false,
"id": 79,
@@ -3419,12 +3442,9 @@
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {
"alertThreshold": true
},
"paceLength": 10,
"percentage": false,
"pluginVersion": "7.3.7",
"pluginVersion": "7.1.3",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -3506,7 +3526,7 @@
"h": 9,
"w": 12,
"x": 12,
"y": 6
"y": 33
},
"hiddenSeries": false,
"id": 83,
@@ -3523,12 +3543,9 @@
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {
"alertThreshold": true
},
"paceLength": 10,
"percentage": false,
"pluginVersion": "7.3.7",
"pluginVersion": "7.1.3",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -3612,7 +3629,7 @@
"h": 9,
"w": 12,
"x": 0,
"y": 15
"y": 42
},
"hiddenSeries": false,
"id": 109,
@@ -3629,12 +3646,9 @@
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {
"alertThreshold": true
},
"paceLength": 10,
"percentage": false,
"pluginVersion": "7.3.7",
"pluginVersion": "7.1.3",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -3719,7 +3733,7 @@
"h": 9,
"w": 12,
"x": 12,
"y": 15
"y": 42
},
"hiddenSeries": false,
"id": 111,
@@ -3736,12 +3750,9 @@
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {
"alertThreshold": true
},
"paceLength": 10,
"percentage": false,
"pluginVersion": "7.3.7",
"pluginVersion": "7.1.3",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -3820,7 +3831,7 @@
"h": 8,
"w": 12,
"x": 0,
"y": 24
"y": 51
},
"hiddenSeries": false,
"id": 142,
@@ -3836,11 +3847,8 @@
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
"options": {
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "7.3.7",
"pluginVersion": "7.1.3",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -3923,7 +3931,7 @@
"h": 9,
"w": 12,
"x": 12,
"y": 24
"y": 51
},
"hiddenSeries": false,
"id": 140,
@@ -3940,12 +3948,9 @@
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {
"alertThreshold": true
},
"paceLength": 10,
"percentage": false,
"pluginVersion": "7.3.7",
"pluginVersion": "7.1.3",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -4074,7 +4079,7 @@
"h": 9,
"w": 12,
"x": 0,
"y": 32
"y": 59
},
"heatmap": {},
"hideZeroBuckets": false,
@@ -4140,7 +4145,7 @@
"h": 9,
"w": 12,
"x": 12,
"y": 33
"y": 60
},
"hiddenSeries": false,
"id": 162,
@@ -4158,12 +4163,9 @@
"linewidth": 0,
"links": [],
"nullPointMode": "connected",
"options": {
"alertThreshold": true
},
"paceLength": 10,
"percentage": false,
"pluginVersion": "7.3.7",
"pluginVersion": "7.1.3",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -4348,7 +4350,7 @@
"h": 9,
"w": 12,
"x": 0,
"y": 41
"y": 68
},
"heatmap": {},
"hideZeroBuckets": false,
@@ -4394,311 +4396,6 @@
"yBucketBound": "auto",
"yBucketNumber": null,
"yBucketSize": null
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
"custom": {},
"links": []
},
"overrides": []
},
"fill": 1,
"fillGradient": 0,
"grid": {},
"gridPos": {
"h": 9,
"w": 12,
"x": 12,
"y": 42
},
"hiddenSeries": false,
"id": 203,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 2,
"links": [],
"nullPointMode": "null",
"options": {
"alertThreshold": true
},
"paceLength": 10,
"percentage": false,
"pluginVersion": "7.3.7",
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "synapse_federation_server_oldest_inbound_pdu_in_staging{job=\"$job\",index=~\"$index\",instance=\"$instance\"}",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
"legendFormat": "rss {{index}}",
"refId": "A",
"step": 4
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Age of oldest event in staging area",
"tooltip": {
"msResolution": false,
"shared": true,
"sort": 0,
"value_type": "cumulative"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "ms",
"label": null,
"logBase": 1,
"max": null,
"min": 0,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
"custom": {},
"links": []
},
"overrides": []
},
"fill": 1,
"fillGradient": 0,
"grid": {},
"gridPos": {
"h": 9,
"w": 12,
"x": 0,
"y": 50
},
"hiddenSeries": false,
"id": 202,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 2,
"links": [],
"nullPointMode": "null",
"options": {
"alertThreshold": true
},
"paceLength": 10,
"percentage": false,
"pluginVersion": "7.3.7",
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "synapse_federation_server_number_inbound_pdu_in_staging{job=\"$job\",index=~\"$index\",instance=\"$instance\"}",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
"legendFormat": "rss {{index}}",
"refId": "A",
"step": 4
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Number of events in federation staging area",
"tooltip": {
"msResolution": false,
"shared": true,
"sort": 0,
"value_type": "cumulative"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "none",
"label": null,
"logBase": 1,
"max": null,
"min": 0,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "${DS_PROMETHEUS}",
"fieldConfig": {
"defaults": {
"custom": {}
},
"overrides": []
},
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 51
},
"hiddenSeries": false,
"id": 205,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
"options": {
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "7.3.7",
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "sum(rate(synapse_federation_soft_failed_events_total{instance=\"$instance\"}[$bucket_size]))",
"interval": "",
"legendFormat": "soft-failed events",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Soft-failed event rate",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "hertz",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": false
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
}
],
"title": "Federation",
@@ -4950,7 +4647,7 @@
"h": 7,
"w": 12,
"x": 0,
"y": 33
"y": 8
},
"hiddenSeries": false,
"id": 48,
@@ -5052,7 +4749,7 @@
"h": 7,
"w": 12,
"x": 12,
"y": 33
"y": 8
},
"hiddenSeries": false,
"id": 104,
@@ -5180,7 +4877,7 @@
"h": 7,
"w": 12,
"x": 0,
"y": 40
"y": 15
},
"hiddenSeries": false,
"id": 10,
@@ -5284,7 +4981,7 @@
"h": 7,
"w": 12,
"x": 12,
"y": 40
"y": 15
},
"hiddenSeries": false,
"id": 11,
@@ -5389,7 +5086,7 @@
"h": 7,
"w": 12,
"x": 0,
"y": 47
"y": 22
},
"hiddenSeries": false,
"id": 180,
@@ -5471,126 +5168,6 @@
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
"fieldConfig": {
"defaults": {
"custom": {},
"links": []
},
"overrides": []
},
"fill": 6,
"fillGradient": 0,
"gridPos": {
"h": 9,
"w": 12,
"x": 12,
"y": 47
},
"hiddenSeries": false,
"id": 200,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "7.3.7",
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "histogram_quantile(0.99, sum(rate(synapse_storage_schedule_time_bucket{index=~\"$index\",instance=\"$instance\",job=\"$job\"}[$bucket_size])) by (le))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "99%",
"refId": "D"
},
{
"expr": "histogram_quantile(0.9, sum(rate(synapse_storage_schedule_time_bucket{index=~\"$index\",instance=\"$instance\",job=\"$job\"}[$bucket_size])) by (le))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "90%",
"refId": "A"
},
{
"expr": "histogram_quantile(0.75, sum(rate(synapse_storage_schedule_time_bucket{index=~\"$index\",instance=\"$instance\",job=\"$job\"}[$bucket_size])) by (le))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "75%",
"refId": "C"
},
{
"expr": "histogram_quantile(0.5, sum(rate(synapse_storage_schedule_time_bucket{index=~\"$index\",instance=\"$instance\",job=\"$job\"}[$bucket_size])) by (le))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "50%",
"refId": "B"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Time waiting for DB connection quantiles",
"tooltip": {
"shared": true,
"sort": 2,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"decimals": null,
"format": "s",
"label": "",
"logBase": 1,
"max": null,
"min": "0",
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": false
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
}
],
"repeat": null,
@@ -6339,7 +5916,7 @@
"h": 10,
"w": 12,
"x": 0,
"y": 35
"y": 84
},
"hiddenSeries": false,
"id": 1,
@@ -6445,7 +6022,7 @@
"h": 10,
"w": 12,
"x": 12,
"y": 35
"y": 84
},
"hiddenSeries": false,
"id": 8,
@@ -6549,7 +6126,7 @@
"h": 10,
"w": 12,
"x": 0,
"y": 45
"y": 94
},
"hiddenSeries": false,
"id": 38,
@@ -6649,7 +6226,7 @@
"h": 10,
"w": 12,
"x": 12,
"y": 45
"y": 94
},
"hiddenSeries": false,
"id": 39,
@@ -6681,9 +6258,8 @@
"steppedLine": false,
"targets": [
{
"expr": "topk(10, rate(synapse_util_caches_cache:total{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]) - rate(synapse_util_caches_cache:hits{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]))",
"expr": "topk(10, rate(synapse_util_caches_cache:total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]) - rate(synapse_util_caches_cache:hits{job=\"$job\",instance=\"$instance\"}[$bucket_size]))",
"format": "time_series",
"interval": "",
"intervalFactor": 2,
"legendFormat": "{{name}} {{job}}-{{index}}",
"refId": "A",
@@ -6750,7 +6326,7 @@
"h": 9,
"w": 12,
"x": 0,
"y": 55
"y": 104
},
"hiddenSeries": false,
"id": 65,
@@ -6785,7 +6361,7 @@
"expr": "rate(synapse_util_caches_cache:evicted_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{name}} ({{reason}}) {{job}}-{{index}}",
"legendFormat": "{{name}} {{job}}-{{index}}",
"refId": "A"
}
],
@@ -9475,7 +9051,7 @@
"h": 8,
"w": 12,
"x": 0,
"y": 41
"y": 119
},
"hiddenSeries": false,
"id": 156,
@@ -9513,7 +9089,7 @@
"steppedLine": false,
"targets": [
{
"expr": "synapse_admin_mau:current{instance=\"$instance\", job=~\"$job\"}",
"expr": "synapse_admin_mau:current{instance=\"$instance\"}",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -9521,7 +9097,7 @@
"refId": "A"
},
{
"expr": "synapse_admin_mau:max{instance=\"$instance\", job=~\"$job\"}",
"expr": "synapse_admin_mau:max{instance=\"$instance\"}",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -9588,7 +9164,7 @@
"h": 8,
"w": 12,
"x": 12,
"y": 41
"y": 119
},
"hiddenSeries": false,
"id": 160,
@@ -9908,7 +9484,7 @@
"h": 8,
"w": 12,
"x": 0,
"y": 43
"y": 73
},
"hiddenSeries": false,
"id": 168,
@@ -9940,7 +9516,7 @@
{
"expr": "rate(synapse_appservice_api_sent_events{instance=\"$instance\"}[$bucket_size])",
"interval": "",
"legendFormat": "{{service}}",
"legendFormat": "{{exported_service}}",
"refId": "A"
}
],
@@ -10003,7 +9579,7 @@
"h": 8,
"w": 12,
"x": 12,
"y": 43
"y": 73
},
"hiddenSeries": false,
"id": 171,
@@ -10035,7 +9611,7 @@
{
"expr": "rate(synapse_appservice_api_sent_transactions{instance=\"$instance\"}[$bucket_size])",
"interval": "",
"legendFormat": "{{service}}",
"legendFormat": "{{exported_service}}",
"refId": "A"
}
],
@@ -10383,6 +9959,7 @@
},
"yaxes": [
{
"$$hashKey": "object:165",
"format": "hertz",
"label": null,
"logBase": 1,
@@ -10391,6 +9968,7 @@
"show": true
},
{
"$$hashKey": "object:166",
"format": "short",
"label": null,
"logBase": 1,
@@ -10493,6 +10071,7 @@
},
"yaxes": [
{
"$$hashKey": "object:390",
"format": "hertz",
"label": null,
"logBase": 1,
@@ -10501,6 +10080,7 @@
"show": true
},
{
"$$hashKey": "object:391",
"format": "short",
"label": null,
"logBase": 1,
@@ -10589,6 +10169,7 @@
},
"yaxes": [
{
"$$hashKey": "object:390",
"format": "hertz",
"label": null,
"logBase": 1,
@@ -10597,6 +10178,7 @@
"show": true
},
{
"$$hashKey": "object:391",
"format": "short",
"label": null,
"logBase": 1,
@@ -10888,5 +10470,5 @@
"timezone": "",
"title": "Synapse",
"uid": "000000012",
"version": 100
"version": 90
}
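Many of the panel expressions above lean on recording rules such as
`synapse_admin_mau:current`; a hedged way to sanity-check one of them straight
against Prometheus, where the host and port are assumptions about your setup:

```bash
# Query one dashboard expression directly via Prometheus' HTTP API.
curl -sG 'http://localhost:9090/api/v1/query' \
    --data-urlencode 'query=synapse_admin_mau:current'
```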
@@ -34,7 +34,7 @@ Add a new job to the main prometheus.yml file:
```

An example of a Prometheus configuration with workers can be found in
[metrics-howto.md](https://matrix-org.github.io/synapse/latest/metrics-howto.html).
[metrics-howto.md](https://github.com/matrix-org/synapse/blob/master/docs/metrics-howto.md).

To use `synapse.rules` add
@@ -3,9 +3,8 @@ Purge history API examples

# `purge_history.sh`

A bash script that uses the
[purge history API](https://matrix-org.github.io/synapse/latest/admin_api/purge_history_api.html)
to purge all messages in a list of rooms up to a certain event. You can select a
A bash script that uses the [purge history API](/docs/admin_api/purge_history_api.rst) to
purge all messages in a list of rooms up to a certain event. You can select a
timeframe or a number of messages that you want to keep in the room.

Just configure the variables DOMAIN, ADMIN, ROOMS_ARRAY and TIME at the top of
@@ -13,6 +12,5 @@ the script.

# `purge_remote_media.sh`

A bash script that uses the
[purge history API](https://matrix-org.github.io/synapse/latest/admin_api/purge_history_api.html)
to purge all old cached remote media.
A bash script that uses the [purge history API](/docs/admin_api/purge_history_api.rst) to
purge all old cached remote media.
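As noted above, `purge_history.sh` is driven by a handful of variables at the
top of the script; illustrative values — every one an assumption about your
deployment — might look like:

```bash
# Example configuration only; substitute your own homeserver details.
DOMAIN=matrix.example.com
ADMIN="@admin:matrix.example.com"
ROOMS_ARRAY=('!room1:matrix.example.com' '!room2:matrix.example.com')
TIME='365 days ago'   # keep only messages newer than this
```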
@@ -1,7 +1,7 @@
#!/usr/bin/env bash

# this script will use the api:
# https://matrix-org.github.io/synapse/latest/admin_api/purge_history_api.html
# https://github.com/matrix-org/synapse/blob/master/docs/admin_api/purge_history_api.rst
#
# It will purge all messages in a list of rooms up to a certain event

@@ -84,9 +84,7 @@ AUTH="Authorization: Bearer $TOKEN"
###################################################################################################
# finally start pruning the room:
###################################################################################################
# this will really delete local events, so the messages in the room really
# disappear unless they are restored by remote federation. This is because
# we pass {"delete_local_events":true} to the curl invocation below.
POSTDATA='{"delete_local_events":"true"}' # this will really delete local events, so the messages in the room really disappear unless they are restored by remote federation

for ROOM in "${ROOMS_ARRAY[@]}"; do
    echo "########################################### $(date) ################# "
@@ -106,7 +104,7 @@ for ROOM in "${ROOMS_ARRAY[@]}"; do
    SLEEP=2
    set -x
    # call purge
    OUT=$(curl --header "$AUTH" -s -d '{"delete_local_events":true}' POST "$API_URL/admin/purge_history/$ROOM/$EVENT_ID")
    OUT=$(curl --header "$AUTH" -s -d $POSTDATA POST "$API_URL/admin/purge_history/$ROOM/$EVENT_ID")
    PURGE_ID=$(echo "$OUT" |grep purge_id|cut -d'"' -f4 )
    if [ "$PURGE_ID" == "" ]; then
        # probably the history purge is already in progress for $ROOM
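Stripped of the loop and bookkeeping, the request the script wraps reduces to
roughly the following — the URL shape matches the purge history admin API,
while the hostname, token and IDs are placeholders you must supply:

```bash
# Hedged sketch of a single purge request; note the explicit -X POST.
curl -s -X POST \
    --header "Authorization: Bearer YOUR_ADMIN_TOKEN" \
    -d '{"delete_local_events": true}' \
    "https://matrix.example.com/_synapse/admin/v1/purge_history/$ROOM/$EVENT_ID"
```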
@@ -1,3 +1,2 @@
The documentation for using systemd to manage synapse workers is now part of
the main synapse distribution. See
[docs/systemd-with-workers](https://matrix-org.github.io/synapse/latest/systemd-with-workers/index.html).
the main synapse distribution. See [docs/systemd-with-workers](../../docs/systemd-with-workers).
30 debian/build_virtualenv vendored
@@ -15,7 +15,7 @@ export DH_VIRTUALENV_INSTALL_ROOT=/opt/venvs
# python won't look in the right directory. At least this way, the error will
# be a *bit* more obvious.
#
SNAKE=$(readlink -e /usr/bin/python3)
SNAKE=`readlink -e /usr/bin/python3`

# try to set the CFLAGS so any compiled C extensions are compiled with the most
# generic as possible x64 instructions, so that compiling it on a new Intel chip
@@ -24,7 +24,7 @@ SNAKE=$(readlink -e /usr/bin/python3)
# TODO: add similar things for non-amd64, or figure out a more generic way to
# do this.

case $(dpkg-architecture -q DEB_HOST_ARCH) in
case `dpkg-architecture -q DEB_HOST_ARCH` in
    amd64)
        export CFLAGS=-march=x86-64
        ;;
@@ -33,14 +33,15 @@ esac
# Use --builtin-venv to use the better `venv` module from CPython 3.4+ rather
# than the 2/3 compatible `virtualenv`.

# Pin pip to 20.3.4 to fix breakage in 21.0 on py3.5 (xenial)

dh_virtualenv \
    --install-suffix "matrix-synapse" \
    --builtin-venv \
    --python "$SNAKE" \
    --upgrade-pip \
    --upgrade-pip-to="20.3.4" \
    --preinstall="lxml" \
    --preinstall="mock" \
    --preinstall="wheel" \
    --extra-pip-arg="--no-cache-dir" \
    --extra-pip-arg="--compile" \
    --extras="all,systemd,test"
@@ -57,8 +58,8 @@ case "$DEB_BUILD_OPTIONS" in
    *)
        # Copy tests to a temporary directory so that we can put them on the
        # PYTHONPATH without putting the uninstalled synapse on the pythonpath.
        tmpdir=$(mktemp -d)
        trap 'rm -r $tmpdir' EXIT
        tmpdir=`mktemp -d`
        trap "rm -r $tmpdir" EXIT

        cp -r tests "$tmpdir"

@@ -99,20 +100,5 @@ esac
    --output-file="${PACKAGE_BUILD_DIR}/etc/matrix-synapse/log.yaml"

# add a dependency on the right version of python to substvars.
PYPKG=$(basename "$SNAKE")
PYPKG=`basename $SNAKE`
echo "synapse:pydepends=$PYPKG" >> debian/matrix-synapse-py3.substvars


# add a couple of triggers. This is needed so that dh-virtualenv can rebuild
# the venv when the system python changes (see
# https://dh-virtualenv.readthedocs.io/en/latest/tutorial.html#step-2-set-up-packaging-for-your-project)
#
# we do it here rather than the more conventional way of just adding it to
# debian/matrix-synapse-py3.triggers, because we need to add a trigger on the
# right version of python.
cat >>"debian/.debhelper/generated/matrix-synapse-py3/triggers" <<EOF
# triggers for dh-virtualenv
interest-noawait $SNAKE
interest dh-virtualenv-interpreter-update

EOF
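`debian/build_virtualenv` is not normally run by hand; it is invoked from
`debian/rules` during a package build, which — as a hedged sketch with typical
rather than prescribed flags — is kicked off like so:

```bash
# Unsigned binary-only build from the source tree; debian/rules routes
# through debian/build_virtualenv via its dh_virtualenv override.
dpkg-buildpackage -us -uc -b
```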
220 debian/changelog vendored
@@ -1,229 +1,9 @@
matrix-synapse-py3 (1.49.0~rc1) stable; urgency=medium

  * New synapse release 1.49.0~rc1.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 07 Dec 2021 13:52:21 +0000

matrix-synapse-py3 (1.48.0) stable; urgency=medium

  * New synapse release 1.48.0.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 30 Nov 2021 11:24:15 +0000

matrix-synapse-py3 (1.48.0~rc1) stable; urgency=medium

  * New synapse release 1.48.0~rc1.

 -- Synapse Packaging team <packages@matrix.org>  Thu, 25 Nov 2021 15:56:03 +0000

matrix-synapse-py3 (1.47.1) stable; urgency=medium

  * New synapse release 1.47.1.

 -- Synapse Packaging team <packages@matrix.org>  Fri, 19 Nov 2021 13:44:32 +0000

matrix-synapse-py3 (1.47.0) stable; urgency=medium

  * New synapse release 1.47.0.

 -- Synapse Packaging team <packages@matrix.org>  Wed, 17 Nov 2021 13:09:43 +0000

matrix-synapse-py3 (1.47.0~rc3) stable; urgency=medium

  * New synapse release 1.47.0~rc3.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 16 Nov 2021 14:32:47 +0000

matrix-synapse-py3 (1.47.0~rc2) stable; urgency=medium

  [ Dan Callahan ]
  * Update scripts to pass Shellcheck lints.
  * Remove unused Vagrant scripts from debian/ directory.
  * Allow building Debian packages for any architecture, not just amd64.
  * Preinstall the "wheel" package when building virtualenvs.
  * Do not error if /etc/default/matrix-synapse is missing.

  [ Synapse Packaging team ]
  * New synapse release 1.47.0~rc2.

 -- Synapse Packaging team <packages@matrix.org>  Wed, 10 Nov 2021 09:41:01 +0000

matrix-synapse-py3 (1.46.0) stable; urgency=medium

  [ Richard van der Hoff ]
  * Compress debs with xz, to fix incompatibility of impish debs with reprepro.

  [ Synapse Packaging team ]
  * New synapse release 1.46.0.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 02 Nov 2021 13:22:53 +0000

matrix-synapse-py3 (1.46.0~rc1) stable; urgency=medium

  * New synapse release 1.46.0~rc1.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 26 Oct 2021 14:04:04 +0100

matrix-synapse-py3 (1.45.1) stable; urgency=medium

  * New synapse release 1.45.1.

 -- Synapse Packaging team <packages@matrix.org>  Wed, 20 Oct 2021 11:58:27 +0100

matrix-synapse-py3 (1.45.0) stable; urgency=medium

  * New synapse release 1.45.0.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 19 Oct 2021 11:18:53 +0100

matrix-synapse-py3 (1.45.0~rc2) stable; urgency=medium

  * New synapse release 1.45.0~rc2.

 -- Synapse Packaging team <packages@matrix.org>  Thu, 14 Oct 2021 10:58:24 +0100

matrix-synapse-py3 (1.45.0~rc1) stable; urgency=medium

  [ Nick @ Beeper ]
  * Include an `update_synapse_database` script in the distribution.

  [ Synapse Packaging team ]
  * New synapse release 1.45.0~rc1.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 12 Oct 2021 10:46:27 +0100

matrix-synapse-py3 (1.44.0) stable; urgency=medium

  * New synapse release 1.44.0.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 05 Oct 2021 13:43:57 +0100

matrix-synapse-py3 (1.44.0~rc3) stable; urgency=medium

  * New synapse release 1.44.0~rc3.

 -- Synapse Packaging team <packages@matrix.org>  Mon, 04 Oct 2021 14:57:22 +0100

matrix-synapse-py3 (1.44.0~rc2) stable; urgency=medium

  * New synapse release 1.44.0~rc2.

 -- Synapse Packaging team <packages@matrix.org>  Thu, 30 Sep 2021 12:39:10 +0100

matrix-synapse-py3 (1.44.0~rc1) stable; urgency=medium

  * New synapse release 1.44.0~rc1.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 28 Sep 2021 13:41:28 +0100

matrix-synapse-py3 (1.43.0) stable; urgency=medium

  * New synapse release 1.43.0.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 21 Sep 2021 11:49:05 +0100

matrix-synapse-py3 (1.43.0~rc2) stable; urgency=medium

  * New synapse release 1.43.0~rc2.

 -- Synapse Packaging team <packages@matrix.org>  Fri, 17 Sep 2021 10:43:21 +0100

matrix-synapse-py3 (1.43.0~rc1) stable; urgency=medium

  * New synapse release 1.43.0~rc1.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 14 Sep 2021 11:39:46 +0100

matrix-synapse-py3 (1.42.0) stable; urgency=medium

  * New synapse release 1.42.0.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 07 Sep 2021 16:19:09 +0100

matrix-synapse-py3 (1.42.0~rc2) stable; urgency=medium

  * New synapse release 1.42.0~rc2.

 -- Synapse Packaging team <packages@matrix.org>  Mon, 06 Sep 2021 15:25:13 +0100

matrix-synapse-py3 (1.42.0~rc1) stable; urgency=medium

  * New synapse release 1.42.0rc1.

 -- Synapse Packaging team <packages@matrix.org>  Wed, 01 Sep 2021 11:37:48 +0100

matrix-synapse-py3 (1.41.1) stable; urgency=high

  * New synapse release 1.41.1.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 31 Aug 2021 12:59:10 +0100

matrix-synapse-py3 (1.41.0) stable; urgency=medium

  * New synapse release 1.41.0.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 24 Aug 2021 15:31:45 +0100

matrix-synapse-py3 (1.41.0~rc1) stable; urgency=medium

  * New synapse release 1.41.0~rc1.

 -- Synapse Packaging team <packages@matrix.org>  Wed, 18 Aug 2021 15:52:00 +0100

matrix-synapse-py3 (1.40.0) stable; urgency=medium

  * New synapse release 1.40.0.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 10 Aug 2021 13:50:48 +0100

matrix-synapse-py3 (1.40.0~rc3) stable; urgency=medium

  * New synapse release 1.40.0~rc3.

 -- Synapse Packaging team <packages@matrix.org>  Mon, 09 Aug 2021 13:41:08 +0100

matrix-synapse-py3 (1.40.0~rc2) stable; urgency=medium

  * New synapse release 1.40.0~rc2.

 -- Synapse Packaging team <packages@matrix.org>  Wed, 04 Aug 2021 17:08:55 +0100

matrix-synapse-py3 (1.40.0~rc1) stable; urgency=medium

  [ Richard van der Hoff ]
  * Drop backwards-compatibility code that was required to support Ubuntu Xenial.
  * Update package triggers so that the virtualenv is correctly rebuilt
    when the system python is rebuilt, on recent Python versions.

  [ Synapse Packaging team ]
  * New synapse release 1.40.0~rc1.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 03 Aug 2021 11:31:49 +0100

matrix-synapse-py3 (1.39.0) stable; urgency=medium

  * New synapse release 1.39.0.

 -- Synapse Packaging team <packages@matrix.org>  Thu, 29 Jul 2021 09:59:00 +0100

matrix-synapse-py3 (1.39.0~rc3) stable; urgency=medium

  * New synapse release 1.39.0~rc3.

 -- Synapse Packaging team <packages@matrix.org>  Wed, 28 Jul 2021 13:30:58 +0100

matrix-synapse-py3 (1.38.1) stable; urgency=medium

  * New synapse release 1.38.1.

 -- Synapse Packaging team <packages@matrix.org>  Thu, 22 Jul 2021 15:37:06 +0100

matrix-synapse-py3 (1.39.0~rc1) stable; urgency=medium

  * New synapse release 1.39.0rc1.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 20 Jul 2021 14:28:34 +0100

matrix-synapse-py3 (1.38.0) stable; urgency=medium

  * New synapse release 1.38.0.
2 debian/compat vendored
@@ -1 +1 @@
10
9
7 debian/control vendored
@@ -3,8 +3,11 @@ Section: contrib/python
Priority: extra
Maintainer: Synapse Packaging team <packages@matrix.org>
# keep this list in sync with the build dependencies in docker/Dockerfile-dhvirtualenv.
# TODO: Remove the dependency on dh-systemd after dropping support for Ubuntu xenial
# On all other supported releases, it's merely a transitional package which
# does nothing but depends on debhelper (> 9.20160709)
Build-Depends:
 debhelper (>= 10),
 debhelper (>= 9.20160709) | dh-systemd,
 dh-virtualenv (>= 1.1),
 libsystemd-dev,
 libpq-dev,
@@ -19,7 +22,7 @@ Standards-Version: 3.9.8
Homepage: https://github.com/matrix-org/synapse

Package: matrix-synapse-py3
Architecture: any
Architecture: amd64
Provides: matrix-synapse
Conflicts:
 matrix-synapse (<< 0.34.0.1-0matrix2),
1 debian/matrix-synapse-py3.config vendored
@@ -2,7 +2,6 @@

set -e

# shellcheck disable=SC1091
. /usr/share/debconf/confmodule

# try to update the debconf db according to whatever is in the config files
1 debian/matrix-synapse-py3.links vendored
@@ -3,4 +3,3 @@ opt/venvs/matrix-synapse/bin/register_new_matrix_user usr/bin/register_new_matri
opt/venvs/matrix-synapse/bin/synapse_port_db usr/bin/synapse_port_db
opt/venvs/matrix-synapse/bin/synapse_review_recent_signups usr/bin/synapse_review_recent_signups
opt/venvs/matrix-synapse/bin/synctl usr/bin/synctl
opt/venvs/matrix-synapse/bin/update_synapse_database usr/bin/update_synapse_database
1 debian/matrix-synapse-py3.postinst vendored
@@ -1,6 +1,5 @@
#!/bin/sh -e

# shellcheck disable=SC1091
. /usr/share/debconf/confmodule

CONFIGFILE_SERVERNAME="/etc/matrix-synapse/conf.d/server_name.yaml"
9 debian/matrix-synapse-py3.triggers vendored Normal file
@@ -0,0 +1,9 @@
# Register interest in Python interpreter changes and
# don't make the Python package dependent on the virtualenv package
# processing (noawait)
interest-noawait /usr/bin/python3.5
interest-noawait /usr/bin/python3.6
interest-noawait /usr/bin/python3.7

# Also provide a symbolic trigger for all dh-virtualenv packages
interest dh-virtualenv-interpreter-update
2 debian/matrix-synapse.service vendored
@@ -5,7 +5,7 @@ Description=Synapse Matrix homeserver
Type=notify
User=matrix-synapse
WorkingDirectory=/var/lib/matrix-synapse
EnvironmentFile=-/etc/default/matrix-synapse
EnvironmentFile=/etc/default/matrix-synapse
ExecStartPre=/opt/venvs/matrix-synapse/bin/python -m synapse.app.homeserver --config-path=/etc/matrix-synapse/homeserver.yaml --config-path=/etc/matrix-synapse/conf.d/ --generate-keys
ExecStart=/opt/venvs/matrix-synapse/bin/python -m synapse.app.homeserver --config-path=/etc/matrix-synapse/homeserver.yaml --config-path=/etc/matrix-synapse/conf.d/
ExecReload=/bin/kill -HUP $MAINPID
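With this unit installed, routine control is ordinary systemd, using the
service name the package ships:

```bash
sudo systemctl restart matrix-synapse   # full restart
sudo systemctl reload matrix-synapse    # SIGHUP, per ExecReload above
journalctl -u matrix-synapse -f         # follow the logs
```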
10 debian/rules vendored
@@ -51,11 +51,7 @@ override_dh_shlibdeps:
override_dh_virtualenv:
	./debian/build_virtualenv

override_dh_builddeb:
	# force the compression to xzip, to stop dpkg-deb on impish defaulting to zstd
	# (which requires reprepro 5.3.0-1.3, which is currently only in 'experimental' in Debian:
	# https://metadata.ftp-master.debian.org/changelogs/main/r/reprepro/reprepro_5.3.0-1.3_changelog)
	dh_builddeb -- -Zxz

# We are restricted to compat level 9 (because xenial), so have to
# enable the systemd bits manually.
%:
	dh $@ --with python-virtualenv
	dh $@ --with python-virtualenv --with systemd
2 debian/test/.gitignore vendored Normal file
@@ -0,0 +1,2 @@
.vagrant
*.log
23 debian/test/provision.sh vendored Normal file
@@ -0,0 +1,23 @@
#!/bin/bash
#
# provisioning script for vagrant boxes for testing the matrix-synapse debs.
#
# Will install the most recent matrix-synapse-py3 deb for this platform from
# the /debs directory.

set -e

apt-get update
apt-get install -y lsb-release

deb=`ls /debs/matrix-synapse-py3_*+$(lsb_release -cs)*.deb | sort | tail -n1`

debconf-set-selections <<EOF
matrix-synapse matrix-synapse/report-stats boolean false
matrix-synapse matrix-synapse/server-name string localhost:18448
EOF

dpkg -i "$deb"

sed -i -e '/port: 8...$/{s/8448/18448/; s/8008/18008/}' -e '$aregistration_shared_secret: secret' /etc/matrix-synapse/homeserver.yaml
systemctl restart matrix-synapse
13 debian/test/stretch/Vagrantfile vendored Normal file
@@ -0,0 +1,13 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

ver = `cd ../../..; dpkg-parsechangelog -S Version`.strip()

Vagrant.configure("2") do |config|
  config.vm.box = "debian/stretch64"

  config.vm.synced_folder ".", "/vagrant", disabled: true
  config.vm.synced_folder "../../../../debs", "/debs", type: "nfs"

  config.vm.provision "shell", path: "../provision.sh"
end
10 debian/test/xenial/Vagrantfile vendored Normal file
@@ -0,0 +1,10 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

Vagrant.configure("2") do |config|
  config.vm.box = "ubuntu/xenial64"

  config.vm.synced_folder ".", "/vagrant", disabled: true
  config.vm.synced_folder "../../../../debs", "/debs"
  config.vm.provision "shell", path: "../provision.sh"
end
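Assuming Vagrant plus a matching provider is installed, and that the debs have
already been built into the `../../../../debs` folder these Vagrantfiles sync,
exercising a test box is a sketch like:

```bash
cd debian/test/stretch   # or debian/test/xenial
vagrant up               # provisions the box via ../provision.sh
vagrant ssh              # inspect the installed matrix-synapse-py3
vagrant destroy          # tear the box down afterwards
```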
@@ -6,14 +6,14 @@ DIR="$( cd "$( dirname "$0" )" && pwd )"

PID_FILE="$DIR/servers.pid"

if [ -f "$PID_FILE" ]; then
if [ -f $PID_FILE ]; then
    echo "servers.pid exists!"
    exit 1
fi

for port in 8080 8081 8082; do
    rm -rf "${DIR:?}/$port"
    rm -rf "$DIR/media_store.$port"
    rm -rf $DIR/$port
    rm -rf $DIR/media_store.$port
done

rm -rf "${DIR:?}/etc"
rm -rf $DIR/etc
150 demo/start.sh
@@ -4,22 +4,21 @@ DIR="$( cd "$( dirname "$0" )" && pwd )"

CWD=$(pwd)

cd "$DIR/.." || exit
cd "$DIR/.."

mkdir -p demo/etc

PYTHONPATH=$(readlink -f "$(pwd)")
export PYTHONPATH
export PYTHONPATH=$(readlink -f $(pwd))


echo "$PYTHONPATH"
echo $PYTHONPATH

for port in 8080 8081 8082; do
    echo "Starting server on port $port... "

    https_port=$((port + 400))
    mkdir -p demo/$port
    pushd demo/$port || exit
    pushd demo/$port

    #rm $DIR/etc/$port.config
    python3 -m synapse.app.homeserver \
@@ -28,78 +27,75 @@ for port in 8080 8081 8082; do
        --config-path "$DIR/etc/$port.config" \
        --report-stats no

    if ! grep -F "Customisation made by demo/start.sh" -q "$DIR/etc/$port.config"; then
    if ! grep -F "Customisation made by demo/start.sh" -q $DIR/etc/$port.config; then
        printf '\n\n# Customisation made by demo/start.sh\n' >> $DIR/etc/$port.config

        echo "public_baseurl: http://localhost:$port/" >> $DIR/etc/$port.config

        echo 'enable_registration: true' >> $DIR/etc/$port.config

        # Warning, this heredoc depends on the interaction of tabs and spaces. Please don't
        # accidentally bork me with your fancy settings.
        listeners=$(cat <<-PORTLISTENERS
        # Configure server to listen on both $https_port and $port
        # This overrides some of the default settings above
        listeners:
          - port: $https_port
            type: http
            tls: true
            resources:
              - names: [client, federation]

          - port: $port
            tls: false
            bind_addresses: ['::1', '127.0.0.1']
            type: http
            x_forwarded: true
            resources:
              - names: [client, federation]
                compress: false
        PORTLISTENERS
        )
        echo "${listeners}" >> $DIR/etc/$port.config

        # Disable tls for the servers
        printf '\n\n# Disable tls on the servers.' >> $DIR/etc/$port.config
        echo '# DO NOT USE IN PRODUCTION' >> $DIR/etc/$port.config
        echo 'use_insecure_ssl_client_just_for_testing_do_not_use: true' >> $DIR/etc/$port.config
        echo 'federation_verify_certificates: false' >> $DIR/etc/$port.config

        # Set tls paths
        echo "tls_certificate_path: \"$DIR/etc/localhost:$https_port.tls.crt\"" >> $DIR/etc/$port.config
        echo "tls_private_key_path: \"$DIR/etc/localhost:$https_port.tls.key\"" >> $DIR/etc/$port.config

        # Generate tls keys
        openssl req -x509 -newkey rsa:4096 -keyout "$DIR/etc/localhost:$https_port.tls.key" -out "$DIR/etc/localhost:$https_port.tls.crt" -days 365 -nodes -subj "/O=matrix"
        openssl req -x509 -newkey rsa:4096 -keyout $DIR/etc/localhost\:$https_port.tls.key -out $DIR/etc/localhost\:$https_port.tls.crt -days 365 -nodes -subj "/O=matrix"

        # Regenerate configuration
        {
            printf '\n\n# Customisation made by demo/start.sh\n'
            echo "public_baseurl: http://localhost:$port/"
            echo 'enable_registration: true'
        # Ignore keys from the trusted keys server
        echo '# Ignore keys from the trusted keys server' >> $DIR/etc/$port.config
        echo 'trusted_key_servers:' >> $DIR/etc/$port.config
        echo ' - server_name: "matrix.org"' >> $DIR/etc/$port.config
        echo ' accept_keys_insecurely: true' >> $DIR/etc/$port.config

            # Warning, this heredoc depends on the interaction of tabs and spaces.
            # Please don't accidentally bork me with your fancy settings.
            listeners=$(cat <<-PORTLISTENERS
            # Configure server to listen on both $https_port and $port
            # This overrides some of the default settings above
            listeners:
              - port: $https_port
                type: http
                tls: true
                resources:
                  - names: [client, federation]

              - port: $port
                tls: false
                bind_addresses: ['::1', '127.0.0.1']
                type: http
                x_forwarded: true
                resources:
                  - names: [client, federation]
                    compress: false
            PORTLISTENERS
            )

            echo "${listeners}"

            # Disable tls for the servers
            printf '\n\n# Disable tls on the servers.'
            echo '# DO NOT USE IN PRODUCTION'
            echo 'use_insecure_ssl_client_just_for_testing_do_not_use: true'
            echo 'federation_verify_certificates: false'

            # Set tls paths
            echo "tls_certificate_path: \"$DIR/etc/localhost:$https_port.tls.crt\""
            echo "tls_private_key_path: \"$DIR/etc/localhost:$https_port.tls.key\""

            # Ignore keys from the trusted keys server
            echo '# Ignore keys from the trusted keys server'
            echo 'trusted_key_servers:'
            echo ' - server_name: "matrix.org"'
            echo ' accept_keys_insecurely: true'

            # Reduce the blacklist
            blacklist=$(cat <<-BLACK
            # Set the blacklist so that it doesn't include 127.0.0.1, ::1
            federation_ip_range_blacklist:
              - '10.0.0.0/8'
              - '172.16.0.0/12'
              - '192.168.0.0/16'
              - '100.64.0.0/10'
              - '169.254.0.0/16'
              - 'fe80::/64'
              - 'fc00::/7'
            BLACK
            )

            echo "${blacklist}"
        } >> "$DIR/etc/$port.config"
        # Reduce the blacklist
        blacklist=$(cat <<-BLACK
        # Set the blacklist so that it doesn't include 127.0.0.1, ::1
        federation_ip_range_blacklist:
          - '10.0.0.0/8'
          - '172.16.0.0/12'
          - '192.168.0.0/16'
          - '100.64.0.0/10'
          - '169.254.0.0/16'
          - 'fe80::/64'
          - 'fc00::/7'
        BLACK
        )
        echo "${blacklist}" >> $DIR/etc/$port.config
    fi

    # Check script parameters
    if [ $# -eq 1 ]; then
        if [ "$1" = "--no-rate-limit" ]; then
        if [ $1 = "--no-rate-limit" ]; then

            # Disable any rate limiting
            ratelimiting=$(cat <<-RC
@@ -141,22 +137,22 @@ for port in 8080 8081 8082; do
            burst_count: 1000
            RC
            )
            echo "${ratelimiting}" >> "$DIR/etc/$port.config"
            echo "${ratelimiting}" >> $DIR/etc/$port.config
        fi
    fi

    if ! grep -F "full_twisted_stacktraces" -q "$DIR/etc/$port.config"; then
        echo "full_twisted_stacktraces: true" >> "$DIR/etc/$port.config"
    if ! grep -F "full_twisted_stacktraces" -q $DIR/etc/$port.config; then
        echo "full_twisted_stacktraces: true" >> $DIR/etc/$port.config
    fi
    if ! grep -F "report_stats" -q "$DIR/etc/$port.config" ; then
        echo "report_stats: false" >> "$DIR/etc/$port.config"
    if ! grep -F "report_stats" -q $DIR/etc/$port.config ; then
        echo "report_stats: false" >> $DIR/etc/$port.config
    fi

    python3 -m synapse.app.homeserver \
        --config-path "$DIR/etc/$port.config" \
        -D \

    popd || exit
    popd
done

cd "$CWD" || exit
cd "$CWD"
@@ -8,7 +8,7 @@ for pid_file in $FILES; do
|
||||
pid=$(cat "$pid_file")
|
||||
if [[ $pid ]]; then
|
||||
echo "Killing $pid_file with $pid"
|
||||
kill "$pid"
|
||||
kill $pid
|
||||
fi
|
||||
done

@@ -15,15 +15,6 @@ ARG distro=""
###
### Stage 0: build a dh-virtualenv
###

# This is only really needed on bionic and focal, since other distributions we
# care about have a recent version of dh-virtualenv by default. Unfortunately,
# it looks like focal is going to be with us for a while.
#
# (focal doesn't have a dh-virtualenv package at all. There is a PPA at
# https://launchpad.net/~jyrki-pulliainen/+archive/ubuntu/dh-virtualenv, but
# it's not obviously easier to use that than to build our own.)

FROM ${distro} as builder

RUN apt-get update -qq -o Acquire::Languages=none
@@ -36,7 +27,7 @@ RUN env DEBIAN_FRONTEND=noninteractive apt-get install \
    wget

# fetch and unpack the package
# TODO: Upgrade to 1.2.2 once bionic is dropped (1.2.2 requires debhelper 12; bionic has only 11)
# TODO: Upgrade to 1.2.2 once xenial is dropped
RUN mkdir /dh-virtualenv
RUN wget -q -O /dh-virtualenv.tar.gz https://github.com/spotify/dh-virtualenv/archive/ac6e1b1.tar.gz
RUN tar -xv --strip-components=1 -C /dh-virtualenv -f /dh-virtualenv.tar.gz
@@ -47,9 +38,8 @@ RUN apt-get update -qq -o Acquire::Languages=none \
    && cd /dh-virtualenv \
    && env DEBIAN_FRONTEND=noninteractive mk-build-deps -ri -t "apt-get -y --no-install-recommends"

# Build it. Note that building the docs doesn't work due to differences in
# Sphinx APIs across versions/distros.
RUN cd /dh-virtualenv && DEB_BUILD_OPTIONS=nodoc dpkg-buildpackage -us -uc -b
# build it
RUN cd /dh-virtualenv && dpkg-buildpackage -us -uc -b

###
### Stage 1
@@ -69,6 +59,8 @@ ENV LANG C.UTF-8
#
# NB: keep this list in sync with the list of build-deps in debian/control
# TODO: it would be nice to do that automatically.
# TODO: Remove the dh-systemd stanza after dropping support for Ubuntu xenial
# it's a transitional package on all other, more recent releases
RUN apt-get update -qq -o Acquire::Languages=none \
    && env DEBIAN_FRONTEND=noninteractive apt-get install \
    -yqq --no-install-recommends -o Dpkg::Options::=--force-unsafe-io \
@@ -84,7 +76,10 @@ RUN apt-get update -qq -o Acquire::Languages=none \
    python3-venv \
    sqlite3 \
    libpq-dev \
    xmlsec1
    xmlsec1 \
    && ( env DEBIAN_FRONTEND=noninteractive apt-get install \
    -yqq --no-install-recommends -o Dpkg::Options::=--force-unsafe-io \
    dh-systemd || true )

COPY --from=builder /dh-virtualenv_1.2~dev-1_all.deb /

@@ -1,6 +1,6 @@
# Use the Sytest image that comes with a lot of the build dependencies
# pre-installed
FROM matrixdotorg/sytest:bionic
FROM matrixdotorg/sytest:latest

# The Sytest image doesn't come with python, so install that
RUN apt-get update && apt-get -qq install -y python3 python3-dev python3-pip
@@ -8,23 +8,5 @@ RUN apt-get update && apt-get -qq install -y python3 python3-dev python3-pip
# We need tox to run the tests in run_pg_tests.sh
RUN python3 -m pip install tox

# Initialise the db
RUN su -c '/usr/lib/postgresql/10/bin/initdb -D /var/lib/postgresql/data -E "UTF-8" --lc-collate="C.UTF-8" --lc-ctype="C.UTF-8" --username=postgres' postgres

# Add a user with our UID and GID so that files get created on the host owned
# by us, not root.
ARG UID
ARG GID
RUN groupadd --gid $GID user
RUN useradd --uid $UID --gid $GID --groups sudo --no-create-home user

# Ensure we can start postgres by sudo-ing as the postgres user.
RUN apt-get update && apt-get -qq install -y sudo
RUN echo "user ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers

ADD run_pg_tests.sh /run_pg_tests.sh
# Use the "exec form" of ENTRYPOINT (https://docs.docker.com/engine/reference/builder/#entrypoint)
# so that we can `docker run` this container and pass arguments to pg_tests.sh
ENTRYPOINT ["/run_pg_tests.sh"]

USER user
ADD run_pg_tests.sh /pg_tests.sh
ENTRYPOINT /pg_tests.sh

@@ -21,6 +21,3 @@ VOLUME ["/data"]
# files to run the desired worker configuration. Will start supervisord.
COPY ./docker/configure_workers_and_start.py /configure_workers_and_start.py
ENTRYPOINT ["/configure_workers_and_start.py"]

HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \
    CMD /bin/sh /healthcheck.sh

@@ -65,8 +65,7 @@ The following environment variables are supported in `generate` mode:
* `SYNAPSE_DATA_DIR`: where the generated config will put persistent data
  such as the database and media store. Defaults to `/data`.
* `UID`, `GID`: the user id and group id to use for creating the data
  directories. If unset, and no user is set via `docker run --user`, defaults
  to `991`, `991`.
  directories. Defaults to `991`, `991`.

## Running synapse

@@ -98,9 +97,7 @@ The following environment variables are supported in `run` mode:
  `<SYNAPSE_CONFIG_DIR>/homeserver.yaml`.
* `SYNAPSE_WORKER`: module to execute, used when running synapse with workers.
  Defaults to `synapse.app.homeserver`, which is suitable for non-worker mode.
* `UID`, `GID`: the user and group id to run Synapse as. If unset, and no user
  is set via `docker run --user`, defaults to `991`, `991`. Note that this user
  must have permission to read the config files, and write to the data directories.
* `UID`, `GID`: the user and group id to run Synapse as. Defaults to `991`, `991`.
* `TZ`: the [timezone](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones) the container will run with. Defaults to `UTC`.

For more complex setups (e.g. for workers) you can also pass your args directly to synapse using `run` mode. For example like this:
@@ -189,7 +186,7 @@ point to another Dockerfile.
## Disabling the healthcheck

If you are using a non-standard port or tls inside docker you can disable the healthcheck
whilst running the above `docker run` commands.

```
--no-healthcheck
```
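For instance, the flag might be combined with a typical run command like this (a sketch only; the volume path and port mapping are placeholders):

```bash
# Run the Synapse image with its built-in HEALTHCHECK disabled.
docker run -d --name synapse \
    --no-healthcheck \
    -v /path/to/data:/data \
    -p 8008:8008 \
    matrixdotorg/synapse:latest
```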

@@ -215,7 +212,7 @@ If you wish to point the healthcheck at a different port with docker command, add:
## Setting the healthcheck in docker-compose file

You can add the following to set a custom healthcheck in a docker compose file.
You will need docker-compose version >2.1 for this to work.

```
healthcheck:
```
@@ -229,5 +226,4 @@ healthcheck:
## Using jemalloc

Jemalloc is embedded in the image and will be used instead of the default allocator.
You can read about jemalloc by reading the Synapse
[README](https://github.com/matrix-org/synapse/blob/HEAD/README.rst#help-synapse-is-slow-and-eats-all-my-ram-cpu).
You can read about jemalloc by reading the Synapse [README](../README.rst).

@@ -5,25 +5,12 @@
set -ex

# Get the codename from distro env
DIST=$(cut -d ':' -f2 <<< "${distro:?}")
DIST=`cut -d ':' -f2 <<< $distro`
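To make the extraction concrete, an illustrative expansion (the `distro` value is an assumption):

```bash
distro="ubuntu:focal"
# cut splits on ':' and keeps the second field, i.e. the codename;
# ${distro:?} additionally aborts with an error if distro is unset or empty.
DIST=$(cut -d ':' -f2 <<< "${distro:?}")
echo "$DIST"   # prints: focal
```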

# we get a read-only copy of the source: make a writeable copy
cp -aT /synapse/source /synapse/build
cd /synapse/build

# if this is a prerelease, set the Section accordingly.
#
# When the package is later added to the package repo, reprepro will use the
# Section to determine which "component" it should go into (see
# https://manpages.debian.org/stretch/reprepro/reprepro.1.en.html#GUESSING)

DEB_VERSION=$(dpkg-parsechangelog -SVersion)
case $DEB_VERSION in
    *~rc*|*~a*|*~b*|*~c*)
        sed -ie '/^Section:/c\Section: prerelease' debian/control
        ;;
esac

# add an entry to the changelog for this distribution
dch -M -l "+$DIST" "build for $DIST"
dch -M -r "" --force-distribution --distribution "$DIST"

@@ -1,6 +0,0 @@
#!/bin/sh
# This healthcheck script is designed to return OK when every
# host involved returns OK
{%- for healthcheck_url in healthcheck_urls %}
curl -fSs {{ healthcheck_url }} || exit 1
{%- endfor %}

@@ -148,6 +148,14 @@ bcrypt_rounds: 12
allow_guest_access: {{ "True" if SYNAPSE_ALLOW_GUEST else "False" }}
enable_group_creation: true

# The list of identity servers trusted to verify third party
# identifiers by this server.
#
# Also defines the ID server which will be called when an account is
# deactivated (one will be picked arbitrarily).
trusted_third_party_id_servers:
  - matrix.org
  - vector.im

## Metrics ###

@@ -18,31 +18,18 @@ handlers:
    backupCount: 6  # Does not include the current log file.
    encoding: utf8

  # Default to buffering writes to log file for efficiency.
  # WARNING/ERROR logs will still be flushed immediately, but there will be a
  # delay (of up to `period` seconds, or until the buffer is full with
  # `capacity` messages) before INFO/DEBUG logs get written.
  # Default to buffering writes to log file for efficiency. This means that
  # there will be a delay for INFO/DEBUG logs to get written, but WARNING/ERROR
  # logs will still be flushed immediately.
  buffer:
    class: synapse.logging.handlers.PeriodicallyFlushingMemoryHandler
    class: logging.handlers.MemoryHandler
    target: file

    # The capacity is the maximum number of log lines that are buffered
    # before being written to disk. Increasing this will lead to better
    # The capacity is the number of log lines that are buffered before
    # being written to disk. Increasing this will lead to better
    # performance, at the expense of it taking longer for log lines to
    # be written to disk.
    # This parameter is required.
    capacity: 10

    # Logs with a level at or above the flush level will cause the buffer to
    # be flushed immediately.
    # Default value: 40 (ERROR)
    # Other values: 50 (CRITICAL), 30 (WARNING), 20 (INFO), 10 (DEBUG)
    flushLevel: 30  # Flush immediately for WARNING logs and higher

    # The period of time, in seconds, between forced flushes.
    # Messages will not be delayed for longer than this time.
    # Default value: 5 seconds
    period: 5
    flushLevel: 30  # Flush for WARNING logs as well
{% endif %}

  console:

@@ -48,7 +48,7 @@ WORKERS_CONFIG = {
        "app": "synapse.app.user_dir",
        "listener_resources": ["client"],
        "endpoint_patterns": [
            "^/_matrix/client/(api/v1|r0|v3|unstable)/user_directory/search$"
            "^/_matrix/client/(api/v1|r0|unstable)/user_directory/search$"
        ],
        "shared_extra_conf": {"update_user_directory": False},
        "worker_extra_conf": "",
@@ -85,10 +85,10 @@ WORKERS_CONFIG = {
        "app": "synapse.app.generic_worker",
        "listener_resources": ["client"],
        "endpoint_patterns": [
            "^/_matrix/client/(v2_alpha|r0|v3)/sync$",
            "^/_matrix/client/(api/v1|v2_alpha|r0|v3)/events$",
            "^/_matrix/client/(api/v1|r0|v3)/initialSync$",
            "^/_matrix/client/(api/v1|r0|v3)/rooms/[^/]+/initialSync$",
            "^/_matrix/client/(v2_alpha|r0)/sync$",
            "^/_matrix/client/(api/v1|v2_alpha|r0)/events$",
            "^/_matrix/client/(api/v1|r0)/initialSync$",
            "^/_matrix/client/(api/v1|r0)/rooms/[^/]+/initialSync$",
        ],
        "shared_extra_conf": {},
        "worker_extra_conf": "",
@@ -146,11 +146,11 @@ WORKERS_CONFIG = {
        "app": "synapse.app.generic_worker",
        "listener_resources": ["client"],
        "endpoint_patterns": [
            "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/redact",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/send",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/join/",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/profile/",
            "^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/redact",
            "^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/send",
            "^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$",
            "^/_matrix/client/(api/v1|r0|unstable)/join/",
            "^/_matrix/client/(api/v1|r0|unstable)/profile/",
        ],
        "shared_extra_conf": {},
        "worker_extra_conf": "",
@@ -158,11 +158,11 @@ WORKERS_CONFIG = {
    "frontend_proxy": {
        "app": "synapse.app.frontend_proxy",
        "listener_resources": ["client", "replication"],
        "endpoint_patterns": ["^/_matrix/client/(api/v1|r0|v3|unstable)/keys/upload"],
        "endpoint_patterns": ["^/_matrix/client/(api/v1|r0|unstable)/keys/upload"],
        "shared_extra_conf": {},
        "worker_extra_conf": (
            "worker_main_http_uri: http://127.0.0.1:%d"
            % (MAIN_PROCESS_HTTP_LISTENER_PORT,)
            % (MAIN_PROCESS_HTTP_LISTENER_PORT,),
        ),
    },
}

@@ -474,16 +474,10 @@ def generate_worker_files(environ, config_path: str, data_dir: str):

    # Determine the load-balancing upstreams to configure
    nginx_upstream_config = ""

    # At the same time, prepare a list of internal endpoints to healthcheck
    # starting with the main process which exists even if no workers do.
    healthcheck_urls = ["http://localhost:8080/health"]

    for upstream_worker_type, upstream_worker_ports in nginx_upstreams.items():
        body = ""
        for port in upstream_worker_ports:
            body += "    server localhost:%d;\n" % (port,)
            healthcheck_urls.append("http://localhost:%d/health" % (port,))

        # Add to the list of configured upstreams
        nginx_upstream_config += NGINX_UPSTREAM_CONFIG_BLOCK.format(
@@ -516,13 +510,6 @@ def generate_worker_files(environ, config_path: str, data_dir: str):
        worker_config=supervisord_config,
    )

    # healthcheck config
    convert(
        "/conf/healthcheck.sh.j2",
        "/healthcheck.sh",
        healthcheck_urls=healthcheck_urls,
    )

    # Ensure the logging directory exists
    log_dir = data_dir + "/logs"
    if not os.path.exists(log_dir):

@@ -10,10 +10,11 @@ set -e
# Set PGUSER so Synapse's tests know what user to connect to the database with
export PGUSER=postgres

# Start the database
sudo -u postgres /usr/lib/postgresql/10/bin/pg_ctl -w -D /var/lib/postgresql/data start
# Initialise & start the database
su -c '/usr/lib/postgresql/9.6/bin/initdb -D /var/lib/postgresql/data -E "UTF-8" --lc-collate="en_US.UTF-8" --lc-ctype="en_US.UTF-8" --username=postgres' postgres
su -c '/usr/lib/postgresql/9.6/bin/pg_ctl -w -D /var/lib/postgresql/data start' postgres

# Run the tests
cd /src
export TRIAL_FLAGS="-j 4"
tox --workdir=./.tox-pg-container -e py36-postgres "$@"
tox --workdir=/tmp -e py35-postgres

@@ -120,7 +120,6 @@ def generate_config_from_template(config_dir, config_path, environ, ownership):
    ]

    if ownership is not None:
        log(f"Setting ownership on /data to {ownership}")
        subprocess.check_output(["chown", "-R", ownership, "/data"])
        args = ["gosu", ownership] + args

@@ -145,18 +144,12 @@ def run_generate_config(environ, ownership):
    config_path = environ.get("SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml")
    data_dir = environ.get("SYNAPSE_DATA_DIR", "/data")

    if ownership is not None:
        # make sure that synapse has perms to write to the data dir.
        log(f"Setting ownership on {data_dir} to {ownership}")
        subprocess.check_output(["chown", ownership, data_dir])

    # create a suitable log config from our template
    log_config_file = "%s/%s.log.config" % (config_dir, server_name)
    if not os.path.exists(log_config_file):
        log("Creating log config %s" % (log_config_file,))
        convert("/conf/log.config", log_config_file, environ)

    # generate the main config file, and a signing key.
    args = [
        "python",
        "-m",
@@ -175,23 +168,29 @@ def run_generate_config(environ, ownership):
        "--open-private-ports",
    ]
    # log("running %s" % (args, ))
    os.execv("/usr/local/bin/python", args)

    if ownership is not None:
        # make sure that synapse has perms to write to the data dir.
        subprocess.check_output(["chown", ownership, data_dir])

        args = ["gosu", ownership] + args
        os.execv("/usr/sbin/gosu", args)
    else:
        os.execv("/usr/local/bin/python", args)


def main(args, environ):
    mode = args[1] if len(args) > 1 else "run"

    # if we were given an explicit user to switch to, do so
    ownership = None
    if "UID" in environ:
        desired_uid = int(environ["UID"])
        desired_gid = int(environ.get("GID", "991"))
        ownership = f"{desired_uid}:{desired_gid}"
    elif os.getuid() == 0:
        # otherwise, if we are running as root, use user 991
        ownership = "991:991"

    desired_uid = int(environ.get("UID", "991"))
    desired_gid = int(environ.get("GID", "991"))
    synapse_worker = environ.get("SYNAPSE_WORKER", "synapse.app.homeserver")
    if (desired_uid == os.getuid()) and (desired_gid == os.getgid()):
        ownership = None
    else:
        ownership = "{}:{}".format(desired_uid, desired_gid)

    if ownership is None:
        log("Will not perform chmod/gosu as UserID already matches request")

    # In generate mode, generate a configuration and missing keys, then exit
    if mode == "generate":
@@ -15,12 +15,12 @@ in `homeserver.yaml`, to the list of authorized domains. If you have not set
1. Agree to the terms of service and submit.
1. Copy your site key and secret key and add them to your `homeserver.yaml`
   configuration file
   ```yaml
   ```
   recaptcha_public_key: YOUR_SITE_KEY
   recaptcha_private_key: YOUR_SECRET_KEY
   ```
1. Enable the CAPTCHA for new registrations
   ```yaml
   ```
   enable_registration_captcha: true
   ```
1. Go to the settings page for the CAPTCHA you just created

@@ -3,7 +3,7 @@
## Historical Note
This document was originally written to guide server admins through the upgrade
path towards Synapse 1.0. Specifically,
[MSC1711](https://github.com/matrix-org/matrix-doc/blob/main/proposals/1711-x509-for-federation.md)
[MSC1711](https://github.com/matrix-org/matrix-doc/blob/master/proposals/1711-x509-for-federation.md)
required that all servers present valid TLS certificates on their federation
API. Admins were encouraged to achieve compliance from version 0.99.0 (released
in February 2019) ahead of version 1.0 (released June 2019) enforcing the
@@ -132,7 +132,7 @@ your domain, you can simply route all traffic through the reverse proxy by
updating the SRV record appropriately (or removing it, if the proxy listens on
8448).

See [the reverse proxy documentation](reverse_proxy.md) for information on setting up a
See [reverse_proxy.md](reverse_proxy.md) for information on setting up a
reverse proxy.

#### Option 3: add a .well-known file to delegate your matrix traffic
@@ -282,7 +282,7 @@ coffin of the Perspectives project (which was already pretty dead). So, the
Spec Core Team decided that a better approach would be to mandate valid TLS
certificates for federation alongside the rest of the Web. More details can be
found in
[MSC1711](https://github.com/matrix-org/matrix-doc/blob/main/proposals/1711-x509-for-federation.md#background-the-failure-of-the-perspectives-approach).
[MSC1711](https://github.com/matrix-org/matrix-doc/blob/master/proposals/1711-x509-for-federation.md#background-the-failure-of-the-perspectives-approach).

This results in a breaking change, which is disruptive, but absolutely critical
for the security model. However, the existence of Let's Encrypt as a trivial
@@ -303,7 +303,7 @@ We no longer actively recommend against using a reverse proxy. Many admins will
find it easier to direct federation traffic to a reverse proxy and manage their
own TLS certificates, and this is a supported configuration.

See [the reverse proxy documentation](reverse_proxy.md) for information on setting up a
See [reverse_proxy.md](reverse_proxy.md) for information on setting up a
reverse proxy.

### Do I still need to give my TLS certificates to Synapse if I am using a reverse proxy?

@@ -6,9 +6,9 @@ Please update any links to point to the new website instead.
## About

This directory currently holds a series of markdown files documenting how to install, use
and develop Synapse. The documentation is readable directly from this repository, but it is
recommended to instead browse through the [website](https://matrix-org.github.io/synapse) for
easier discoverability.
and develop Synapse, the reference Matrix homeserver. The documentation is readable directly
from this repository, but it is recommended to instead browse through the
[website](https://matrix-org.github.io/synapse) for easier discoverability.

## Adding to the documentation

@@ -50,10 +50,8 @@ build the documentation with:
mdbook build
```

The rendered contents will be outputted to a new `book/` directory at the root of the repository. Please note that
index.html is not built by default, it is created by copying over the file `welcome_and_overview.html` to `index.html`
during deployment. Thus, when running `mdbook serve` locally the book will initially show a 404 in place of the index
due to the above. Do not be alarmed!
The rendered contents will be outputted to a new `book/` directory at the root of the repository. You can
browse the book by opening `book/index.html` in a web browser.

You can also have mdbook host the docs on a local webserver with hot-reload functionality via:

@@ -7,7 +7,6 @@
- [Installation](setup/installation.md)
- [Using Postgres](postgres.md)
- [Configuring a Reverse Proxy](reverse_proxy.md)
- [Configuring a Forward/Outbound Proxy](setup/forward_proxy.md)
- [Configuring a Turn Server](turn-howto.md)
- [Delegation](delegate.md)

@@ -21,12 +20,11 @@
  - [Homeserver Sample Config File](usage/configuration/homeserver_sample_config.md)
  - [Logging Sample Config File](usage/configuration/logging_sample_config.md)
  - [Structured Logging](structured_logging.md)
  - [Templates](templates.md)
  - [User Authentication](usage/configuration/user_authentication/README.md)
    - [Single-Sign On](usage/configuration/user_authentication/single_sign_on/README.md)
    - [Single-Sign On]()
      - [OpenID Connect](openid.md)
      - [SAML](usage/configuration/user_authentication/single_sign_on/saml.md)
      - [CAS](usage/configuration/user_authentication/single_sign_on/cas.md)
      - [SAML]()
      - [CAS]()
      - [SSO Mapping Providers](sso_mapping_providers.md)
    - [Password Auth Providers](password_auth_providers.md)
    - [JSON Web Tokens](jwt.md)
@@ -34,66 +32,53 @@
  - [Application Services](application_services.md)
  - [Server Notices](server_notices.md)
  - [Consent Tracking](consent_tracking.md)
  - [URL Previews](development/url_previews.md)
  - [URL Previews](url_previews.md)
  - [User Directory](user_directory.md)
  - [Message Retention Policies](message_retention_policies.md)
  - [Pluggable Modules](modules/index.md)
    - [Writing a module](modules/writing_a_module.md)
      - [Spam checker callbacks](modules/spam_checker_callbacks.md)
      - [Third-party rules callbacks](modules/third_party_rules_callbacks.md)
      - [Presence router callbacks](modules/presence_router_callbacks.md)
      - [Account validity callbacks](modules/account_validity_callbacks.md)
      - [Password auth provider callbacks](modules/password_auth_provider_callbacks.md)
      - [Background update controller callbacks](modules/background_update_controller_callbacks.md)
    - [Porting a legacy module to the new interface](modules/porting_legacy_module.md)
  - [Pluggable Modules](modules.md)
    - [Third Party Rules]()
    - [Spam Checker](spam_checker.md)
    - [Presence Router](presence_router_module.md)
    - [Media Storage Providers]()
  - [Workers](workers.md)
    - [Using `synctl` with Workers](synctl_workers.md)
    - [Systemd](systemd-with-workers/README.md)
- [Administration](usage/administration/README.md)
  - [Admin API](usage/administration/admin_api/README.md)
    - [Account Validity](admin_api/account_validity.md)
    - [Background Updates](usage/administration/admin_api/background_updates.md)
    - [Delete Group](admin_api/delete_group.md)
    - [Event Reports](admin_api/event_reports.md)
    - [Media](admin_api/media_admin_api.md)
    - [Purge History](admin_api/purge_history_api.md)
    - [Purge Rooms](admin_api/purge_room.md)
    - [Register Users](admin_api/register_api.md)
    - [Registration Tokens](usage/administration/admin_api/registration_tokens.md)
    - [Manipulate Room Membership](admin_api/room_membership.md)
    - [Rooms](admin_api/rooms.md)
    - [Spaces](usage/administration/admin_api/spaces.md)
    - [Server Notices](admin_api/server_notices.md)
    - [Shutdown Room](admin_api/shutdown_room.md)
    - [Statistics](admin_api/statistics.md)
    - [Users](admin_api/user_admin_api.md)
    - [Server Version](admin_api/version_api.md)
    - [Federation](usage/administration/admin_api/federation.md)
  - [Manhole](manhole.md)
  - [Monitoring](metrics-howto.md)
  - [Understanding Synapse Through Grafana Graphs](usage/administration/understanding_synapse_through_grafana_graphs.md)
  - [Useful SQL for Admins](usage/administration/useful_sql_for_admins.md)
  - [Database Maintenance Tools](usage/administration/database_maintenance_tools.md)
  - [State Groups](usage/administration/state_groups.md)
  - [Request log format](usage/administration/request_log.md)
  - [Admin FAQ](usage/administration/admin_faq.md)
  - [Scripts]()

# Development
- [Contributing Guide](development/contributing_guide.md)
- [Code Style](code_style.md)
- [Git Usage](development/git.md)
- [Git Usage](dev/git.md)
- [Testing]()
- [OpenTracing](opentracing.md)
- [Database Schemas](development/database_schema.md)
- [Experimental features](development/experimental_features.md)
- [Synapse Architecture]()
  - [Log Contexts](log_contexts.md)
  - [Replication](replication.md)
  - [TCP Replication](tcp_replication.md)
- [Internal Documentation](development/internal_documentation/README.md)
  - [Single Sign-On]()
    - [SAML](development/saml.md)
    - [CAS](development/cas.md)
  - [Room DAG concepts](development/room-dag-concepts.md)
    - [SAML](dev/saml.md)
    - [CAS](dev/cas.md)
  - [State Resolution]()
    - [The Auth Chain Difference Algorithm](auth_chain_difference_algorithm.md)
  - [Media Repository](media_repository.md)
@@ -102,4 +87,3 @@

# Other
  - [Dependency Deprecation Policy](deprecation_policy.md)
  - [Running Synapse on a Single-Board Computer](other/running_synapse_on_single_board_computers.md)

@@ -99,7 +99,7 @@ server admin: see [Admin API](../usage/administration/admin_api).

It returns a JSON body like the following:

```json
```jsonc
{
    "event_id": "$bNUFCwGzWca1meCGkjp-zwslF-GfVcXukvRLI1_FaVY",
    "event_json": {
@@ -132,7 +132,7 @@ It returns a JSON body like the following:
        },
        "type": "m.room.message",
        "unsigned": {
            "age_ts": 1592291711430
            "age_ts": 1592291711430,
        }
    },
    "id": <report_id>,

@@ -12,7 +12,6 @@
- [Delete local media](#delete-local-media)
  * [Delete a specific local media](#delete-a-specific-local-media)
  * [Delete local media by date or size](#delete-local-media-by-date-or-size)
  * [Delete media uploaded by a user](#delete-media-uploaded-by-a-user)
- [Purge Remote Media API](#purge-remote-media-api)

# Querying media
@@ -48,8 +47,7 @@ The API returns a JSON body like the following:
## List all media uploaded by a user

Listing all media that has been uploaded by a local user can be achieved through
the use of the
[List media uploaded by a user](user_admin_api.md#list-media-uploaded-by-a-user)
the use of the [List media of a user](user_admin_api.rst#list-media-of-a-user)
Admin API.

# Quarantine media
@@ -257,9 +255,9 @@ POST /_synapse/admin/v1/media/<server_name>/delete?before_ts=<before_ts>

URL Parameters

* `server_name`: string - The name of your local server (e.g `matrix.org`).
* `before_ts`: string representing a positive integer - Unix timestamp in milliseconds.
* `before_ts`: string representing a positive integer - Unix timestamp in ms.
  Files that were last used before this timestamp will be deleted. It is the timestamp of
  last access, not the timestamp when the file was created.
  last access and not the timestamp creation.
* `size_gt`: Optional - string representing a positive integer - Size of the media in bytes.
  Files that are larger will be deleted. Defaults to `0`.
* `keep_profiles`: Optional - string representing a boolean - Switch to also delete files
@@ -283,11 +281,6 @@ The following fields are returned in the JSON response body:

* `deleted_media`: an array of strings - List of deleted `media_id`
* `total`: integer - Total number of deleted `media_id`

## Delete media uploaded by a user

You can find details of how to delete multiple media uploaded by a user in
[User Admin API](user_admin_api.md#delete-media-uploaded-by-a-user).

# Purge Remote Media API

The purge remote media API allows server admins to purge old cached remote media.
@@ -302,7 +295,7 @@ POST /_synapse/admin/v1/purge_media_cache?before_ts=<unix_timestamp_in_ms>

URL Parameters

* `unix_timestamp_in_ms`: string representing a positive integer - Unix timestamp in milliseconds.
* `unix_timestamp_in_ms`: string representing a positive integer - Unix timestamp in ms.
  All cached media that was last accessed before this timestamp will be removed.

Response:

@@ -27,7 +27,7 @@ Room state data (such as joins, leaves, topic) is always preserved.

To delete local message events as well, set `delete_local_events` in the body:

```json
```
{
    "delete_local_events": true
}
```

@@ -70,8 +70,6 @@ This API returns a JSON body like the following:

The status will be one of `active`, `complete`, or `failed`.

If `status` is `failed` there will be a string `error` with the error message.

## Reclaim disk space (Postgres)

To reclaim the disk space and return it to the operating system, you need to run

21 docs/admin_api/purge_room.md Normal file
@@ -0,0 +1,21 @@
Deprecated: Purge room API
==========================

**The old Purge room API is deprecated and will be removed in a future release.
See the new [Delete Room API](rooms.md#delete-room-api) for more details.**

This API will remove all trace of a room from your database.

All local users must have left the room before it can be removed.

The API is:

```
POST /_synapse/admin/v1/purge_room

{
    "room_id": "!room:id"
}
```

You must authenticate using the access token of an admin user.
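A hedged curl equivalent of the request above (host and token are placeholders):

```bash
curl -X POST \
    -H "Authorization: Bearer <access_token>" \
    -H "Content-Type: application/json" \
    -d '{"room_id": "!room:id"}' \
    "http://localhost:8008/_synapse/admin/v1/purge_room"
```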

@@ -28,7 +28,7 @@ server admin: see [Admin API](../usage/administration/admin_api).

Response:

```json
```
{
    "room_id": "!636q39766251:server.com"
}
```

@@ -1,13 +1,13 @@
# Contents
- [List Room API](#list-room-api)
  * [Parameters](#parameters)
  * [Usage](#usage)
- [Room Details API](#room-details-api)
- [Room Members API](#room-members-api)
- [Room State API](#room-state-api)
- [Block Room API](#block-room-api)
- [Delete Room API](#delete-room-api)
  * [Version 1 (old version)](#version-1-old-version)
  * [Version 2 (new version)](#version-2-new-version)
  * [Status of deleting rooms](#status-of-deleting-rooms)
  * [Parameters](#parameters-1)
  * [Response](#response)
  * [Undoing room shutdowns](#undoing-room-shutdowns)
- [Make Room Admin API](#make-room-admin-api)
- [Forward Extremities Admin API](#forward-extremities-admin-api)
@@ -19,7 +19,7 @@ The List Room admin API allows server admins to get a list of rooms on their
server. There are various parameters available that allow for filtering and
sorting the returned list. This API supports pagination.

**Parameters**
## Parameters

The following query parameters are available:

@@ -42,16 +42,9 @@ The following query parameters are available:
  - `history_visibility` - Rooms are ordered alphabetically by visibility of history of the room.
  - `state_events` - Rooms are ordered by number of state events. Largest to smallest.
* `dir` - Direction of room order. Either `f` for forwards or `b` for backwards. Setting
  this value to `b` will reverse the above sort order. Defaults to `f`.
* `search_term` - Filter rooms by their room name, canonical alias and room id.
  Specifically, rooms are selected if the search term is contained in
  - the room's name,
  - the local part of the room's canonical alias, or
  - the complete (local and server part) room's id (case sensitive).

  Defaults to no filtering.

**Response**
  this value to `b` will reverse the above sort order. Defaults to `f`.
* `search_term` - Filter rooms by their room name. Search term can be contained in any
  part of the room name. Defaults to no filtering.

The following fields are possible in the JSON response body:

@@ -86,17 +79,19 @@ The following fields are possible in the JSON response body:
  Use `prev_batch` for the `from` value in the next request to
  get the "previous page" of results.

The API is:
## Usage

A standard request with no filtering:

```
GET /_synapse/admin/v1/rooms

{}
```
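The same request issued with curl, with the filtering and sorting parameters described above added for illustration (host and token are placeholders):

```bash
curl -H "Authorization: Bearer <access_token>" \
    "http://localhost:8008/_synapse/admin/v1/rooms?order_by=size&dir=b&search_term=TWIM"
```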

A response body like the following is returned:
Response:

```json
```jsonc
{
    "rooms": [
        {
@@ -142,9 +137,11 @@ Filtering by room name:

```
GET /_synapse/admin/v1/rooms?search_term=TWIM

{}
```

A response body like the following is returned:
Response:

```json
{
@@ -175,11 +172,13 @@ Paginating through a list of rooms:

```
GET /_synapse/admin/v1/rooms?order_by=size

{}
```

A response body like the following is returned:
Response:

```json
```jsonc
{
    "rooms": [
        {
@@ -217,7 +216,7 @@ A response body like the following is returned:
        }
    ],
    "offset": 0,
    "total_rooms": 150,
    "total_rooms": 150
    "next_token": 100
}
```

@@ -229,11 +228,13 @@ parameter to the value of `next_token`.

```
GET /_synapse/admin/v1/rooms?order_by=size&from=100

{}
```

A response body like the following is returned:
Response:

```json
```jsonc
{
    "rooms": [
        {
@@ -303,13 +304,17 @@ The following fields are possible in the JSON response body:
* `history_visibility` - Who can see the room history. One of: ["invited", "joined", "shared", "world_readable"].
* `state_events` - Total number of state_events of a room. Complexity of the room.

The API is:
## Usage

A standard request:

```
GET /_synapse/admin/v1/rooms/<room_id>

{}
```

A response body like the following is returned:
Response:

```json
{
@@ -342,13 +347,17 @@ The response includes the following fields:
* `members` - A list of all the members that are present in the room, represented by their ids.
* `total` - Total number of members in the room.

The API is:
## Usage

A standard request:

```
GET /_synapse/admin/v1/rooms/<room_id>/members

{}
```

A response body like the following is returned:
Response:

```json
{
@@ -369,13 +378,17 @@ The response includes the following fields:

* `state` - The current state of the room at the time of request.

The API is:
## Usage

A standard request:

```
GET /_synapse/admin/v1/rooms/<room_id>/state

{}
```

A response body like the following is returned:
Response:

```json
{
@@ -387,86 +400,9 @@ A response body like the following is returned:
}
```

# Block Room API
The Block Room admin API allows server admins to block and unblock rooms,
and query to see if a given room is blocked.
This API can be used to pre-emptively block a room, even if it's unknown to this
homeserver. Users will be prevented from joining a blocked room.

## Block or unblock a room

The API is:

```
PUT /_synapse/admin/v1/rooms/<room_id>/block
```

with a body of:

```json
{
    "block": true
}
```

A response body like the following is returned:

```json
{
    "block": true
}
```

**Parameters**

The following parameters should be set in the URL:

- `room_id` - The ID of the room.

The following JSON body parameters are available:

- `block` - If `true` the room will be blocked and if `false` the room will be unblocked.

**Response**

The following fields are possible in the JSON response body:

- `block` - A boolean. `true` if the room is blocked, otherwise `false`

## Get block status

The API is:

```
GET /_synapse/admin/v1/rooms/<room_id>/block
```

A response body like the following is returned:

```json
{
    "block": true,
    "user_id": "<user_id>"
}
```

**Parameters**

The following parameters should be set in the URL:

- `room_id` - The ID of the room.

**Response**

The following fields are possible in the JSON response body:

- `block` - A boolean. `true` if the room is blocked, otherwise `false`
- `user_id` - An optional string. If the room is blocked (`block` is `true`) shows
  the user who added the room to the blocking list. Otherwise it is not displayed.
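A hedged sketch of both calls (host and token are placeholders; note that the room ID must be URL-encoded, so `!` becomes `%21` and `:` becomes `%3A`):

```bash
# Block the room:
curl -X PUT \
    -H "Authorization: Bearer <access_token>" \
    -H "Content-Type: application/json" \
    -d '{"block": true}' \
    "http://localhost:8008/_synapse/admin/v1/rooms/%21badroom%3Aexample.com/block"

# Query its block status:
curl -H "Authorization: Bearer <access_token>" \
    "http://localhost:8008/_synapse/admin/v1/rooms/%21badroom%3Aexample.com/block"
```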

# Delete Room API

The Delete Room admin API allows server admins to remove rooms from the server
The Delete Room admin API allows server admins to remove rooms from server
and block these rooms.

Shuts down a room. Moves all local users and room aliases automatically to a
@@ -477,33 +413,18 @@ The new room will be created with the user specified by the `new_room_user_id` p
as room administrator and will contain a message explaining what happened. Users invited
to the new room will have power level `-10` by default, and thus be unable to speak.

If `block` is `true`, users will be prevented from joining the old room.
This option can in [Version 1](#version-1-old-version) also be used to pre-emptively
block a room, even if it's unknown to this homeserver. In this case, the room will be
blocked, and no further action will be taken. If `block` is `false`, attempting to
delete an unknown room is invalid and will be rejected as a bad request.
If `block` is `True` it prevents new joins to the old room.

This API will remove all trace of the old room from your database after removing
all local users. If `purge` is `true` (the default), all traces of the old room will
be removed from your database after removing all local users. If you do not want
this to happen, set `purge` to `false`.
Depending on the amount of history being purged, a call to the API may take
Depending on the amount of history being purged a call to the API may take
several minutes or longer.

The local server will only have the power to move local user and room aliases to
the new room. Users on other servers will be unaffected.

To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see [Admin API](../usage/administration/admin_api).

## Version 1 (old version)

This version works synchronously. That means you only get the response once the server has
finished the action, which may take a long time. If you request the same action
a second time, and the server has not finished the first one, the second request will block.
This is fixed in version 2 of this API. The parameters are the same in both APIs.
This API will become deprecated in the future.

The API is:

```
@@ -511,7 +432,6 @@ DELETE /_synapse/admin/v1/rooms/<room_id>
```

with a body of:

```json
{
    "new_room_user_id": "@someuser:example.com",
@@ -522,6 +442,9 @@ with a body of:
}
```

To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see [Admin API](../usage/administration/admin_api).

A response body like the following is returned:

```json
@@ -538,45 +461,7 @@ A response body like the following is returned:
}
```

The parameters and response values have the same format as
[version 2](#version-2-new-version) of the API.

## Version 2 (new version)

**Note**: This API is new, experimental and "subject to change".

This version works asynchronously, meaning you get the response from the server immediately
while the server works on that task in the background. You can then request the status of the action
to check if it has completed.

The API is:

```
DELETE /_synapse/admin/v2/rooms/<room_id>
```

with a body of:

```json
{
    "new_room_user_id": "@someuser:example.com",
    "room_name": "Content Violation Notification",
    "message": "Bad Room has been shutdown due to content violations on this server. Please review our Terms of Service.",
    "block": true,
    "purge": true
}
```

The API starts the shut down and purge running, and returns immediately with a JSON body with
a purge id:

```json
{
    "delete_id": "<opaque id>"
}
```
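An end-to-end sketch of the asynchronous flow (host, token and room ID are placeholders); the returned `delete_id` can be fed to the status endpoint described below:

```bash
# Start the asynchronous deletion:
curl -X DELETE \
    -H "Authorization: Bearer <access_token>" \
    -H "Content-Type: application/json" \
    -d '{"block": true, "purge": true}' \
    "http://localhost:8008/_synapse/admin/v2/rooms/%21badroom%3Aexample.com"
# -> {"delete_id": "<opaque id>"}

# Poll the background task until its status is "complete" or "failed":
curl -H "Authorization: Bearer <access_token>" \
    "http://localhost:8008/_synapse/admin/v2/rooms/delete_status/<opaque id>"
```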

**Parameters**
## Parameters

The following parameters should be set in the URL:

@@ -595,10 +480,8 @@ The following JSON body parameters are available:
  `new_room_user_id` in the new room. Ideally this will clearly convey why the
  original room was shut down. Defaults to `Sharing illegal content on this server
  is not permitted and rooms in violation will be blocked.`
* `block` - Optional. If set to `true`, this room will be added to a blocking list,
  preventing future attempts to join the room. Rooms can be blocked
  even if they're not yet known to the homeserver (only with
  [Version 1](#version-1-old-version) of the API). Defaults to `false`.
* `block` - Optional. If set to `true`, this room will be added to a blocking list, preventing
  future attempts to join the room. Defaults to `false`.
* `purge` - Optional. If set to `true`, it will remove all traces of the room from your database.
  Defaults to `true`.
* `force_purge` - Optional, and ignored unless `purge` is `true`. If set to `true`, it
@@ -608,163 +491,53 @@ The following JSON body parameters are available:

The JSON body must not be empty. The body must be at least `{}`.

## Status of deleting rooms

**Note**: This API is new, experimental and "subject to change".

It is possible to query the status of the background task for deleting rooms.
The status can be queried up to 24 hours after completion of the task,
or until Synapse is restarted (whichever happens first).

### Query by `room_id`

With this API you can get the status of all active deletion tasks, and all those completed in the last 24h,
for the given `room_id`.

The API is:

```
GET /_synapse/admin/v2/rooms/<room_id>/delete_status
```

A response body like the following is returned:

```json
{
    "results": [
        {
            "delete_id": "delete_id1",
            "status": "failed",
            "error": "error message",
            "shutdown_room": {
                "kicked_users": [],
                "failed_to_kick_users": [],
                "local_aliases": [],
                "new_room_id": null
            }
        }, {
            "delete_id": "delete_id2",
            "status": "purging",
            "shutdown_room": {
                "kicked_users": [
                    "@foobar:example.com"
                ],
                "failed_to_kick_users": [],
                "local_aliases": [
                    "#badroom:example.com",
                    "#evilsaloon:example.com"
                ],
                "new_room_id": "!newroomid:example.com"
            }
        }
    ]
}
```

**Parameters**

The following parameters should be set in the URL:

* `room_id` - The ID of the room.

### Query by `delete_id`

With this API you can get the status of one specific task by `delete_id`.

The API is:

```
GET /_synapse/admin/v2/rooms/delete_status/<delete_id>
```

A response body like the following is returned:

```json
{
    "status": "purging",
    "shutdown_room": {
        "kicked_users": [
            "@foobar:example.com"
        ],
        "failed_to_kick_users": [],
        "local_aliases": [
            "#badroom:example.com",
            "#evilsaloon:example.com"
        ],
        "new_room_id": "!newroomid:example.com"
    }
}
```

**Parameters**

The following parameters should be set in the URL:

* `delete_id` - The ID for this delete.

### Response
## Response

The following fields are returned in the JSON response body:

- `results` - An array of objects, each containing information about one task.
  This field is omitted from the result when you query by `delete_id`.
  Task objects contain the following fields:
  - `delete_id` - The ID for this purge if you query by `room_id`.
  - `status` - The status will be one of:
    - `shutting_down` - The process is removing users from the room.
    - `purging` - The process is purging the room and event data from the database.
    - `complete` - The process has completed successfully.
    - `failed` - The process is aborted, an error has occurred.
  - `error` - A string that shows an error message if `status` is `failed`.
    Otherwise this field is hidden.
  - `shutdown_room` - An object containing information about the result of shutting down the room.
    *Note:* The result is shown after removing the room members.
    The delete process can still be running. Please pay attention to the `status`.
    - `kicked_users` - An array of users (`user_id`) that were kicked.
    - `failed_to_kick_users` - An array of users (`user_id`) that were not kicked.
    - `local_aliases` - An array of strings representing the local aliases that were
      migrated from the old room to the new.
    - `new_room_id` - A string representing the room ID of the new room, or `null` if
      no such room was created.
* `kicked_users` - An array of users (`user_id`) that were kicked.
* `failed_to_kick_users` - An array of users (`user_id`) that were not kicked.
* `local_aliases` - An array of strings representing the local aliases that were migrated from
  the old room to the new.
* `new_room_id` - A string representing the room ID of the new room.

## Undoing room deletions

*Note*: This guide may be outdated by the time you read it. By nature of room deletions being performed at the database level,
## Undoing room shutdowns

*Note*: This guide may be outdated by the time you read it. By nature of room shutdowns being performed at the database level,
the structure can and does change without notice.

First, it's important to understand that a room deletion is very destructive. Undoing a deletion is not as simple as pretending it
First, it's important to understand that a room shutdown is very destructive. Undoing a shutdown is not as simple as pretending it
never happened - work has to be done to move forward instead of resetting the past. In fact, in some cases it might not be possible
to recover at all:

* If the room was invite-only, your users will need to be re-invited.
* If the room no longer has any members at all, it'll be impossible to rejoin.
* The first user to rejoin will have to do so via an alias on a different
  server (or receive an invite from a user on a different server).
* The first user to rejoin will have to do so via an alias on a different server.

With all that being said, if you still want to try and recover the room:

1. If the room was `block`ed, you must unblock it on your server. This can be
   accomplished as follows:
   1. For safety reasons, shut down Synapse.
   2. In the database, run `DELETE FROM blocked_rooms WHERE room_id = '!example:example.org';`
      * For caution: it's recommended to run this in a transaction: `BEGIN; DELETE ...;`, verify you got 1 result, then `COMMIT;`.
      * The room ID is the same one supplied to the shutdown room API, not the Content Violation room.
   3. Restart Synapse.
1. For safety reasons, shut down Synapse.
2. In the database, run `DELETE FROM blocked_rooms WHERE room_id = '!example:example.org';`
   * For caution: it's recommended to run this in a transaction: `BEGIN; DELETE ...;`, verify you got 1 result, then `COMMIT;`.
   * The room ID is the same one supplied to the delete room API, not the Content Violation room.
3. Restart Synapse.
You will have to manually handle, if you so choose, the following:

   This step is unnecessary if `block` was not set.
* Aliases that would have been redirected to the Content Violation room.
* Users that would have been booted from the room (and will have been force-joined to the Content Violation room).
* Removal of the Content Violation room if desired.

2. Any room aliases on your server that pointed to the deleted room may have
   been deleted, or redirected to the Content Violation room. These will need
   to be restored manually.
## Deprecated endpoint

3. Users on your server that were in the deleted room will have been kicked
   from the room. Consider whether you want to update their membership
   (possibly via the [Edit Room Membership API](room_membership.md)) or let
   them handle rejoining themselves.
The previous deprecated API will be removed in a future release, it was:

4. If `new_room_user_id` was given, a 'Content Violation' will have been
   created. Consider whether you want to delete that room.
```
POST /_synapse/admin/v1/rooms/<room_id>/delete
```

It behaves the same way as the current endpoint, except for the path and the method.

# Make Room Admin API

@@ -775,16 +548,16 @@ By default the server admin (the caller) is granted power, but another user can
optionally be specified, e.g.:

```
POST /_synapse/admin/v1/rooms/<room_id_or_alias>/make_room_admin
{
    "user_id": "@foo:example.com"
}
```

# Forward Extremities Admin API

Enables querying and deleting forward extremities from rooms. When a lot of forward
extremities accumulate in a room, performance can become degraded. For details, see
[#1760](https://github.com/matrix-org/synapse/issues/1760).

## Check for forward extremities

@@ -792,7 +565,7 @@ extremities accumulate in a room, performance can become degraded. For details,
To check the status of forward extremities for a room:

```
GET /_synapse/admin/v1/rooms/<room_id_or_alias>/forward_extremities
```

A response as follows will be returned:

@@ -808,12 +581,12 @@ A response as follows will be returned:
            "received_ts": 1611263016761
        }
    ]
}
```
|
||||
|
||||

## Deleting forward extremities

**WARNING**: Please ensure you know what you're doing and have read
the related issue [#1760](https://github.com/matrix-org/synapse/issues/1760).
Under no situations should this API be executed as an automated maintenance task!

@@ -821,7 +594,7 @@ If a room has lots of forward extremities, the extra can be
deleted as follows:

```
DELETE /_synapse/admin/v1/rooms/<room_id_or_alias>/forward_extremities
```

A response as follows will be returned, indicating the amount of forward extremities

@@ -45,4 +45,4 @@ Once the notice has been sent, the API will return the following response:
```

Note that server notices must be enabled in `homeserver.yaml` before this API
can be used. See [the server notices documentation](../server_notices.md) for more information.

102
docs/admin_api/shutdown_room.md
Normal file

@@ -0,0 +1,102 @@

# Deprecated: Shutdown room API

**The old Shutdown room API is deprecated and will be removed in a future release.
See the new [Delete Room API](rooms.md#delete-room-api) for more details.**

Shuts down a room, preventing new joins and moves local users and room aliases automatically
to a new room. The new room will be created with the user specified by the
`new_room_user_id` parameter as room administrator and will contain a message
explaining what happened. Users invited to the new room will have power level
-10 by default, and thus be unable to speak. The old room's power levels will be changed to
disallow any further invites or joins.

The local server will only have the power to move local user and room aliases to
the new room. Users on other servers will be unaffected.

## API

You will need to authenticate with an access token for an admin user.

### URL

`POST /_synapse/admin/v1/shutdown_room/{room_id}`

### URL Parameters

* `room_id` - The ID of the room (e.g. `!someroom:example.com`)

### JSON Body Parameters

* `new_room_user_id` - Required. A string representing the user ID of the user that will admin
  the new room that all users in the old room will be moved to.
* `room_name` - Optional. A string representing the name of the room that new users will be
  invited to.
* `message` - Optional. A string containing the first message that will be sent as
  `new_room_user_id` in the new room. Ideally this will clearly convey why the
  original room was shut down.

If not specified, the default value of `room_name` is "Content Violation
Notification". The default value of `message` is "Sharing illegal content on
this server is not permitted and rooms in violation will be blocked."

### Response Parameters

* `kicked_users` - An integer number representing the number of users that
  were kicked.
* `failed_to_kick_users` - An integer number representing the number of users
  that were not kicked.
* `local_aliases` - An array of strings representing the local aliases that were migrated from
  the old room to the new.
* `new_room_id` - A string representing the room ID of the new room.

## Example

Request:

```
POST /_synapse/admin/v1/shutdown_room/!somebadroom%3Aexample.com

{
    "new_room_user_id": "@someuser:example.com",
    "room_name": "Content Violation Notification",
    "message": "Bad Room has been shutdown due to content violations on this server. Please review our Terms of Service."
}
```

Response:

```
{
    "kicked_users": 5,
    "failed_to_kick_users": 0,
    "local_aliases": ["#badroom:example.com", "#evilsaloon:example.com"],
    "new_room_id": "!newroomid:example.com"
}
```
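
For illustration, the same request can be issued with `curl` (a sketch: the token, server address and body values are placeholders, and the room ID must be URL-encoded):

```sh
# A sketch: shut down a room via this deprecated API (placeholders throughout).
curl -X POST \
  --header "Authorization: Bearer <admin_access_token>" \
  --header "Content-Type: application/json" \
  --data '{"new_room_user_id": "@someuser:example.com"}' \
  "http://localhost:8008/_synapse/admin/v1/shutdown_room/%21somebadroom%3Aexample.com"
```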

## Undoing room shutdowns

*Note*: This guide may be outdated by the time you read it. By nature of room shutdowns being performed at the database level,
the structure can and does change without notice.

First, it's important to understand that a room shutdown is very destructive. Undoing a shutdown is not as simple as pretending it
never happened - work has to be done to move forward instead of resetting the past. In fact, in some cases it might not be possible
to recover at all:

* If the room was invite-only, your users will need to be re-invited.
* If the room no longer has any members at all, it'll be impossible to rejoin.
* The first user to rejoin will have to do so via an alias on a different server.

With all that being said, if you still want to try and recover the room:

1. For safety reasons, shut down Synapse.
2. In the database, run `DELETE FROM blocked_rooms WHERE room_id = '!example:example.org';`
   * For caution: it's recommended to run this in a transaction: `BEGIN; DELETE ...;`, verify you got 1 result, then `COMMIT;` (see the sketch below).
   * The room ID is the same one supplied to the shutdown room API, not the Content Violation room.
3. Restart Synapse.
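
As a minimal sketch of that cautious variant (the room ID is a placeholder; run this from your usual database shell, e.g. `psql`):

```
BEGIN;
DELETE FROM blocked_rooms WHERE room_id = '!example:example.org';
-- The database should report exactly 1 row deleted;
-- if not, run ROLLBACK; instead of COMMIT;.
COMMIT;
```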

You will have to manually handle, if you so choose, the following:

* Aliases that would have been redirected to the Content Violation room.
* Users that would have been booted from the room (and will have been force-joined to the Content Violation room).
* Removal of the Content Violation room if desired.

@@ -21,15 +21,11 @@ It returns a JSON body like the following:
    "threepids": [
        {
            "medium": "email",
            "address": "<user_mail_1>",
            "added_at": 1586458409743,
            "validated_at": 1586458409743
        },
        {
            "medium": "email",
            "address": "<user_mail_2>",
            "added_at": 1586458409743,
            "validated_at": 1586458409743
        }
    ],
    "avatar_url": "<avatar_url>",
@@ -50,8 +46,7 @@ It returns a JSON body like the following:
            "auth_provider": "<provider2>",
            "external_id": "<user_id_provider_2>"
        }
    ],
    "user_type": null
}
```

@@ -86,60 +81,38 @@ with a body of:
        "address": "<user_mail_2>"
      }
    ],
    "external_ids": [
      {
        "auth_provider": "<provider1>",
        "external_id": "<user_id_provider_1>"
      },
      {
        "auth_provider": "<provider2>",
        "external_id": "<user_id_provider_2>"
      }
    ],
    "avatar_url": "<avatar_url>",
    "admin": false,
    "deactivated": false,
    "user_type": null
}
```

To use it, you will need to authenticate by providing an `access_token` for a
server admin: [Admin API](../usage/administration/admin_api)

Returns HTTP status code:
- `201` - When a new user object was created.
- `200` - When a user was modified.

URL parameters:

- `user_id`: fully-qualified user id: for example, `@user:server.com`.

Body parameters:

- `password` - string, optional. If provided, the user's password is updated and all
  devices are logged out.
- `displayname` - string, optional, defaults to the value of `user_id`.
- `threepids` - array, optional, allows setting the third-party IDs (email, msisdn)
  belonging to a user.
  - `medium` - string. Kind of third-party ID, either `email` or `msisdn`.
  - `address` - string. Value of third-party ID.
- `external_ids` - array, optional. Allow setting the identifier of the external identity
  provider for SSO (Single sign-on). Details in
  [Sample Configuration File](../usage/configuration/homeserver_sample_config.html)
  section `sso` and `oidc_providers`.
  - `auth_provider` - string. ID of the external identity provider. Value of `idp_id`
    in homeserver configuration.
  - `external_id` - string, user ID in the external identity provider.
- `avatar_url` - string, optional, must be a
  [MXC URI](https://matrix.org/docs/spec/client_server/r0.6.0#matrix-content-mxc-uris).
- `admin` - bool, optional, defaults to `false`.
- `deactivated` - bool, optional. If unspecified, deactivation state will be left
  unchanged on existing accounts and set to `false` for new accounts.
  A user cannot be erased by deactivating with this API. For details on
  deactivating users see [Deactivate Account](#deactivate-account).
- `user_type` - string or null, optional. If provided, the user type will be
  adjusted. If `null` given, the user type will be cleared. Other
  allowed options are: `bot` and `support`.

If the user already exists then optional parameters default to the current value.
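
As an illustrative sketch (assuming the v2 user admin endpoint, `PUT /_synapse/admin/v2/users/<user_id>`, with placeholder values throughout), a request to create or modify an account might look like:

```sh
# A sketch: create or modify @user:server.com via the admin API (placeholders shown).
curl -X PUT \
  --header "Authorization: Bearer <admin_access_token>" \
  --header "Content-Type: application/json" \
  --data '{"displayname": "User", "admin": false, "deactivated": false}' \
  "http://localhost:8008/_synapse/admin/v2/users/@user:server.com"
```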

@@ -171,8 +144,7 @@ A response body like the following is returned:
            "deactivated": 0,
            "shadow_banned": 0,
            "displayname": "<User One>",
            "avatar_url": null,
            "creation_ts": 1560432668000
        }, {
            "name": "<user_id2>",
            "is_guest": 0,
@@ -181,8 +153,7 @@ A response body like the following is returned:
            "deactivated": 0,
            "shadow_banned": 0,
            "displayname": "<User Two>",
            "avatar_url": "<avatar_url>",
            "creation_ts": 1561550621000
        }
    ],
    "next_token": "100",
@@ -226,12 +197,11 @@ The following parameters should be set in the URL:
  - `shadow_banned` - Users are ordered by `shadow_banned` status.
  - `displayname` - Users are ordered alphabetically by `displayname`.
  - `avatar_url` - Users are ordered alphabetically by avatar URL.
  - `creation_ts` - Users are ordered by when the user was created, in ms.

- `dir` - Direction of media order. Either `f` for forwards or `b` for backwards.
  Setting this value to `b` will reverse the above sort order. Defaults to `f`.

Caution. The database only has indexes on the columns `name` and `creation_ts`.
This means that if a different sort order is used (`is_guest`, `admin`,
`user_type`, `deactivated`, `shadow_banned`, `avatar_url` or `displayname`),
this can cause a large load on the database, especially for large environments.

@@ -252,7 +222,6 @@ The following fields are returned in the JSON response body:
  - `shadow_banned` - bool - Status if that user has been marked as shadow banned.
  - `displayname` - string - The user's display name if they have set one.
  - `avatar_url` - string - The user's avatar URL if they have set one.
  - `creation_ts` - integer - The user's creation timestamp in ms.

- `next_token`: string representing a positive integer - Indication for pagination. See above.
- `total` - integer - Total number of media.

@@ -346,7 +315,6 @@ The following actions are performed when deactivating an user:
- Remove all 3PIDs from the homeserver
- Delete all devices and E2EE keys
- Delete all access tokens
- Delete all pushers
- Delete the password hash
- Removal from all rooms the user is a member of
- Remove the user from the user directory
@@ -360,15 +328,6 @@ is set to `true`:
- Remove the user's avatar URL
- Mark the user as erased

The following actions are **NOT** performed. The list may be incomplete.

- Remove mappings of SSO IDs
- [Delete media uploaded](#delete-media-uploaded-by-a-user) by user (included avatar images)
- Delete sent and received messages
- Delete E2E cross-signing keys
- Remove the user's creation (registration) timestamp
- [Remove rate limit overrides](#override-ratelimiting-for-users)
- Remove from monthly active users

## Reset password

@@ -480,9 +439,8 @@ The following fields are returned in the JSON response body:

- `joined_rooms` - An array of `room_id`.
- `total` - Number of rooms.

## User media

### List media uploaded by a user

Gets a list of all local media that a specific `user_id` has created.
By default, the response is ordered by descending creation date and ascending media ID.
The newest media is on top. You can change the order with parameters

@@ -581,6 +539,7 @@ The following fields are returned in the JSON response body:

- `media` - An array of objects, each containing information about a media.
  Media objects contain the following fields:

  - `created_ts` - integer - Timestamp when the content was uploaded in ms.
  - `last_access_ts` - integer - Timestamp when the content was last accessed in ms.
  - `media_id` - string - The id used to refer to the media.
@@ -588,58 +547,13 @@ The following fields are returned in the JSON response body:
  - `media_type` - string - The MIME-type of the media.
  - `quarantined_by` - string - The user ID that initiated the quarantine request
    for this media.
  - `safe_from_quarantine` - bool - Status if this media is safe from quarantining.
  - `upload_name` - string - The name the media was uploaded with.

- `next_token`: integer - Indication for pagination. See above.
- `total` - integer - Total number of media.

### Delete media uploaded by a user

This API deletes the *local* media from the disk of your own server
that a specific `user_id` has created. This includes any local thumbnails.

This API will not affect media that has been uploaded to external
media repositories (e.g. https://github.com/turt2live/matrix-media-repo/).

By default, the API deletes media ordered by descending creation date and ascending media ID.
The newest media is deleted first. You can change the order with parameters
`order_by` and `dir`. If no `limit` is set the API deletes `100` files per request.

The API is:

```
DELETE /_synapse/admin/v1/users/<user_id>/media
```

To use it, you will need to authenticate by providing an `access_token` for a
server admin: [Admin API](../usage/administration/admin_api)

A response body like the following is returned:

```json
{
    "deleted_media": [
        "abcdefghijklmnopqrstuvwx"
    ],
    "total": 1
}
```

The following fields are returned in the JSON response body:

* `deleted_media`: an array of strings - List of deleted `media_id`
* `total`: integer - Total number of deleted `media_id`

**Note**: There is no `next_token`. This is not useful for deleting media, because
after deleting media the remaining media have a new order.

**Parameters**

This API has the same parameters as
[List media uploaded by a user](#list-media-uploaded-by-a-user).
With the parameters you can for example limit the number of files to delete at once or
delete largest/smallest or newest/oldest files first.
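
As a sketch (placeholder token and user ID; parameter semantics follow the list API referenced above):

```sh
# A sketch: delete up to five of a user's media files, ordered by creation timestamp.
curl -X DELETE \
  --header "Authorization: Bearer <admin_access_token>" \
  "http://localhost:8008/_synapse/admin/v1/users/@user:server.com/media?limit=5&order_by=created_ts"
```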

## Login as a user

Get an access token that can be used to authenticate as that user. Useful for
@@ -948,7 +862,7 @@ The following fields are returned in the JSON response body:

See also the
[Client-Server API Spec on pushers](https://matrix.org/docs/spec/client_server/latest#get-matrix-client-r0-pushers).

## Controlling whether a user is shadow-banned

Shadow-banning is a useful tool for moderating malicious or egregiously abusive users.
A shadow-banned user receives successful responses to their client-server API requests,
@@ -961,22 +875,16 @@ or broken behaviour for the client. A shadow-banned user will not receive any
notification and it is generally more appropriate to ban or kick abusive users.
A shadow-banned user will be unable to contact anyone on the server.

To shadow-ban a user the API is:

```
POST /_synapse/admin/v1/users/<user_id>/shadow_ban
```

To un-shadow-ban a user the API is:

```
DELETE /_synapse/admin/v1/users/<user_id>/shadow_ban
```

To use it, you will need to authenticate by providing an `access_token` for a
server admin: [Admin API](../usage/administration/admin_api)

An empty JSON dict is returned in both cases.
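
For instance (a sketch, with a placeholder token and server address):

```sh
# A sketch: shadow-ban a user, then lift the ban again.
curl -X POST --header "Authorization: Bearer <admin_access_token>" \
  "http://localhost:8008/_synapse/admin/v1/users/@abuser:server.com/shadow_ban"
curl -X DELETE --header "Authorization: Bearer <admin_access_token>" \
  "http://localhost:8008/_synapse/admin/v1/users/@abuser:server.com/shadow_ban"
```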

**Parameters**

@@ -1101,22 +1009,3 @@ The following parameters should be set in the URL:

- `user_id` - The fully qualified MXID: for example, `@user:server.com`. The user must
  be local.

### Check username availability

Checks to see if a username is available, and valid, for the server. See [the client-server
API](https://matrix.org/docs/spec/client_server/r0.6.0#get-matrix-client-r0-register-available)
for more information.

This endpoint will work even if registration is disabled on the server, unlike
`/_matrix/client/r0/register/available`.

The API is:

```
GET /_synapse/admin/v1/username_available?username=$localpart
```

The request and response format is the same as the [/_matrix/client/r0/register/available](https://matrix.org/docs/spec/client_server/r0.6.0#get-matrix-client-r0-register-available) API.

To use it, you will need to authenticate by providing an `access_token` for a
server admin: [Admin API](../usage/administration/admin_api)

@@ -7,7 +7,7 @@

## Server to Server Stack

To use the server to server stack, homeservers should only need to
interact with the Messaging layer.

The server to server side of things is designed into 4 distinct layers:
@@ -23,7 +23,7 @@ Server with a domain specific API.

1. **Messaging Layer**

   This is what the rest of the homeserver hits to send messages, join rooms,
   etc. It also allows you to register callbacks for when it gets notified by
   lower levels that e.g. a new message has been received.

@@ -45,7 +45,7 @@ Server with a domain specific API.

   For incoming PDUs, it has to check the PDUs it references to see
   if we have missed any. If we have, go and ask someone (another
   homeserver) for it.

3. **Transaction Layer**

@@ -10,9 +10,7 @@ The necessary tools are detailed below.

First install them with:

```sh
pip install -e ".[lint,mypy]"
```

- **black**

@@ -23,9 +21,7 @@ pip install -e ".[lint,mypy]"

Have `black` auto-format your code (it shouldn't change any
functionality) with:

```sh
black . --exclude="\.tox|build|env"
```

- **flake8**

@@ -34,9 +30,7 @@ pip install -e ".[lint,mypy]"

Check all application and test code with:

```sh
flake8 synapse tests
```

- **isort**

@@ -45,9 +39,7 @@ pip install -e ".[lint,mypy]"

Auto-fix imports with:

```sh
isort -rc synapse tests
```

`-rc` means to recursively search the given directories.

@@ -74,19 +66,15 @@ save as it takes a while and is very resource intensive.

Example:

```python
from synapse.types import UserID
...
user_id = UserID(local, server)
```

is preferred over:

```python
from synapse import types
...
user_id = types.UserID(local, server)
```

(or any other variant).

@@ -146,30 +134,28 @@ Some guidelines follow:

Example:

```yaml
## Frobnication ##

# The frobnicator will ensure that all requests are fully frobnicated.
# To enable it, uncomment the following.
#
#frobnicator_enabled: true

# By default, the frobnicator will frobnicate with the default frobber.
# The following will make it use an alternative frobber.
#
#frobincator_frobber: special_frobber

# Settings for the frobber
#
frobber:
  # frobbing speed. Defaults to 1.
  #
  #speed: 10

  # frobbing distance. Defaults to 1000.
  #
  #distance: 100
```

Note that the sample configuration is generated from the synapse code
and is maintained by a script, `scripts-dev/generate_sample_config`.

@@ -99,7 +99,7 @@ construct URIs where users can give their consent.

see if an unauthenticated user is viewing the page. This is typically
wrapped around the form that would be used to actually agree to the document:

```html
{% if not public_version %}
  <!-- The variables used here are only provided when the 'u' param is given to the homeserver -->
  <form method="post" action="consent">
@@ -152,7 +152,7 @@ version of the policy. To do so:

* ensure that the consent resource is configured, as in the previous section

* ensure that server notices are configured, as in [the server notice documentation](server_notices.md).

* Add `server_notice_content` under `user_consent` in `homeserver.yaml`. For
  example:

@@ -1,8 +1,4 @@
# Delegation of incoming federation traffic

In the following documentation, we use the term `server_name` to refer to that setting
in your homeserver configuration file. It appears at the ends of user ids, and tells
other homeservers where they can find your server.

By default, other homeservers will expect to be able to reach yours via
your `server_name`, on port 8448. For example, if you set your `server_name`
@@ -16,21 +12,13 @@ to a different server and/or port (e.g. `synapse.example.com:443`).

## .well-known delegation

To use this method, you need to be able to configure the server at
`https://<server_name>` to serve a file at
`https://<server_name>/.well-known/matrix/server`. There are two ways to do this, shown below.

Note that the `.well-known` file is hosted on the default port for `https` (port 443).

### External server

For maximum flexibility, you need to configure an external server such as nginx, Apache
or HAProxy to serve the `https://<server_name>/.well-known/matrix/server` file. Setting
up such a server is out of the scope of this documentation, but note that it is often
possible to configure your [reverse proxy](reverse_proxy.md) for this.

The URL `https://<server_name>/.well-known/matrix/server` should be configured
to return a JSON structure containing the key `m.server` like this:

```json
{
@@ -38,9 +26,8 @@ return a JSON structure containing the key `m.server` like this:
}
```

In our example (where we want federation traffic to be routed to
`https://synapse.example.com`, on port 443), this would mean that
`https://example.com/.well-known/matrix/server` should return:

```json
{
@@ -51,29 +38,16 @@ In our example (where we want federation traffic to be routed to

Note, specifying a port is optional. If no port is specified, then it defaults
to 8448.
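
Once the file is in place, you can sanity-check what your server advertises (a sketch, using the example domain above):

```sh
# A sketch: verify the delegation file that other homeservers will fetch.
curl https://example.com/.well-known/matrix/server
# Expected output for the example above:
# {"m.server": "synapse.example.com:443"}
```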

### Serving a `.well-known/matrix/server` file with Synapse

If you are able to set up your domain so that `https://<server_name>` is routed to
Synapse (i.e., the only change needed is to direct federation traffic to port 443
instead of port 8448), then it is possible to configure Synapse to serve a suitable
`.well-known/matrix/server` file. To do so, add the following to your `homeserver.yaml`
file:

```yaml
serve_server_wellknown: true
```

**Note**: this *only* works if `https://<server_name>` is routed to Synapse, so is
generally not suitable if Synapse is hosted at a subdomain such as
`https://synapse.example.com`.

With .well-known delegation, federating servers will check for a valid TLS
certificate for the delegated hostname (in our example: `synapse.example.com`).

## SRV DNS record delegation

It is also possible to do delegation using a SRV DNS record. However, that is generally
not recommended, as it can be difficult to configure the TLS certificates correctly in
this case, and it offers little advantage over `.well-known` delegation.

However, if you really need it, you can find some documentation on what such a
record should look like and how Synapse will use it in [the Matrix
specification](https://matrix.org/docs/spec/server_server/latest#resolving-server-names).

@@ -94,9 +68,27 @@ wouldn't need any delegation set up.
domain `server_name` points to, you will need to let other servers know how to
find it using delegation.

### Should I use a reverse proxy for federation traffic?

Generally, using a reverse proxy for both the federation and client traffic is a good
idea, since it saves handling TLS traffic in Synapse. See
[the reverse proxy documentation](reverse_proxy.md) for information on setting up a
reverse proxy.

### Do I still need to give my TLS certificates to Synapse if I am using a reverse proxy?

This is no longer necessary. If you are using a reverse proxy for all of your
TLS traffic, then you can set `no_tls: True` in the Synapse config.

In that case, the only reason Synapse needs the certificate is to populate a legacy
`tls_fingerprints` field in the federation API. This is ignored by Synapse 0.99.0
and later, and the only time pre-0.99 Synapses will check it is when attempting to
fetch the server keys - and generally this is delegated via `matrix.org`, which
is running a modern version of Synapse.

### Do I need the same certificate for the client and federation port?

No. There is nothing stopping you from using different certificates,
particularly if you are using a reverse proxy.

@@ -8,23 +8,23 @@ easy to run CAS implementation built on top of Django.

1. Create a new virtualenv: `python3 -m venv <your virtualenv>`
2. Activate your virtualenv: `source /path/to/your/virtualenv/bin/activate`
3. Install Django and django-mama-cas:
   ```sh
   python -m pip install "django<3" "django-mama-cas==2.4.0"
   ```
4. Create a Django project in the current directory:
   ```sh
   django-admin startproject cas_test .
   ```
5. Follow the [install directions](https://django-mama-cas.readthedocs.io/en/latest/installation.html#configuring) for django-mama-cas
6. Setup the SQLite database: `python manage.py migrate`
7. Create a user:
   ```sh
   python manage.py createsuperuser
   ```
   1. Use whatever you want as the username and password.
   2. Leave the other fields blank.
8. Use the built-in Django test server to serve the CAS endpoints on port 8000:
   ```sh
   python manage.py runserver
   ```

@@ -9,7 +9,7 @@ commits each of which contains a single change building on what came
before. Here, by way of an arbitrary example, is the top of `git log --graph
b2dba0607`:

<img src="img/git/clean.png" alt="clean git graph" width="500px">

Note how the commit comment explains clearly what is changing and why. Also
note the *absence* of merge commits, as well as the absence of commits called
@@ -61,7 +61,7 @@ Ok, so that's what we'd like to achieve. How do we achieve it?
The TL;DR is: when you come to merge a pull request, you *probably* want to
“squash and merge”:

.

(This applies whether you are merging your own PR, or that of another
contributor.)
@@ -105,7 +105,7 @@ complicated. Here's how we do it.

Let's start with a picture:



It looks complicated, but it's really not. There's one basic rule: *anyone* is
free to merge from *any* more-stable branch to *any* less-stable branch at
| Before Width: | Height: | Size: 70 KiB | After Width: | Height: | Size: 70 KiB |
| Before Width: | Height: | Size: 108 KiB | After Width: | Height: | Size: 108 KiB |
| Before Width: | Height: | Size: 29 KiB | After Width: | Height: | Size: 29 KiB |
@@ -1,9 +1,10 @@
# How to test SAML as a developer without a server

https://fujifish.github.io/samling/samling.html (https://github.com/fujifish/samling) is a great resource for being able to tinker with the
SAML options within Synapse without needing to deploy and configure a complicated software stack.

To make Synapse (and therefore Element) use it:

1. Use the samling.html URL above or deploy your own and visit the IdP Metadata tab.
2. Copy the XML to your clipboard.
@@ -15,7 +16,7 @@ To make Synapse (and therefore Element) use it:
       sp_config:
         allow_unknown_attributes: true # Works around a bug with AVA Hashes: https://github.com/IdentityPython/pysaml2/issues/388
         metadata:
           local: ["samling.xml"]
   ```
5. Ensure that your `homeserver.yaml` has a setting for `public_baseurl`:
   ```yaml
@@ -25,9 +26,9 @@ To make Synapse (and therefore Element) use it:
   the dependencies are installed and ready to go.
7. Restart Synapse.

Then in Element:

1. Visit the login page and point Element towards your homeserver using the `public_baseurl` above.
2. Click the Single Sign-On button.
3. On the samling page, enter a Name Identifier and add a SAML Attribute for `uid=your_localpart`.
   The response must also be signed.
@@ -1,473 +1,7 @@
<!--
Include the contents of CONTRIBUTING.md from the project root (where GitHub likes it
to be)
-->
# Contributing

This document aims to get you started with contributing to Synapse!

# 1. Who can contribute to Synapse?

Everyone is welcome to contribute code to [matrix.org
projects](https://github.com/matrix-org), provided that they are willing to
license their contributions under the same license as the project itself. We
follow a simple 'inbound=outbound' model for contributions: the act of
submitting an 'inbound' contribution means that the contributor agrees to
license the code under the same terms as the project's overall 'outbound'
license - in our case, this is almost always Apache Software License v2 (see
[LICENSE](https://github.com/matrix-org/synapse/blob/develop/LICENSE)).

# 2. What do I need?

If you are running Windows, the Windows Subsystem for Linux (WSL) is strongly
recommended for development. More information about WSL can be found at
<https://docs.microsoft.com/en-us/windows/wsl/install>. Running Synapse natively
on Windows is not officially supported.

The code of Synapse is written in Python 3. To do pretty much anything, you'll need [a recent version of Python 3](https://wiki.python.org/moin/BeginnersGuide/Download).

The source code of Synapse is hosted on GitHub. You will also need [a recent version of git](https://github.com/git-guides/install-git).

For some tests, you will need [a recent version of Docker](https://docs.docker.com/get-docker/).

# 3. Get the source.

The preferred and easiest way to contribute changes is to fork the relevant
project on GitHub, and then [create a pull request](
https://help.github.com/articles/using-pull-requests/) to ask us to pull your
changes into our repo.

Please base your changes on the `develop` branch.

```sh
git clone git@github.com:YOUR_GITHUB_USER_NAME/synapse.git
git checkout develop
```

If you need help getting started with git, this is beyond the scope of the document, but you
can find many good git tutorials on the web.

# 4. Install the dependencies

Once you have installed Python 3 and added the source, please open a terminal and
setup a *virtualenv*, as follows:

```sh
cd path/where/you/have/cloned/the/repository
python3 -m venv ./env
source ./env/bin/activate
pip install -e ".[all,dev]"
pip install tox
```

This will install the developer dependencies for the project.

# 5. Get in touch.

Join our developer community on Matrix: [#synapse-dev:matrix.org](https://matrix.to/#/#synapse-dev:matrix.org)!

# 6. Pick an issue.

Fix your favorite problem or perhaps find a [Good First Issue](https://github.com/matrix-org/synapse/issues?q=is%3Aopen+is%3Aissue+label%3A%22Good+First+Issue%22)
to work on.

# 7. Turn coffee into code and documentation!

There is a growing amount of documentation located in the
[`docs`](https://github.com/matrix-org/synapse/tree/develop/docs)
directory, with a rendered version [available online](https://matrix-org.github.io/synapse).
This documentation is intended primarily for sysadmins running their
own Synapse instance, as well as developers interacting externally with
Synapse.
[`docs/development`](https://github.com/matrix-org/synapse/tree/develop/docs/development)
exists primarily to house documentation for
Synapse developers.
[`docs/admin_api`](https://github.com/matrix-org/synapse/tree/develop/docs/admin_api) houses documentation
regarding Synapse's Admin API, which is used mostly by sysadmins and external
service developers.

Synapse's code style is documented [here](../code_style.md). Please follow
it, including the conventions for the [sample configuration
file](../code_style.md#configuration-file-format).

We welcome improvements and additions to our documentation itself! When
writing new pages, please
[build `docs` to a book](https://github.com/matrix-org/synapse/tree/develop/docs#adding-to-the-documentation)
to check that your contributions render correctly. The docs are written in
[GitHub-Flavoured Markdown](https://guides.github.com/features/mastering-markdown/).

Some documentation also exists in [Synapse's GitHub
Wiki](https://github.com/matrix-org/synapse/wiki), although this is primarily
contributed to by community authors.

# 8. Test, test, test!
<a name="test-test-test"></a>

While you're developing and before submitting a patch, you'll
want to test your code.

## Run the linters.

The linters look at your code and do two things:

- ensure that your code follows the coding style adopted by the project;
- catch a number of errors in your code.

They're pretty fast, don't hesitate!

```sh
source ./env/bin/activate
./scripts-dev/lint.sh
```

Note that this script *will modify your files* to fix styling errors.
Make sure that you have saved all your files.

If you wish to restrict the linters to only the files changed since the last commit
(much faster!), you can instead run:

```sh
source ./env/bin/activate
./scripts-dev/lint.sh -d
```

Or if you know exactly which files you wish to lint, you can instead run:

```sh
source ./env/bin/activate
./scripts-dev/lint.sh path/to/file1.py path/to/file2.py path/to/folder
```

## Run the unit tests (Twisted trial).

The unit tests run parts of Synapse, including your changes, to see if anything
was broken. They are slower than the linters but will typically catch more errors.

```sh
source ./env/bin/activate
trial tests
```

If you wish to only run *some* unit tests, you may specify
another module instead of `tests` - or a test class or a method:

```sh
source ./env/bin/activate
trial tests.rest.admin.test_room tests.handlers.test_admin.ExfiltrateData.test_invite
```

If your tests fail, you may wish to look at the logs (the default log level is `ERROR`):

```sh
less _trial_temp/test.log
```

To increase the log level for the tests, set `SYNAPSE_TEST_LOG_LEVEL`:

```sh
SYNAPSE_TEST_LOG_LEVEL=DEBUG trial tests
```

### Running tests under PostgreSQL

Invoking `trial` as above will use an in-memory SQLite database. This is great for
quick development and testing. However, we recommend using a PostgreSQL database
in production (and indeed, we have some code paths specific to each database).
This means that we need to run our unit tests against PostgreSQL too. Our CI does
this automatically for pull requests and release candidates, but it's sometimes
useful to reproduce this locally.

To do so, [configure Postgres](../postgres.md) and run `trial` with the
following environment variables matching your configuration:

- `SYNAPSE_POSTGRES` to anything nonempty
- `SYNAPSE_POSTGRES_HOST`
- `SYNAPSE_POSTGRES_USER`
- `SYNAPSE_POSTGRES_PASSWORD`

For example:

```shell
export SYNAPSE_POSTGRES=1
export SYNAPSE_POSTGRES_HOST=localhost
export SYNAPSE_POSTGRES_USER=postgres
export SYNAPSE_POSTGRES_PASSWORD=mydevenvpassword
trial
```

#### Prebuilt container

Since configuring PostgreSQL can be fiddly, we can make use of a pre-made
Docker container to set up PostgreSQL and run our tests for us. To do so, run

```shell
scripts-dev/test_postgresql.sh
```

Any extra arguments to the script will be passed to `tox` and then to `trial`,
so we can run a specific test in this container with e.g.

```shell
scripts-dev/test_postgresql.sh tests.replication.test_sharded_event_persister.EventPersisterShardTestCase
```

The container creates a folder in your Synapse checkout called
`.tox-pg-container` and uses this as a tox environment. The output of any
`trial` runs goes into `_trial_temp` in your synapse source directory — the same
as running `trial` directly on your host machine.

## Run the integration tests ([Sytest](https://github.com/matrix-org/sytest)).

The integration tests are a more comprehensive suite of tests. They
run a full version of Synapse, including your changes, to check if
anything was broken. They are slower than the unit tests but will
typically catch more errors.

The following command will let you run the integration test with the most common
configuration:

```sh
$ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:buster
```

This configuration should generally cover your needs. For more details about other configurations, see [documentation in the SyTest repo](https://github.com/matrix-org/sytest/blob/develop/docker/README.md).

## Run the integration tests ([Complement](https://github.com/matrix-org/complement)).

[Complement](https://github.com/matrix-org/complement) is a suite of black box tests that can be run on any homeserver implementation. It can also be thought of as end-to-end (e2e) tests.

It's often nice to develop on Synapse and write Complement tests at the same time.
Here is how to run your local Synapse checkout against your local Complement checkout.

(checkout [`complement`](https://github.com/matrix-org/complement) alongside your `synapse` checkout)
```sh
COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh
```

To run a specific test file, you can pass the test name at the end of the command. The name passed comes from the naming structure in your Complement tests. If you're unsure of the name, you can do a full run and copy it from the test output:

```sh
COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh TestBackfillingHistory
```

To run a specific test, you can specify the whole name structure:

```sh
COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh TestBackfillingHistory/parallel/Backfilled_historical_events_resolve_with_proper_state_in_correct_order
```

### Access database for homeserver after Complement test runs.

If you're curious what the database looks like after you run some tests, here are some steps to get you going in Synapse:

1. In your Complement test comment out `defer deployment.Destroy(t)` and replace with `defer time.Sleep(2 * time.Hour)` to keep the homeserver running after the tests complete
1. Start the Complement tests
1. Find the name of the container, `docker ps -f name=complement_` (this will filter for just the Complement related Docker containers)
1. Access the container replacing the name with what you found in the previous step: `docker exec -it complement_1_hs_with_application_service.hs1_2 /bin/bash`
1. Install sqlite (database driver), `apt-get update && apt-get install -y sqlite3`
1. Then run `sqlite3` and open the database `.open /conf/homeserver.db` (this db path comes from the Synapse homeserver.yaml)

# 9. Submit your patch.

Once you're happy with your patch, it's time to prepare a Pull Request.

To prepare a Pull Request, please:

1. verify that [all the tests pass](#test-test-test), including the coding style;
2. [sign off](#sign-off) your contribution;
3. `git push` your commit to your fork of Synapse;
4. on GitHub, [create the Pull Request](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request);
5. add a [changelog entry](#changelog) and push it to your Pull Request;
6. for most contributors, that's all - however, if you are a member of the organization `matrix-org`, on GitHub, please request a review from `matrix.org / Synapse Core`.
7. if you need to update your PR, please avoid rebasing and just add new commits to your branch.

## Changelog

All changes, even minor ones, need a corresponding changelog / newsfragment
entry. These are managed by [Towncrier](https://github.com/hawkowl/towncrier).

To create a changelog entry, make a new file in the `changelog.d` directory named
in the format of `PRnumber.type`. The type can be one of the following:

* `feature`
* `bugfix`
* `docker` (for updates to the Docker image)
* `doc` (for updates to the documentation)
* `removal` (also used for deprecations)
* `misc` (for internal-only changes)

This file will become part of our [changelog](
https://github.com/matrix-org/synapse/blob/master/CHANGES.md) at the next
release, so the content of the file should be a short description of your
change in the same style as the rest of the changelog. The file can contain Markdown
formatting, and should end with a full stop (.) or an exclamation mark (!) for
consistency.

Adding credits to the changelog is encouraged, we value your
contributions and would like to have you shouted out in the release notes!

For example, a fix in PR #1234 would have its changelog entry in
`changelog.d/1234.bugfix`, and contain content like:

> The security levels of Florbs are now validated when received
> via the `/federation/florb` endpoint. Contributed by Jane Matrix.
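
In shell terms, that amounts to something like this (a sketch, reusing the hypothetical PR #1234 from above):

```sh
# A sketch: create the newsfragment for the (hypothetical) PR #1234.
echo "The security levels of Florbs are now validated when received via the /federation/florb endpoint. Contributed by Jane Matrix." > changelog.d/1234.bugfix
```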

If there are multiple pull requests involved in a single bugfix/feature/etc,
then the content for each `changelog.d` file should be the same. Towncrier will
merge the matching files together into a single changelog entry when we come to
release.

### How do I know what to call the changelog file before I create the PR?

Obviously, you don't know if you should call your newsfile
`1234.bugfix` or `5678.bugfix` until you create the PR, which leads to a
chicken-and-egg problem.

There are two options for solving this:

1. Open the PR without a changelog file, see what number you got, and *then*
   add the changelog file to your branch (see [Updating your pull
   request](#updating-your-pull-request)), or:

1. Look at the [list of all
   issues/PRs](https://github.com/matrix-org/synapse/issues?q=), add one to the
   highest number you see, and quickly open the PR before somebody else claims
   your number.

   [This
   script](https://github.com/richvdh/scripts/blob/master/next_github_number.sh)
   might be helpful if you find yourself doing this a lot.

Sorry, we know it's a bit fiddly, but it's *really* helpful for us when we come
to put together a release!

### Debian changelog

Changes which affect the debian packaging files (in `debian`) are an
exception to the rule that all changes require a `changelog.d` file.

In this case, you will need to add an entry to the debian changelog for the
next release. For this, run the following command:

```
dch
```

This will make up a new version number (if there isn't already an unreleased
version in flight), and open an editor where you can add a new changelog entry.
(Our release process will ensure that the version number and maintainer name is
corrected for the release.)

If your change affects both the debian packaging *and* files outside the debian
directory, you will need both a regular newsfragment *and* an entry in the
debian changelog. (Though typically such changes should be submitted as two
separate pull requests.)

## Sign off

In order to have a concrete record that your contribution is intentional
and you agree to license it under the same terms as the project's license, we've adopted the
same lightweight approach that the Linux Kernel
[submitting patches process](
https://www.kernel.org/doc/html/latest/process/submitting-patches.html#sign-your-work-the-developer-s-certificate-of-origin),
[Docker](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), and many other
projects use: the DCO (Developer Certificate of Origin:
http://developercertificate.org/). This is a simple declaration that you wrote
the contribution or otherwise have the right to contribute it to Matrix:

```
Developer Certificate of Origin
Version 1.1

Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA

Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.

Developer's Certificate of Origin 1.1

By making a contribution to this project, I certify that:

(a) The contribution was created in whole or in part by me and I
    have the right to submit it under the open source license
    indicated in the file; or

(b) The contribution is based upon previous work that, to the best
    of my knowledge, is covered under an appropriate open source
    license and I have the right under that license to submit that
    work with modifications, whether created in whole or in part
    by me, under the same open source license (unless I am
    permitted to submit under a different license), as indicated
    in the file; or

(c) The contribution was provided directly to me by some other
    person who certified (a), (b) or (c) and I have not modified
    it.

(d) I understand and agree that this project and the contribution
    are public and that a record of the contribution (including all
    personal information I submit with it, including my sign-off) is
    maintained indefinitely and may be redistributed consistent with
    this project or the open source license(s) involved.
```

If you agree to this for your contribution, then all that's needed is to
include the line in your commit or pull request comment:

```
Signed-off-by: Your Name <your@email.example.org>
```

We accept contributions under a legally identifiable name, such as
your name on government documentation or common-law names (names
claimed by legitimate usage or repute). Unfortunately, we cannot
accept anonymous contributions at this time.

Git allows you to add this signoff automatically when using the `-s`
flag to `git commit`, which uses the name and email set in your
`user.name` and `user.email` git configs.
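
For instance (a sketch; substitute your own identity and commit message):

```sh
# A sketch: configure your identity once, then sign off commits with -s.
git config --global user.name "Your Name"
git config --global user.email "your@email.example.org"
git commit -s -m "Validate Florb security levels on /federation/florb."
```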

# 10. Turn feedback into better code.

Once the Pull Request is opened, you will see a few things:

1. our automated CI (Continuous Integration) pipeline will run (again) the linters, the unit tests, the integration tests and more;
2. one or more of the developers will take a look at your Pull Request and offer feedback.

From this point, you should:

1. Look at the results of the CI pipeline.
   - If there is any error, fix the error.
2. If a developer has requested changes, make these changes and let us know if it is ready for a developer to review again.
3. Create a new commit with the changes.
   - Please do NOT overwrite the history. New commits make the reviewer's life easier.
   - Push these commits to your Pull Request.
4. Back to 1.

Once both the CI and the developers are happy, the patch will be merged into Synapse and released shortly!

# 11. Find a new issue.

By now, you know the drill!

# Notes for maintainers on merging PRs etc

There are some notes for those with commit access to the project on how we
manage git [here](git.md).

# Conclusion

That's it! Matrix is a very open and collaborative project as you might expect
given our obsession with open communication. If we're going to successfully
matrix together all the fragmented communication technologies out there we are
reliant on contributions and collaboration from the community to do so. So
please get involved - and we hope you have as much fun hacking on Matrix as we
do!
{{#include ../../CONTRIBUTING.md}}

@@ -89,9 +89,7 @@ To do so, use `scripts-dev/make_full_schema.sh`. This will produce new

Ensure postgres is installed, then run:

```sh
./scripts-dev/make_full_schema.sh -p postgres_username -o output_dir/
```
./scripts-dev/make_full_schema.sh -p postgres_username -o output_dir/

NB at the time of writing, this script predates the split into separate `state`/`main`
databases so will require updates to handle that correctly.

@@ -1,37 +0,0 @@
# Implementing experimental features in Synapse

It can be desirable to implement "experimental" features which are disabled by
default and must be explicitly enabled via the Synapse configuration. This is
applicable for features which:

* Are unstable in the Matrix spec (e.g. those defined by an MSC that has not yet been merged).
* Developers are not confident in their use by general Synapse administrators/users
  (e.g. a feature is incomplete, buggy, performs poorly, or needs further testing).

Note that this only really applies to features which are expected to be desirable
to a broad audience. The [module infrastructure](../modules/index.md) should
instead be investigated for non-standard features.

Guarding experimental features behind configuration flags should help with some
of the following scenarios:

* Ensure that clients do not assume that unstable features exist (failing
  gracefully if they do not).
* Unstable features do not become de-facto standards and can be removed
  aggressively (since only those who have opted-in will be affected).
* Ease finding the implementation of unstable features in Synapse (for future
  removal or stabilization).
* Ease testing a feature (or removal of feature) due to enabling/disabling without
  code changes. It also becomes possible to ask for wider testing, if desired.

Experimental configuration flags should be disabled by default (requiring Synapse
administrators to explicitly opt-in), although there are situations where it makes
sense (from a product point-of-view) to enable features by default. This is
expected and not an issue.

It is not a requirement for experimental features to be behind a configuration flag,
but one should be used if unsure.

New experimental configuration flags should be added under the `experimental`
configuration key (see the `synapse.config.experimental` file) and either explain
(briefly) what is being enabled, or include the MSC number.
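
For illustration only, enabling such a flag in `homeserver.yaml` might look like this sketch (the MSC number and option name are made up, not real options):

```yaml
experimental_features:
  # Enable the (hypothetical) unstable feature described by MSC9999.
  msc9999_enabled: true
```
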
@@ -1,77 +0,0 @@
# Room DAG concepts

## Edges

The word "edge" comes from graph theory lingo. An edge is just a connection
between two events. In Synapse, we connect events by specifying their
`prev_events`. A subsequent event points back at a previous event.

```
A (oldest) <---- B <---- C (most recent)
```


## Depth and stream ordering

Events are normally sorted by `(topological_ordering, stream_ordering)` where
`topological_ordering` is just `depth`. In other words, we first sort by `depth`
and then tie-break based on `stream_ordering`. `depth` is incremented as new
messages are added to the DAG. Normally, `stream_ordering` is an auto
incrementing integer, but backfilled events start with `stream_ordering=-1` and decrement.

---

- `/sync` returns things in the order they arrive at the server (`stream_ordering`).
- `/messages` (and `/backfill` in the federation API) return them in the order determined by the event graph `(topological_ordering, stream_ordering)`.

The general idea is that, if you're following a room in real-time (i.e.
`/sync`), you probably want to see the messages as they arrive at your server,
rather than skipping any that arrived late; whereas if you're looking at a
historical section of timeline (i.e. `/messages`), you want to see the best
representation of the state of the room as others were seeing it at the time.
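
A toy illustration of the two orderings (the events and their numbers are made up; `$B_late` was created earlier in the DAG but arrived at the server last):

```python
from typing import List, Tuple

# (event_id, depth, stream_ordering) - made-up events.
events: List[Tuple[str, int, int]] = [
    ("$A", 1, 1),
    ("$C", 3, 2),
    ("$B_late", 2, 3),  # created earlier in the DAG, arrived last
]

# /sync-style: the order events arrived at this server.
by_arrival = sorted(events, key=lambda e: e[2])

# /messages-style: depth first, tie-broken by stream_ordering.
by_topology = sorted(events, key=lambda e: (e[1], e[2]))

print([e[0] for e in by_arrival])   # ['$A', '$C', '$B_late']
print([e[0] for e in by_topology])  # ['$A', '$B_late', '$C']
```
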

## Forward extremity

Most-recent-in-time events in the DAG which are not referenced by any other events' `prev_events` yet.

The forward extremities of a room are used as the `prev_events` when the next event is sent.


## Backward extremity

The current marker of where we have backfilled up to and will generally be the
`prev_events` of the oldest-in-time events we have in the DAG. This gives a starting point when
backfilling history.

When we persist a non-outlier event, we clear it as a backward extremity and set
all of its `prev_events` as the new backward extremities if they aren't already
persisted in the `events` table.
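
A rough sketch of that bookkeeping rule, with in-memory containers standing in for the real database tables (all names are illustrative):

```python
from dataclasses import dataclass, field
from typing import Dict, List, Set

@dataclass
class Event:
    event_id: str
    prev_events: List[str] = field(default_factory=list)

# Stand-ins for the `events` table and the backward-extremity tracking.
persisted: Dict[str, Event] = {}
backward_extremities: Set[str] = set()

def persist_non_outlier(event: Event) -> None:
    persisted[event.event_id] = event
    # The event itself is no longer a backward extremity...
    backward_extremities.discard(event.event_id)
    # ...but any of its prev_events we don't have yet become ones.
    for prev_id in event.prev_events:
        if prev_id not in persisted:
            backward_extremities.add(prev_id)
```
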

## Outliers

We mark an event as an `outlier` when we haven't figured out the state for the
room at that point in the DAG yet.

We won't *necessarily* have the `prev_events` of an `outlier` in the database,
but it's entirely possible that we *might*.

For example, when we fetch the event auth chain or state for a given event, we
mark all of those claimed auth events as outliers because we haven't done the
state calculation ourselves.


## State groups

For every non-outlier event we need to know the state at that event. Instead of
storing the full state for each event in the DB (i.e. an `event_id -> state`
mapping), which is *very* space inefficient when state doesn't change, we
instead assign each different set of state a "state group" and then have
mappings of `event_id -> state_group` and `state_group -> state`.
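
In sketch form, with plain dicts standing in for the database tables (the names and data are illustrative):

```python
from typing import Dict, Tuple

StateKey = Tuple[str, str]      # (event type, state_key)
StateMap = Dict[StateKey, str]  # -> event_id of the state event

# Events with identical state share a state group, so the (potentially
# large) StateMap is stored only once.
event_to_state_group: Dict[str, int] = {
    "$A": 1,
    "$B": 1,   # same state as $A - no duplication
    "$C": 2,
}
state_group_to_state: Dict[int, StateMap] = {
    1: {("m.room.create", ""): "$create"},
    2: {("m.room.create", ""): "$create", ("m.room.name", ""): "$name"},
}

def state_at(event_id: str) -> StateMap:
    return state_group_to_state[event_to_state_group[event_id]]
```
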

### State group edges

TODO: `state_group_edges` is a further optimization...
notes from @Azrenbeth, https://pastebin.com/seUGVGeT
@@ -1,56 +0,0 @@
URL Previews
============

The `GET /_matrix/media/r0/preview_url` endpoint provides a generic preview API
for URLs which outputs [Open Graph](https://ogp.me/) responses (with some Matrix
specific additions).

This does have trade-offs compared to other designs:

* Pros:
  * Simple and flexible; can be used by any clients at any point
* Cons:
  * If each homeserver provides one of these independently, all the HSes in a
    room may needlessly DoS the target URI
  * The URL metadata must be stored somewhere, rather than just using Matrix
    itself to store the media.
  * Matrix cannot be used to distribute the metadata between homeservers.

When Synapse is asked to preview a URL it does the following:

1. Checks against a URL blacklist (defined as `url_preview_url_blacklist` in the
   config).
2. Checks the in-memory cache by URLs and returns the result if it exists. (This
   is also used to de-duplicate processing of multiple in-flight requests at once;
   a sketch of that idea follows this list.)
3. Kicks off a background process to generate a preview:
   1. Checks the database cache by URL and timestamp and returns the result if it
      has not expired and was successful (a 2xx return code).
   2. Checks if the URL matches an [oEmbed](https://oembed.com/) pattern. If it
      does, update the URL to download.
   3. Downloads the URL and stores it into a file via the media storage provider
      and saves the local media metadata.
   4. If the media is an image:
      1. Generates thumbnails.
      2. Generates an Open Graph response based on image properties.
   5. If the media is HTML:
      1. Decodes the HTML via the stored file.
      2. Generates an Open Graph response from the HTML.
      3. If an image exists in the Open Graph response:
         1. Downloads the URL and stores it into a file via the media storage
            provider and saves the local media metadata.
         2. Generates thumbnails.
         3. Updates the Open Graph response based on image properties.
   6. If the media is JSON and an oEmbed URL was found:
      1. Convert the oEmbed response to an Open Graph response.
      2. If a thumbnail or image is in the oEmbed response:
         1. Downloads the URL and stores it into a file via the media storage
            provider and saves the local media metadata.
         2. Generates thumbnails.
         3. Updates the Open Graph response based on image properties.
   7. Stores the result in the database cache.
4. Returns the result.
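
A rough sketch of the in-flight de-duplication mentioned in step 2, written with asyncio for brevity (Synapse itself is Twisted-based, so this is purely illustrative; all names are assumptions):

```python
import asyncio
from typing import Awaitable, Callable, Dict

# In-flight previews keyed by URL; concurrent callers share one task.
_inflight: Dict[str, "asyncio.Task[dict]"] = {}

async def preview_url(url: str, generate: Callable[[str], Awaitable[dict]]) -> dict:
    task = _inflight.get(url)
    if task is None:
        task = asyncio.create_task(generate(url))
        _inflight[url] = task
        # Drop the entry once the shared task finishes.
        task.add_done_callback(lambda _t: _inflight.pop(url, None))
    return await task
```
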

The in-memory cache expires after 1 hour.

Expired entries in the database cache (and their associated media files) are
deleted every 10 seconds. The default expiration time is 1 hour from download.
@@ -14,7 +14,7 @@ you set the `server_name` to match your machine's public DNS hostname.

For this default configuration to work, you will need to listen for TLS
connections on port 8448. The preferred way to do that is by using a
reverse proxy: see [the reverse proxy documentation](reverse_proxy.md) for instructions
reverse proxy: see [reverse_proxy.md](reverse_proxy.md) for instructions
on how to correctly set one up.

In some cases you might not want to run Synapse on the machine that has
@@ -23,7 +23,7 @@ traffic to use a different port than 8448. For example, you might want to
have your user names look like `@user:example.com`, but you want to run
Synapse on `synapse.example.com` on port 443. This can be done using
delegation, which allows an admin to control where federation traffic should
be sent. See [the delegation documentation](delegate.md) for instructions on how to set this up.
be sent. See [delegate.md](delegate.md) for instructions on how to set this up.

Once federation has been configured, you should be able to join a room over
federation. A good place to start is `#synapse:matrix.org` - a room for
@@ -44,8 +44,8 @@ a complicated dance which requires connections in both directions).

Another common problem is that people on other servers can't join rooms that
you invite them to. This can be caused by an incorrectly-configured reverse
proxy: see [the reverse proxy documentation](reverse_proxy.md) for instructions on how
to correctly configure a reverse proxy.
proxy: see [reverse_proxy.md](reverse_proxy.md) for instructions on how to correctly
configure a reverse proxy.

### Known issues

@@ -22,9 +22,8 @@ will be removed in a future version of Synapse.

The `token` field should include the JSON web token with the following claims:

* A claim that encodes the local part of the user ID is required. By default,
  the `sub` (subject) claim is used, or a custom claim can be set in the
  configuration file.
* The `sub` (subject) claim is required and should encode the local part of the
  user ID.
* The expiration time (`exp`), not before time (`nbf`), and issued at (`iat`)
  claims are optional, but validated if present.
* The issuer (`iss`) claim is optional, but required and validated if configured.
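
For illustration, a token carrying these claims could be minted with PyJWT along these lines (the secret, issuer and claim values are placeholders):

```python
import time

import jwt  # PyJWT

now = int(time.time())
token = jwt.encode(
    {
        "sub": "alice",       # local part of @alice:example.com
        "iat": now,           # issued at
        "nbf": now,           # not before
        "exp": now + 300,     # expires in five minutes
        "iss": "my-issuer",   # must match the configured issuer, if any
    },
    "shared-secret",
    algorithm="HS256",
)
print(token)
```
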
@@ -10,20 +10,16 @@ Logcontexts are also used for CPU and database accounting, so that we
can track which requests were responsible for high CPU use or database
activity.

The `synapse.logging.context` module provides facilities for managing
The `synapse.logging.context` module provides a facilities for managing
the current log context (as well as providing the `LoggingContextFilter`
class).

Asynchronous functions make the whole thing complicated, so this document describes
Deferreds make the whole thing complicated, so this document describes
how it all works, and how to write code which follows the rules.

In this document, "awaitable" refers to any object which can be `await`ed. In the context of
Synapse, that normally means either a coroutine or a Twisted
[`Deferred`](https://twistedmatrix.com/documents/current/api/twisted.internet.defer.Deferred.html).
## Logcontexts without Deferreds

## Logcontexts without asynchronous code

In the absence of any asynchronous voodoo, things are simple enough. As with
In the absence of any Deferred voodoo, things are simple enough. As with
any code of this nature, the rule is that our function should leave
things as it found them:

@@ -59,109 +55,126 @@ def do_request_handling():
    logger.debug("phew")
```

## Using logcontexts with awaitables
## Using logcontexts with Deferreds

Awaitables break the linear flow of code so that there is no longer a single entry point
where we should set the logcontext and a single exit point where we should remove it.
Deferreds --- and in particular, `defer.inlineCallbacks` --- break the
linear flow of code so that there is no longer a single entry point
where we should set the logcontext and a single exit point where we
should remove it.

Consider the example above, where `do_request_handling` needs to do some
blocking operation, and returns an awaitable:
blocking operation, and returns a deferred:

```python
async def handle_request(request_id):
@defer.inlineCallbacks
def handle_request(request_id):
    with context.LoggingContext() as request_context:
        request_context.request = request_id
        await do_request_handling()
        yield do_request_handling()
    logger.debug("finished")
```

In the above flow:

- The logcontext is set
- `do_request_handling` is called, and returns an awaitable
- `handle_request` awaits the awaitable
- Execution of `handle_request` is suspended
- `do_request_handling` is called, and returns a deferred
- `handle_request` yields the deferred
- The `inlineCallbacks` wrapper of `handle_request` returns a deferred

So we have stopped processing the request (and will probably go on to
start processing the next), without clearing the logcontext.

To circumvent this problem, synapse code assumes that, wherever you have
an awaitable, you will want to `await` it. To that end, wherever
functions return awaitables, we adopt the following conventions:
a deferred, you will want to yield on it. To that end, wherever
functions return a deferred, we adopt the following conventions:

**Rules for functions returning awaitables:**
**Rules for functions returning deferreds:**

> - If the awaitable is already complete, the function returns with the
> - If the deferred is already complete, the function returns with the
>   same logcontext it started with.
> - If the awaitable is incomplete, the function clears the logcontext
>   before returning; when the awaitable completes, it restores the
> - If the deferred is incomplete, the function clears the logcontext
>   before returning; when the deferred completes, it restores the
>   logcontext before running any callbacks.

That sounds complicated, but actually it means a lot of code (including
the example above) "just works". There are two cases:

- If `do_request_handling` returns a completed awaitable, then the
- If `do_request_handling` returns a completed deferred, then the
  logcontext will still be in place. In this case, execution will
  continue immediately after the `await`; the "finished" line will
  continue immediately after the `yield`; the "finished" line will
  be logged against the right context, and the `with` block restores
  the original context before we return to the caller.
- If the returned awaitable is incomplete, `do_request_handling` clears
- If the returned deferred is incomplete, `do_request_handling` clears
  the logcontext before returning. The logcontext is therefore clear
  when `handle_request` `await`s the awaitable.
  when `handle_request` yields the deferred. At that point, the
  `inlineCallbacks` wrapper adds a callback to the deferred, and
  returns another (incomplete) deferred to the caller, and it is safe
  to begin processing the next request.

  Once `do_request_handling`'s awaitable completes, it will reinstate
  the logcontext, before running the second half of `handle_request`,
  so again the "finished" line will be logged against the right context,
  and the `with` block restores the original context.
  Once `do_request_handling`'s deferred completes, it will reinstate
  the logcontext, before running the callback added by the
  `inlineCallbacks` wrapper. That callback runs the second half of
  `handle_request`, so again the "finished" line will be logged
  against the right context, and the `with` block restores the
  original context.

As an aside, it's worth noting that `handle_request` follows our rules
- though that only matters if the caller has its own logcontext which it
-though that only matters if the caller has its own logcontext which it
cares about.

The following sections describe pitfalls and helpful patterns when
implementing these rules.

Always await your awaitables
----------------------------
Always yield your deferreds
---------------------------

Whenever you get an awaitable back from a function, you should `await` on
it as soon as possible. Do not pass go; do not do any logging; do not
call any other functions.
Whenever you get a deferred back from a function, you should `yield` on
it as soon as possible. (Returning it directly to your caller is ok too,
if you're not doing `inlineCallbacks`.) Do not pass go; do not do any
logging; do not call any other functions.

```python
async def fun():
@defer.inlineCallbacks
def fun():
    logger.debug("starting")
    await do_some_stuff()       # just like this
    yield do_some_stuff()       # just like this

    coro = more_stuff()
    result = await coro         # also fine, of course
    d = more_stuff()
    result = yield d            # also fine, of course

    return result

def nonInlineCallbacksFun():
    logger.debug("just a wrapper really")
    return do_some_stuff()      # this is ok too - the caller will yield on
                                # it anyway.
```

Provided this pattern is followed all the way back up to the callchain
to where the logcontext was set, this will make things work out ok:
provided `do_some_stuff` and `more_stuff` follow the rules above, then
so will `fun`.
so will `fun` (as wrapped by `inlineCallbacks`) and
`nonInlineCallbacksFun`.

It's all too easy to forget to `await`: for instance if we forgot that
`do_some_stuff` returned an awaitable, we might plough on regardless. This
It's all too easy to forget to `yield`: for instance if we forgot that
`do_some_stuff` returned a deferred, we might plough on regardless. This
leads to a mess; it will probably work itself out eventually, but not
before a load of stuff has been logged against the wrong context.
(Normally, other things will break, more obviously, if you forget to
`await`, so this tends not to be a major problem in practice.)
`yield`, so this tends not to be a major problem in practice.)
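
As an aside, plain Python makes the forgotten-`await` case at least somewhat visible; here is a purely illustrative asyncio sketch (Synapse itself uses Twisted):

```python
import asyncio

async def do_some_stuff() -> str:
    await asyncio.sleep(0)  # pretend to do some I/O
    return "result"

async def fun() -> None:
    do_some_stuff()                 # BUG: coroutine created but never awaited;
                                    # Python warns "coroutine ... was never awaited"
    result = await do_some_stuff()  # correct
    print(result)

asyncio.run(fun())
```
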

Of course sometimes you need to do something a bit fancier with your
awaitable - not all code follows the linear A-then-B-then-C pattern.
Deferreds - not all code follows the linear A-then-B-then-C pattern.
Notes on implementing more complex patterns are in later sections.

## Where you create a new awaitable, make it follow the rules
## Where you create a new Deferred, make it follow the rules

Most of the time, an awaitable comes from another synapse function.
Sometimes, though, we need to make up a new awaitable, or we get an awaitable
back from external code. We need to make it follow our rules.
Most of the time, a Deferred comes from another synapse function.
Sometimes, though, we need to make up a new Deferred, or we get a
Deferred back from external code. We need to make it follow our rules.

The easy way to do it is by using `context.make_deferred_yieldable`. Suppose we want to implement
The easy way to do it is with a combination of `defer.inlineCallbacks`,
and `context.PreserveLoggingContext`. Suppose we want to implement
`sleep`, which returns a deferred which will run its callbacks after a
given number of seconds. That might look like:

@@ -173,12 +186,25 @@ def get_sleep_deferred(seconds):
    return d
```

That doesn't follow the rules, but we can fix it by calling it through
`context.make_deferred_yieldable`:
That doesn't follow the rules, but we can fix it by wrapping it with
`PreserveLoggingContext` and `yield`ing on it:

```python
async def sleep(seconds):
    return await context.make_deferred_yieldable(get_sleep_deferred(seconds))
@defer.inlineCallbacks
def sleep(seconds):
    with PreserveLoggingContext():
        yield get_sleep_deferred(seconds)
```

This technique works equally for external functions which return
deferreds, or deferreds we have made ourselves.

You can also use `context.make_deferred_yieldable`, which just does the
boilerplate for you, so the above could be written:

```python
def sleep(seconds):
    return context.make_deferred_yieldable(get_sleep_deferred(seconds))
```

## Fire-and-forget
@@ -187,18 +213,20 @@ Sometimes you want to fire off a chain of execution, but not wait for
its result. That might look a bit like this:

```python
async def do_request_handling():
    await foreground_operation()
@defer.inlineCallbacks
def do_request_handling():
    yield foreground_operation()

    # *don't* do this
    background_operation()

    logger.debug("Request handling complete")

async def background_operation():
    await first_background_step()
@defer.inlineCallbacks
def background_operation():
    yield first_background_step()
    logger.debug("Completed first step")
    await second_background_step()
    yield second_background_step()
    logger.debug("Completed second step")
```

@@ -207,13 +235,13 @@ The above code does a couple of steps in the background after
against the `request_context` logcontext, which may or may not be
desirable. There are two big problems with the above, however. The first
problem is that, if `background_operation` returns an incomplete
awaitable, it will expect its caller to `await` immediately, so will have
Deferred, it will expect its caller to `yield` immediately, so will have
cleared the logcontext. In this example, that means that 'Request
handling complete' will be logged without any context.

The second problem, which is potentially even worse, is that when the
awaitable returned by `background_operation` completes, it will restore
the original logcontext. There is nothing waiting on that awaitable, so
Deferred returned by `background_operation` completes, it will restore
the original logcontext. There is nothing waiting on that Deferred, so
the logcontext will leak into the reactor and possibly get attached to
some arbitrary future operation.

@@ -226,8 +254,9 @@ deferred completes will be the empty logcontext), and will restore the
current logcontext before continuing the foreground process:

```python
async def do_request_handling():
    await foreground_operation()
@defer.inlineCallbacks
def do_request_handling():
    yield foreground_operation()

    # start background_operation off in the empty logcontext, to
    # avoid leaking the current context into the reactor.
@@ -245,15 +274,16 @@ Obviously that option means that the operations done in

The second option is to use `context.run_in_background`, which wraps a
function so that it doesn't reset the logcontext even when it returns
an incomplete awaitable, and adds a callback to the returned awaitable to
an incomplete deferred, and adds a callback to the returned deferred to
reset the logcontext. In other words, it turns a function that follows
the Synapse rules about logcontexts and awaitables into one which behaves
the Synapse rules about logcontexts and Deferreds into one which behaves
more like an external function --- the opposite operation to that
described in the previous section. It can be used like this:

```python
async def do_request_handling():
    await foreground_operation()
@defer.inlineCallbacks
def do_request_handling():
    yield foreground_operation()

    context.run_in_background(background_operation)

@@ -264,53 +294,152 @@ async def do_request_handling():
## Passing synapse deferreds into third-party functions

A typical example of this is where we want to collect together two or
more awaitables via `defer.gatherResults`:
more deferred via `defer.gatherResults`:

```python
a1 = operation1()
a2 = operation2()
a3 = defer.gatherResults([a1, a2])
d1 = operation1()
d2 = operation2()
d3 = defer.gatherResults([d1, d2])
```

This is really a variation of the fire-and-forget problem above, in that
we are firing off `a1` and `a2` without awaiting on them. The difference
we are firing off `d1` and `d2` without yielding on them. The difference
is that we now have third-party code attached to their callbacks. Anyway,
either technique given in the [Fire-and-forget](#fire-and-forget)
section will work.

Of course, the new awaitable returned by `gather` needs to be
Of course, the new Deferred returned by `gatherResults` needs to be
wrapped in order to make it follow the logcontext rules before we can
yield it, as described in [Where you create a new awaitable, make it
yield it, as described in [Where you create a new Deferred, make it
follow the
rules](#where-you-create-a-new-awaitable-make-it-follow-the-rules).
rules](#where-you-create-a-new-deferred-make-it-follow-the-rules).

So, option one: reset the logcontext before starting the operations to
be gathered:

```python
async def do_request_handling():
@defer.inlineCallbacks
def do_request_handling():
    with PreserveLoggingContext():
        a1 = operation1()
        a2 = operation2()
        result = await defer.gatherResults([a1, a2])
        d1 = operation1()
        d2 = operation2()
        result = yield defer.gatherResults([d1, d2])
```

In this case particularly, though, option two, of using
`context.run_in_background` almost certainly makes more sense, so that
`context.preserve_fn` almost certainly makes more sense, so that
`operation1` and `operation2` are both logged against the original
logcontext. This looks like:

```python
async def do_request_handling():
    a1 = context.run_in_background(operation1)
    a2 = context.run_in_background(operation2)
@defer.inlineCallbacks
def do_request_handling():
    d1 = context.preserve_fn(operation1)()
    d2 = context.preserve_fn(operation2)()

    result = await make_deferred_yieldable(defer.gatherResults([a1, a2]))
    with PreserveLoggingContext():
        result = yield defer.gatherResults([d1, d2])
```

## A note on garbage-collection of awaitable chains
## Was all this really necessary?

It turns out that our logcontext rules do not play nicely with awaitable
The conventions used work fine for a linear flow where everything
happens in series via `defer.inlineCallbacks` and `yield`, but are
certainly tricky to follow for any more exotic flows. It's hard not to
wonder if we could have done something else.

We're not going to rewrite Synapse now, so the following is entirely of
academic interest, but I'd like to record some thoughts on an
alternative approach.

I briefly prototyped some code following an alternative set of rules. I
think it would work, but I certainly didn't get as far as thinking how
it would interact with concepts as complicated as the cache descriptors.

My alternative rules were:

- functions always preserve the logcontext of their caller, whether or
  not they are returning a Deferred.
- Deferreds returned by synapse functions run their callbacks in the
  same context as the function was originally called in.

The main point of this scheme is that everywhere that sets the
logcontext is responsible for clearing it before returning control to
the reactor.

So, for example, if you were the function which started a
`with LoggingContext` block, you wouldn't `yield` within it --- instead
you'd start off the background process, and then leave the `with` block
to wait for it:

```python
def handle_request(request_id):
    with context.LoggingContext() as request_context:
        request_context.request = request_id
        d = do_request_handling()

    def cb(r):
        logger.debug("finished")

    d.addCallback(cb)
    return d
```

(in general, mixing `with LoggingContext` blocks and
`defer.inlineCallbacks` in the same function leads to slightly
counter-intuitive code, under this scheme).

Because we leave the original `with` block as soon as the Deferred is
returned (as opposed to waiting for it to be resolved, as we do today),
the logcontext is cleared before control passes back to the reactor; so
if there is some code within `do_request_handling` which needs to wait
for a Deferred to complete, there is no need for it to worry about
clearing the logcontext before doing so:

```python
def handle_request():
    r = do_some_stuff()
    r.addCallback(do_some_more_stuff)
    return r
```

--- and provided `do_some_stuff` follows the rules of returning a
Deferred which runs its callbacks in the original logcontext, all is
happy.

The business of a Deferred which runs its callbacks in the original
logcontext isn't hard to achieve --- we have it today, in the shape of
`context._PreservingContextDeferred`:

```python
def do_some_stuff():
    deferred = do_some_io()
    pcd = _PreservingContextDeferred(LoggingContext.current_context())
    deferred.chainDeferred(pcd)
    return pcd
```

It turns out that, thanks to the way that Deferreds chain together, we
automatically get the property of a context-preserving deferred with
`defer.inlineCallbacks`, provided the final Deferred the function
`yields` on has that property. So we can just write:

```python
@defer.inlineCallbacks
def handle_request():
    yield do_some_stuff()
    yield do_some_more_stuff()
```

To conclude: I think this scheme would have worked equally well, with
less danger of messing it up, and probably made some more esoteric code
easier to write. But again --- changing the conventions of the entire
Synapse codebase is not a sensible option for the marginal improvement
offered.

## A note on garbage-collection of Deferred chains

It turns out that our logcontext rules do not play nicely with Deferred
chains which get orphaned and garbage-collected.

Imagine we have some code that looks like this:
@@ -322,12 +451,13 @@ def on_something_interesting():
    for d in listener_queue:
        d.callback("foo")

async def await_something_interesting():
    new_awaitable = defer.Deferred()
    listener_queue.append(new_awaitable)
@defer.inlineCallbacks
def await_something_interesting():
    new_deferred = defer.Deferred()
    listener_queue.append(new_deferred)

    with PreserveLoggingContext():
        await new_awaitable
        yield new_deferred
```

Obviously, the idea here is that we have a bunch of things which are
@@ -346,19 +476,18 @@ def reset_listener_queue():
    listener_queue.clear()
```

So, both ends of the awaitable chain have now dropped their references,
and the awaitable chain is now orphaned, and will be garbage-collected at
some point. Note that `await_something_interesting` is a coroutine,
which Python implements as a generator function. When Python
garbage-collects generator functions, it gives them a chance to
clean up by making the `await` (or `yield`) raise a `GeneratorExit`
So, both ends of the deferred chain have now dropped their references,
and the deferred chain is now orphaned, and will be garbage-collected at
some point. Note that `await_something_interesting` is a generator
function, and when Python garbage-collects generator functions, it gives
them a chance to clean up by making the `yield` raise a `GeneratorExit`
exception. In our case, that means that the `__exit__` handler of
`PreserveLoggingContext` will carefully restore the request context, but
there is now nothing waiting for its return, so the request context is
never cleared.

To reiterate, this problem only arises when *both* ends of an awaitable
chain are dropped. Dropping the reference to an awaitable you're
supposed to be awaiting is bad practice, so this doesn't
To reiterate, this problem only arises when *both* ends of a deferred
chain are dropped. Dropping the reference to a deferred you're
supposed to be calling is probably bad practice, so this doesn't
actually happen too much. Unfortunately, when it does happen, it will
lead to leaked logcontexts which are incredibly hard to track down.

@@ -11,7 +11,7 @@ Note that this will give administrative access to synapse to **all users** with
shell access to the server. It should therefore **not** be enabled in
environments where untrusted users have shell access.

## Configuring the manhole
***

To enable it, first uncomment the `manhole` listener configuration in
`homeserver.yaml`. The configuration is slightly different if you're using docker.
@@ -52,43 +52,22 @@ listeners:
    type: manhole
```

### Security settings

The following config options are available:

- `username` - The username for the manhole (defaults to `matrix`)
- `password` - The password for the manhole (defaults to `rabbithole`)
- `ssh_priv_key` - The path to a private SSH key (defaults to a hardcoded value)
- `ssh_pub_key` - The path to a public SSH key (defaults to a hardcoded value)

For example:

```yaml
manhole_settings:
  username: manhole
  password: mypassword
  ssh_priv_key: "/home/synapse/manhole_keys/id_rsa"
  ssh_pub_key: "/home/synapse/manhole_keys/id_rsa.pub"
```

## Accessing synapse manhole
#### Accessing synapse manhole

Then restart synapse, and point an ssh client at port 9000 on localhost, using
the username and password configured in `homeserver.yaml` - with the default
configuration, this would be:
the username `matrix`:

```bash
ssh -p9000 matrix@localhost
```

Then enter the password when prompted (the default is `rabbithole`).
The password is `rabbithole`.

This gives a Python REPL in which `hs` gives access to the
`synapse.server.HomeServer` object - which in turn gives access to many other
parts of the process.

Note that, prior to Synapse 1.41, any call which returns a coroutine will need to be wrapped in `ensureDeferred`.
Note that any call which returns a coroutine will need to be wrapped in `ensureDeferred`.

As a simple example, retrieving an event from the database:
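
The example itself falls outside this hunk. A sketch of what such a call might look like in the manhole (the store accessor and event ID here are assumptions; older Synapse versions expose `hs.get_datastore()` instead):

```python
>>> from twisted.internet.defer import ensureDeferred
>>> d = ensureDeferred(hs.get_datastores().main.get_event("$some_event_id"))
>>> d.result  # inspect once the Deferred has fired
```
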
@@ -2,80 +2,29 @@

*Synapse implementation-specific details for the media repository*

The media repository
* stores avatars, attachments and their thumbnails for media uploaded by local
  users.
* caches avatars, attachments and their thumbnails for media uploaded by remote
  users.
* caches resources and thumbnails used for
  [URL previews](development/url_previews.md).
The media repository is where attachments and avatar photos are stored.
It stores attachment content and thumbnails for media uploaded by local users.
It caches attachment content and thumbnails for media uploaded by remote users.

All media in Matrix can be identified by a unique
[MXC URI](https://spec.matrix.org/latest/client-server-api/#matrix-content-mxc-uris),
consisting of a server name and media ID:
```
mxc://<server-name>/<media-id>
```
## Storage

## Local Media
Synapse generates 24 character media IDs for content uploaded by local users.
These media IDs consist of upper and lowercase letters and are case-sensitive.
Other homeserver implementations may generate media IDs differently.
Each item of media is assigned a `media_id` when it is uploaded.
The `media_id` is a randomly chosen, URL safe 24 character string.
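
One way such an ID could be generated, purely as an illustration of the description above (the function name is made up):

```python
import secrets
import string

# Illustrative only: a 24-character, case-sensitive media ID drawn
# from upper- and lowercase letters, as described in the text above.
def make_media_id() -> str:
    return "".join(secrets.choice(string.ascii_letters) for _ in range(24))

print(make_media_id())  # e.g. 'aGbQxWcRtYuIoPlKjHgFdSzX'
```
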
Local media is recorded in the `local_media_repository` table, which includes
metadata such as MIME types, upload times and file sizes.
Note that this table is shared by the URL cache, which has a different media ID
scheme.
Metadata such as the MIME type, upload time and length are stored in the
sqlite3 database indexed by `media_id`.

### Paths
A file with media ID `aabbcccccccccccccccccccc` and its `128x96` `image/jpeg`
thumbnail, created by scaling, would be stored at:
```
local_content/aa/bb/cccccccccccccccccccc
local_thumbnails/aa/bb/cccccccccccccccccccc/128-96-image-jpeg-scale
```
Content is stored on the filesystem under a `"local_content"` directory.

## Remote Media
When media from a remote homeserver is requested from Synapse, it is assigned
a local `filesystem_id`, with the same format as locally-generated media IDs,
as described above.
Thumbnails are stored under a `"local_thumbnails"` directory.

A record of remote media is stored in the `remote_media_cache` table, which
can be used to map remote MXC URIs (server names and media IDs) to local
`filesystem_id`s.
The item with `media_id` `"aabbccccccccdddddddddddd"` is stored under
`"local_content/aa/bb/ccccccccdddddddddddd"`. Its thumbnail with width
`128` and height `96` and type `"image/jpeg"` is stored under
`"local_thumbnails/aa/bb/ccccccccdddddddddddd/128-96-image-jpeg"`

### Paths
A file from `matrix.org` with `filesystem_id` `aabbcccccccccccccccccccc` and its
`128x96` `image/jpeg` thumbnail, created by scaling, would be stored at:
```
remote_content/matrix.org/aa/bb/cccccccccccccccccccc
remote_thumbnail/matrix.org/aa/bb/cccccccccccccccccccc/128-96-image-jpeg-scale
```
Older thumbnails may omit the thumbnailing method:
```
remote_thumbnail/matrix.org/aa/bb/cccccccccccccccccccc/128-96-image-jpeg
```

Note that `remote_thumbnail/` does not have an `s`.

## URL Previews
See [URL Previews](development/url_previews.md) for documentation on the URL preview
process.

When generating previews for URLs, Synapse may download and cache various
resources, including images. These resources are assigned temporary media IDs
of the form `yyyy-mm-dd_aaaaaaaaaaaaaaaa`, where `yyyy-mm-dd` is the current
date and `aaaaaaaaaaaaaaaa` is a random sequence of 16 case-sensitive letters.

The metadata for these cached resources is stored in the
`local_media_repository` and `local_media_repository_url_cache` tables.

Resources for URL previews are deleted after a few days.

### Paths
The file with media ID `yyyy-mm-dd_aaaaaaaaaaaaaaaa` and its `128x96`
`image/jpeg` thumbnail, created by scaling, would be stored at:
```
url_cache/yyyy-mm-dd/aaaaaaaaaaaaaaaa
url_cache_thumbnails/yyyy-mm-dd/aaaaaaaaaaaaaaaa/128-96-image-jpeg-scale
```
Remote content is cached under `"remote_content"` directory. Each item of
remote content is assigned a local `"filesystem_id"` to ensure that the
directory structure `"remote_content/server_name/aa/bb/ccccccccdddddddddddd"`
is appropriate. Thumbnails for remote content are stored under
`"remote_thumbnails/server_name/..."`
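
Putting the path schemes above together, a sketch of how a media ID maps to its on-disk location (the function names are illustrative):

```python
import os

def local_content_path(media_id: str) -> str:
    # The first four characters become two directory levels.
    return os.path.join("local_content", media_id[:2], media_id[2:4], media_id[4:])

def remote_content_path(server_name: str, filesystem_id: str) -> str:
    return os.path.join(
        "remote_content", server_name,
        filesystem_id[:2], filesystem_id[2:4], filesystem_id[4:],
    )

print(local_content_path("aabbcccccccccccccccccccc"))
# local_content/aa/bb/cccccccccccccccccccc
print(remote_content_path("matrix.org", "aabbcccccccccccccccccccc"))
# remote_content/matrix.org/aa/bb/cccccccccccccccccccc
```
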
@@ -69,9 +69,9 @@ A default policy can be defined as such, in the `retention` section of
the configuration file:

```yaml
default_policy:
  min_lifetime: 1d
  max_lifetime: 1y
```

Here, `min_lifetime` and `max_lifetime` have the same meaning and level
@@ -95,14 +95,14 @@ depending on an event's room's policy. This can be done by setting the
file. An example of such configuration could be:

```yaml
purge_jobs:
  - longest_max_lifetime: 3d
    interval: 12h
  - shortest_max_lifetime: 3d
    longest_max_lifetime: 1w
    interval: 1d
  - shortest_max_lifetime: 1w
    interval: 2d
```

In this example, we define three jobs:
@@ -141,8 +141,8 @@ purging old events in a room. These limits can be defined as such in the
`retention` section of the configuration file:

```yaml
allowed_lifetime_min: 1d
allowed_lifetime_max: 1y
```

The limits are considered when running purge jobs. If necessary, the