Compare commits
2 Commits
release-v1 ... rei/flake

| Author | SHA1 | Date |
|---|---|---|
|  | c27fe41858 |  |
|  | 9c8a9d8c51 |  |
@@ -29,12 +29,11 @@ IS_PR = os.environ["GITHUB_REF"].startswith("refs/pull/")
 
 # First calculate the various trial jobs.
 #
-# For PRs, we only run each type of test with the oldest Python version supported (which
-# is Python 3.8 right now)
+# For each type of test we only run on Py3.7 on PRs
 
 trial_sqlite_tests = [
     {
-        "python-version": "3.8",
+        "python-version": "3.7",
         "database": "sqlite",
         "extras": "all",
     }
@@ -47,13 +46,13 @@ if not IS_PR:
             "database": "sqlite",
             "extras": "all",
         }
-        for version in ("3.9", "3.10", "3.11")
+        for version in ("3.8", "3.9", "3.10", "3.11")
     )
 
 trial_postgres_tests = [
     {
-        "python-version": "3.8",
+        "python-version": "3.7",
         "database": "postgres",
         "postgres-version": "11",
         "extras": "all",
@@ -72,7 +71,7 @@ if not IS_PR:
 
 trial_no_extra_tests = [
     {
-        "python-version": "3.8",
+        "python-version": "3.7",
         "database": "sqlite",
         "extras": "",
     }
@@ -134,6 +133,11 @@ if not IS_PR:
             "sytest-tag": "testing",
             "postgres": "postgres",
         },
+        {
+            "sytest-tag": "buster",
+            "postgres": "multi-postgres",
+            "workers": "workers",
+        },
     ]
 )
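The hunks above change the trial-job matrices that a CI script computes. As context, here is a minimal sketch of how such a matrix is typically handed to GitHub Actions: serialise it as JSON and append it to the `$GITHUB_OUTPUT` file, where a later `strategy.matrix: fromJson(...)` can consume it. The function and output names below are illustrative, not taken from the repository.

```python
# Illustrative sketch (not the repository's actual script): emit a trial-job
# matrix for GitHub Actions by writing JSON to the $GITHUB_OUTPUT file.
import json
import os

trial_sqlite_tests = [
    {"python-version": "3.7", "database": "sqlite", "extras": "all"},
]

def set_output(name: str, value: str) -> None:
    # GitHub Actions exposes step outputs via lines appended to $GITHUB_OUTPUT.
    with open(os.environ["GITHUB_OUTPUT"], "a") as f:
        f.write(f"{name}={value}\n")

set_output("trial_test_matrix", json.dumps(trial_sqlite_tests))
```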
@@ -31,6 +31,35 @@ sed -i \
   -e '/systemd/d' \
   pyproject.toml
 
+# Use poetry to do the installation. This ensures that the versions are all mutually
+# compatible (as far the package metadata declares, anyway); pip's package resolver
+# is more lax.
+#
+# Rather than `poetry install --no-dev`, we drop all dev dependencies and the dev-docs
+# group from the toml file. This means we don't have to ensure compatibility between
+# old deps and dev tools.
+
+pip install toml wheel
+
+REMOVE_DEV_DEPENDENCIES="
+import toml
+with open('pyproject.toml', 'r') as f:
+    data = toml.loads(f.read())
+
+del data['tool']['poetry']['dev-dependencies']
+del data['tool']['poetry']['group']['dev-docs']
+
+with open('pyproject.toml', 'w') as f:
+    toml.dump(data, f)
+"
+python3 -c "$REMOVE_DEV_DEPENDENCIES"
+
+pip install poetry==1.3.2
+poetry lock
+
+echo "::group::Patched pyproject.toml"
+cat pyproject.toml
+echo "::endgroup::"
+echo "::group::Lockfile after patch"
+cat poetry.lock
+echo "::endgroup::"
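The block added above embeds a small Python program in a shell variable. As a standalone sketch of the same pruning step: note that the `toml` package does not preserve comments or formatting when it round-trips a file, which is acceptable here because the patched `pyproject.toml` is only used transiently in CI. Using `.pop(..., None)` instead of `del` (a variation, not the script's exact code) additionally tolerates missing keys.

```python
# Standalone sketch of the dev-dependency pruning done in the script above.
# Assumes a poetry-style pyproject.toml with the keys shown.
import toml

with open("pyproject.toml", "r") as f:
    data = toml.loads(f.read())

# Drop dev-only dependency groups so `poetry lock` only has to solve the
# runtime dependencies against the old pinned versions.
data["tool"]["poetry"].pop("dev-dependencies", None)
data["tool"]["poetry"].get("group", {}).pop("dev-docs", None)

with open("pyproject.toml", "w") as f:
    toml.dump(data, f)
```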
.github/ISSUE_TEMPLATE/BUG_REPORT.yml (vendored; 2 changes)
@@ -129,7 +129,7 @@ body:
     attributes:
       label: Relevant log output
       description: |
-        Please copy and paste any relevant log output as text (not images), ideally at INFO or DEBUG log level.
+        Please copy and paste any relevant log output, ideally at INFO or DEBUG log level.
         This will be automatically formatted into code, so there is no need for backticks (`\``).
 
         Please be careful to remove any personal or private data.
.github/workflows/dependabot_changelog.yml (vendored; new file; 49 changes)
@@ -0,0 +1,49 @@
+name: Write changelog for dependabot PR
+on:
+  pull_request:
+    types:
+      - opened
+      - reopened # For debugging!
+
+permissions:
+  # Needed to be able to push the commit. See
+  # https://docs.github.com/en/code-security/dependabot/working-with-dependabot/automating-dependabot-with-github-actions#enable-auto-merge-on-a-pull-request
+  # for a similar example
+  contents: write
+
+jobs:
+  add-changelog:
+    runs-on: 'ubuntu-latest'
+    if: ${{ github.actor == 'dependabot[bot]' }}
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          ref: ${{ github.event.pull_request.head.ref }}
+      - name: Write, commit and push changelog
+        env:
+          PR_TITLE: ${{ github.event.pull_request.title }}
+          PR_NUMBER: ${{ github.event.pull_request.number }}
+        run: |
+          echo "${PR_TITLE}." > "changelog.d/${PR_NUMBER}".misc
+          git add changelog.d
+          git config user.email "github-actions[bot]@users.noreply.github.com"
+          git config user.name "GitHub Actions"
+          git commit -m "Changelog"
+          git push
+        shell: bash
+      # The `git push` above does not trigger CI on the dependabot PR.
+      #
+      # By default, workflows can't trigger other workflows when they're just using the
+      # default `GITHUB_TOKEN` access token. (This is intended to stop you from writing
+      # recursive workflow loops by accident, because that'll get very expensive very
+      # quickly.) Instead, you have to manually call out to another workflow, or else
+      # make your changes (i.e. the `git push` above) using a personal access token.
+      # See
+      # https://docs.github.com/en/actions/using-workflows/triggering-a-workflow#triggering-a-workflow-from-a-workflow
+      #
+      # I have tried and failed to find a way to trigger CI on the "merge ref" of the PR.
+      # See git commit history for previous attempts. If anyone desperately wants to try
+      # again in the future, make a matrix-bot account and use its access token to git push.
+
+      # THIS WORKFLOW HAS WRITE PERMISSIONS---do not add other jobs here unless they
+      # are sufficiently locked down to dependabot only as above.
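The changelog step in the workflow above reduces to a few lines of file handling. A hypothetical local equivalent, for clarity (the env var names mirror the workflow; the sample values in comments are made up):

```python
# Hypothetical local equivalent of the "Write, commit and push changelog" step.
import os
import pathlib

pr_title = os.environ["PR_TITLE"]    # e.g. "Bump serde from 1.0.160 to 1.0.171"
pr_number = os.environ["PR_NUMBER"]  # e.g. "15999" (illustrative only)
entry = pathlib.Path("changelog.d") / f"{pr_number}.misc"
entry.write_text(f"{pr_title}.\n")   # same content as `echo "${PR_TITLE}." > ...`
```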
.github/workflows/docs-pr-netlify.yaml (vendored; 2 changes)
@@ -22,7 +22,7 @@ jobs:
           path: book
 
       - name: 📤 Deploy to Netlify
-        uses: matrix-org/netlify-pr-preview@v2
+        uses: matrix-org/netlify-pr-preview@v1
        with:
          path: book
          owner: ${{ github.event.workflow_run.head_repository.owner.login }}
.github/workflows/latest_deps.yml (vendored; 23 changes)
@@ -22,21 +22,7 @@ concurrency:
   cancel-in-progress: true
 
 jobs:
-  check_repo:
-    # Prevent this workflow from running on any fork of Synapse other than matrix-org/synapse, as it is
-    # only useful to the Synapse core team.
-    # All other workflow steps depend on this one, thus if 'should_run_workflow' is not 'true', the rest
-    # of the workflow will be skipped as well.
-    runs-on: ubuntu-latest
-    outputs:
-      should_run_workflow: ${{ steps.check_condition.outputs.should_run_workflow }}
-    steps:
-      - id: check_condition
-        run: echo "should_run_workflow=${{ github.repository == 'matrix-org/synapse' }}" >> "$GITHUB_OUTPUT"
-
   mypy:
-    needs: check_repo
-    if: needs.check_repo.outputs.should_run_workflow == 'true'
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
@@ -61,8 +47,6 @@ jobs:
         run: sed '/warn_unused_ignores = True/d' -i mypy.ini
       - run: poetry run mypy
 
   trial:
-    needs: check_repo
-    if: needs.check_repo.outputs.should_run_workflow == 'true'
     runs-on: ubuntu-latest
     strategy:
       matrix:
@@ -121,8 +105,6 @@ jobs:
 
   sytest:
-    needs: check_repo
-    if: needs.check_repo.outputs.should_run_workflow == 'true'
     runs-on: ubuntu-latest
     container:
       image: matrixdotorg/sytest-synapse:testing
@@ -174,8 +156,7 @@ jobs:
 
   complement:
-    needs: check_repo
-    if: "!failure() && !cancelled() && needs.check_repo.outputs.should_run_workflow == 'true'"
+    if: "${{ !failure() && !cancelled() }}"
     runs-on: ubuntu-latest
 
     strategy:
@@ -211,7 +192,7 @@ jobs:
   # Open an issue if the build fails, so we know about it.
   # Only do this if we're not experimenting with this action in a PR.
   open-issue:
-    if: "failure() && github.event_name != 'push' && github.event_name != 'pull_request' && needs.check_repo.outputs.should_run_workflow == 'true'"
+    if: "failure() && github.event_name != 'push' && github.event_name != 'pull_request'"
     needs:
       # TODO: should mypy be included here? It feels more brittle than the others.
       - mypy
.github/workflows/release-artifacts.yml (vendored; 3 changes)
@@ -34,7 +34,6 @@ jobs:
       - id: set-distros
         run: |
           # if we're running from a tag, get the full list of distros; otherwise just use debian:sid
-          # NOTE: inside the actual Dockerfile-dhvirtualenv, the image name is expanded into its full image path
           dists='["debian:sid"]'
           if [[ $GITHUB_REF == refs/tags/* ]]; then
             dists=$(scripts-dev/build_debian_packages.py --show-dists-json)
@@ -144,7 +143,7 @@ jobs:
 
       - name: Only build a single wheel on PR
         if: startsWith(github.ref, 'refs/pull/')
-        run: echo "CIBW_BUILD="cp38-manylinux_${{ matrix.arch }}"" >> $GITHUB_ENV
+        run: echo "CIBW_BUILD="cp37-manylinux_${{ matrix.arch }}"" >> $GITHUB_ENV
 
       - name: Build wheels
         run: python -m cibuildwheel --output-dir wheelhouse
.github/workflows/tests.yml (vendored; 89 changes)
@@ -35,7 +35,7 @@ jobs:
     steps:
       - uses: actions/checkout@v3
       - name: Install Rust
-        uses: dtolnay/rust-toolchain@1.60.0
+        uses: dtolnay/rust-toolchain@1.58.1
       - uses: Swatinem/rust-cache@v2
       - uses: matrix-org/setup-python-poetry@v1
         with:
@@ -92,10 +92,6 @@ jobs:
       - name: Checkout repository
         uses: actions/checkout@v3
 
-      - name: Install Rust
-        uses: dtolnay/rust-toolchain@1.60.0
-      - uses: Swatinem/rust-cache@v2
-
       - name: Setup Poetry
         uses: matrix-org/setup-python-poetry@v1
         with:
@@ -107,15 +103,18 @@ jobs:
           # To make CI green, err towards caution and install the project.
           install-project: "true"
 
-      # Cribbed from
-      # https://github.com/AustinScola/mypy-cache-github-action/blob/85ea4f2972abed39b33bd02c36e341b28ca59213/src/restore.ts#L10-L17
+      - name: Install Rust
+        uses: dtolnay/rust-toolchain@1.58.1
+      - uses: Swatinem/rust-cache@v2
+
+      # NB: I have two concerns with this action:
+      # 1. We occasionally see odd mypy problems that aren't reproducible
+      #    locally with clean caches. I suspect some dodgy caching behaviour.
+      # 2. The action uses GHA machinery that's deprecated
+      #    (https://github.com/AustinScola/mypy-cache-github-action/issues/277)
+      #    It may be simpler to use actions/cache ourselves to restore .mypy_cache.
       - name: Restore/persist mypy's cache
-        uses: actions/cache@v3
-        with:
-          path: |
-            .mypy_cache
-          key: mypy-cache-${{ github.context.sha }}
-          restore-keys: mypy-cache-
+        uses: AustinScola/mypy-cache-github-action@df56268388422ee282636ee2c7a9cc55ec644a41
 
       - name: Run mypy
         run: poetry run mypy
@@ -150,7 +149,7 @@ jobs:
         with:
           ref: ${{ github.event.pull_request.head.sha }}
       - name: Install Rust
-        uses: dtolnay/rust-toolchain@1.60.0
+        uses: dtolnay/rust-toolchain@1.58.1
       - uses: Swatinem/rust-cache@v2
       - uses: matrix-org/setup-python-poetry@v1
         with:
@@ -167,7 +166,7 @@ jobs:
       - uses: actions/checkout@v3
 
       - name: Install Rust
-        uses: dtolnay/rust-toolchain@1.60.0
+        uses: dtolnay/rust-toolchain@1.58.1
         with:
          components: clippy
       - uses: Swatinem/rust-cache@v2
@@ -268,7 +267,7 @@ jobs:
           postgres:${{ matrix.job.postgres-version }}
 
       - name: Install Rust
-        uses: dtolnay/rust-toolchain@1.60.0
+        uses: dtolnay/rust-toolchain@1.58.1
       - uses: Swatinem/rust-cache@v2
 
       - uses: matrix-org/setup-python-poetry@v1
@@ -308,39 +307,47 @@ jobs:
       - uses: actions/checkout@v3
 
       - name: Install Rust
-        uses: dtolnay/rust-toolchain@1.60.0
+        uses: dtolnay/rust-toolchain@1.58.1
       - uses: Swatinem/rust-cache@v2
 
       # There aren't wheels for some of the older deps, so we need to install
       # their build dependencies
       - run: |
          sudo apt-get -qq update
          sudo apt-get -qq install build-essential libffi-dev python-dev \
-           libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev
+          libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev
 
       - uses: actions/setup-python@v4
         with:
-          python-version: '3.8'
+          python-version: '3.7'
 
+      # Calculating the old-deps actually takes a bunch of time, so we cache the
+      # pyproject.toml / poetry.lock. We need to cache pyproject.toml as
+      # otherwise the `poetry install` step will error due to the poetry.lock
+      # file being outdated.
+      #
+      # This caches the output of `Prepare old deps`, which should generate the
+      # same `pyproject.toml` and `poetry.lock` for a given `pyproject.toml` input.
+      - uses: actions/cache@v3
+        id: cache-poetry-old-deps
+        name: Cache poetry.lock
+        with:
+          path: |
+            poetry.lock
+            pyproject.toml
+          key: poetry-old-deps2-${{ hashFiles('pyproject.toml') }}
       - name: Prepare old deps
+        if: steps.cache-poetry-old-deps.outputs.cache-hit != 'true'
         run: .ci/scripts/prepare_old_deps.sh
 
-      # Note: we install using `pip` here, not poetry. `poetry install` ignores the
-      # build-system section (https://github.com/python-poetry/poetry/issues/6154), but
-      # we explicitly want to test that you can `pip install` using the oldest version
-      # of poetry-core and setuptools-rust.
-      - run: pip install .[all,test]
+      # We only now install poetry so that `setup-python-poetry` caches the
+      # right poetry.lock's dependencies.
+      - uses: matrix-org/setup-python-poetry@v1
+        with:
+          python-version: '3.7'
+          poetry-version: "1.3.2"
+          extras: "all test"
 
-      # We nuke the local copy, as we've installed synapse into the virtualenv
-      # (rather than use an editable install, which we no longer support). If we
-      # don't do this then python can't find the native lib.
-      - run: rm -rf synapse/
-
       # Sanity check we can import/run Synapse
       - run: python -m synapse.app.homeserver --help
-      - run: python -m twisted.trial -j6 tests
+      - run: poetry run trial -j6 tests
       - name: Dump logs
         # Logs are most useful when the command fails, always include them.
         if: ${{ always() }}
@@ -362,7 +369,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        python-version: ["pypy-3.8"]
+        python-version: ["pypy-3.7"]
         extras: ["all"]
 
     steps:
@@ -399,8 +406,8 @@ jobs:
       env:
         SYTEST_BRANCH: ${{ github.head_ref }}
         POSTGRES: ${{ matrix.job.postgres && 1}}
-        MULTI_POSTGRES: ${{ (matrix.job.postgres == 'multi-postgres') || '' }}
-        ASYNCIO_REACTOR: ${{ (matrix.job.reactor == 'asyncio') || '' }}
+        MULTI_POSTGRES: ${{ (matrix.job.postgres == 'multi-postgres') && 1}}
+        ASYNCIO_REACTOR: ${{ (matrix.job.reactor == 'asyncio') && 1 }}
        WORKERS: ${{ matrix.job.workers && 1 }}
        BLACKLIST: ${{ matrix.job.workers && 'synapse-blacklist-with-workers' }}
        TOP: ${{ github.workspace }}
@@ -416,7 +423,7 @@ jobs:
         run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers
 
       - name: Install Rust
-        uses: dtolnay/rust-toolchain@1.60.0
+        uses: dtolnay/rust-toolchain@1.58.1
       - uses: Swatinem/rust-cache@v2
 
       - name: Run SyTest
@@ -477,7 +484,7 @@ jobs:
     strategy:
       matrix:
         include:
-          - python-version: "3.8"
+          - python-version: "3.7"
            postgres-version: "11"
 
          - python-version: "3.11"
@@ -556,7 +563,7 @@ jobs:
           path: synapse
 
       - name: Install Rust
-        uses: dtolnay/rust-toolchain@1.60.0
+        uses: dtolnay/rust-toolchain@1.58.1
       - uses: Swatinem/rust-cache@v2
 
       - uses: actions/setup-go@v4
@@ -584,7 +591,7 @@ jobs:
       - uses: actions/checkout@v3
 
       - name: Install Rust
-        uses: dtolnay/rust-toolchain@1.60.0
+        uses: dtolnay/rust-toolchain@1.58.1
       - uses: Swatinem/rust-cache@v2
 
       - run: cargo test
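The `MULTI_POSTGRES`/`ASYNCIO_REACTOR` change in the sytest hunk above swaps `(cond) || ''` for `(cond) && 1`. GitHub Actions expressions short-circuit and return one of their operands rather than a strict boolean, much like Python's `and`/`or`. A Python analogy (an analogy only, not the Actions evaluator itself):

```python
# Python analogy for the GitHub Actions expressions in the sytest env block:
# in Actions, `a && b` yields b when a is truthy (else a), and `a || b`
# yields a when a is truthy (else b), the same operand-returning behaviour
# as Python's `and`/`or`.
postgres = "multi-postgres"

MULTI_POSTGRES = (postgres == "multi-postgres") and 1       # -> 1 (or False if unmatched)
WORKERS = "" and 1                                          # falsy input -> ""
BLACKLIST = "workers" and "synapse-blacklist-with-workers"  # truthy -> the string

print(MULTI_POSTGRES, repr(WORKERS), BLACKLIST)
```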
.github/workflows/twisted_trunk.yml (vendored; 30 changes)
@@ -18,22 +18,7 @@ concurrency:
   cancel-in-progress: true
 
 jobs:
-  check_repo:
-    # Prevent this workflow from running on any fork of Synapse other than matrix-org/synapse, as it is
-    # only useful to the Synapse core team.
-    # All other workflow steps depend on this one, thus if 'should_run_workflow' is not 'true', the rest
-    # of the workflow will be skipped as well.
-    if: github.repository == 'matrix-org/synapse'
-    runs-on: ubuntu-latest
-    outputs:
-      should_run_workflow: ${{ steps.check_condition.outputs.should_run_workflow }}
-    steps:
-      - id: check_condition
-        run: echo "should_run_workflow=${{ github.repository == 'matrix-org/synapse' }}" >> "$GITHUB_OUTPUT"
-
   mypy:
-    needs: check_repo
-    if: needs.check_repo.outputs.should_run_workflow == 'true'
     runs-on: ubuntu-latest
 
     steps:
@@ -56,8 +41,6 @@ jobs:
       - run: poetry run mypy
 
   trial:
-    needs: check_repo
-    if: needs.check_repo.outputs.should_run_workflow == 'true'
     runs-on: ubuntu-latest
 
     steps:
@@ -92,15 +75,9 @@ jobs:
           || true
 
   sytest:
-    needs: check_repo
-    if: needs.check_repo.outputs.should_run_workflow == 'true'
     runs-on: ubuntu-latest
     container:
-      # We're using ubuntu:focal because it uses Python 3.8 which is our minimum supported Python version.
-      # This job is a canary to warn us about unreleased twisted changes that would cause problems for us if
-      # they were to be released immediately. For simplicity's sake (and to save CI runners) we use the oldest
-      # version, assuming that any incompatibilities on newer versions would also be present on the oldest.
-      image: matrixdotorg/sytest-synapse:focal
+      image: matrixdotorg/sytest-synapse:buster
       volumes:
         - ${{ github.workspace }}:/src
@@ -142,8 +119,7 @@ jobs:
             /logs/**/*.log*
 
   complement:
-    needs: check_repo
-    if: "!failure() && !cancelled() && needs.check_repo.outputs.should_run_workflow == 'true'"
+    if: "${{ !failure() && !cancelled() }}"
     runs-on: ubuntu-latest
 
     strategy:
@@ -190,7 +166,7 @@ jobs:
 
   # open an issue if the build fails, so we know about it.
   open-issue:
-    if: failure() && needs.check_repo.outputs.should_run_workflow == 'true'
+    if: failure()
     needs:
       - mypy
       - trial
.gitignore (vendored; 6 changes)
@@ -15,7 +15,7 @@ _trial_temp*/
 .DS_Store
 __pycache__/
 
-# We do want poetry, cargo and flake lockfiles.
+# We do want the poetry and cargo lockfile.
 !poetry.lock
 !Cargo.lock
-!flake.lock
@@ -34,15 +34,11 @@ __pycache__/
 /logs
 /media_store/
 /uploads
-/homeserver-config-overrides.d
 
 # For direnv users
 /.envrc
 .direnv/
 
-# For nix/devenv users
-.devenv/
-
 # IDEs
 /.idea/
 /.ropeproject/
CHANGES.md (3360 changes): file diff suppressed because it is too large.
Cargo.lock (generated; 65 changes)
@@ -4,18 +4,18 @@ version = 3
 
 [[package]]
 name = "aho-corasick"
-version = "1.0.2"
+version = "0.7.19"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "43f6cb1bf222025340178f382c426f13757b2960e89779dfcb319c32542a5a41"
+checksum = "b4f55bd91a0978cbfd91c457a164bab8b4001c833b7f323132c0a4e1922dd44e"
 dependencies = [
  "memchr",
 ]
 
 [[package]]
 name = "anyhow"
-version = "1.0.72"
+version = "1.0.70"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3b13c32d80ecc7ab747b80c3784bce54ee8a7a0cc4fbda9bf4cda2cf6fe90854"
+checksum = "7de8ce5e0f9f8d88245311066a578d72b7af3e7088f32783804676302df237e4"
 
 [[package]]
 name = "arc-swap"
@@ -132,9 +132,12 @@ dependencies = [
 
 [[package]]
 name = "log"
-version = "0.4.19"
+version = "0.4.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4"
+checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e"
+dependencies = [
+ "cfg-if",
+]
 
 [[package]]
 name = "memchr"
@@ -182,9 +185,9 @@ dependencies = [
 
 [[package]]
 name = "proc-macro2"
-version = "1.0.64"
+version = "1.0.52"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "78803b62cbf1f46fde80d7c0e803111524b9877184cfe7c3033659490ac7a7da"
+checksum = "1d0e1ae9e836cc3beddd63db0df682593d7e2d3d891ae8c9083d2113e1744224"
 dependencies = [
  "unicode-ident",
 ]
@@ -229,9 +232,9 @@ dependencies = [
 
 [[package]]
 name = "pyo3-log"
-version = "0.8.3"
+version = "0.8.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f47b0777feb17f61eea78667d61103758b243a871edc09a7786500a50467b605"
+checksum = "f9c8b57fe71fb5dcf38970ebedc2b1531cf1c14b1b9b4c560a182a57e115575c"
 dependencies = [
  "arc-swap",
  "log",
@@ -273,9 +276,9 @@ dependencies = [
 
 [[package]]
 name = "quote"
-version = "1.0.29"
+version = "1.0.26"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "573015e8ab27661678357f27dc26460738fd2b6c86e46f386fde94cb5d913105"
+checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc"
 dependencies = [
  "proc-macro2",
 ]
@@ -291,21 +294,9 @@ dependencies = [
 
 [[package]]
 name = "regex"
-version = "1.9.1"
+version = "1.7.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b2eae68fc220f7cf2532e4494aded17545fce192d59cd996e0fe7887f4ceb575"
-dependencies = [
- "aho-corasick",
- "memchr",
- "regex-automata",
- "regex-syntax",
-]
-
-[[package]]
-name = "regex-automata"
-version = "0.3.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "83d3daa6976cffb758ec878f108ba0e062a45b2d6ca3a2cca965338855476caf"
+checksum = "8b1f693b24f6ac912f4893ef08244d70b6067480d2f1a46e950c9691e6749d1d"
 dependencies = [
  "aho-corasick",
  "memchr",
@@ -314,9 +305,9 @@ dependencies = [
 
 [[package]]
 name = "regex-syntax"
-version = "0.7.3"
+version = "0.6.29"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2ab07dc67230e4a4718e70fd5c20055a4334b121f1f9db8fe63ef39ce9b8c846"
+checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1"
 
 [[package]]
 name = "ryu"
@@ -332,29 +323,29 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
 
 [[package]]
 name = "serde"
-version = "1.0.171"
+version = "1.0.160"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "30e27d1e4fd7659406c492fd6cfaf2066ba8773de45ca75e855590f856dc34a9"
+checksum = "bb2f3770c8bce3bcda7e149193a069a0f4365bda1fa5cd88e03bca26afc1216c"
 dependencies = [
  "serde_derive",
 ]
 
 [[package]]
 name = "serde_derive"
-version = "1.0.171"
+version = "1.0.160"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "389894603bd18c46fa56231694f8d827779c0951a667087194cf9de94ed24682"
+checksum = "291a097c63d8497e00160b166a967a4a79c64f3facdd01cbd7502231688d77df"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.25",
+ "syn 2.0.10",
 ]
 
 [[package]]
 name = "serde_json"
-version = "1.0.103"
+version = "1.0.96"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d03b412469450d4404fe8499a268edd7f8b79fecb074b0d812ad64ca21f4031b"
+checksum = "057d394a50403bcac12672b2b18fb387ab6d289d957dab67dd201875391e52f1"
 dependencies = [
  "itoa",
  "ryu",
@@ -386,9 +377,9 @@ dependencies = [
 
 [[package]]
 name = "syn"
-version = "2.0.25"
+version = "2.0.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "15e3fc8c0c74267e2df136e5e5fb656a464158aa57624053375eb9c8c6e25ae2"
+checksum = "5aad1363ed6d37b84299588d62d3a7d95b5a5c2d9aad5c85609fda12afaa1f40"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -3,4 +3,3 @@
 
 [workspace]
 members = ["rust"]
-resolver = "2"
book.toml (12 changes)
@@ -34,14 +34,6 @@ additional-css = [
     "docs/website_files/table-of-contents.css",
     "docs/website_files/remove-nav-buttons.css",
     "docs/website_files/indent-section-headers.css",
-    "docs/website_files/version-picker.css",
 ]
-additional-js = [
-    "docs/website_files/table-of-contents.js",
-    "docs/website_files/version-picker.js",
-    "docs/website_files/version.js",
-]
-theme = "docs/website_files/theme"
-
-[preprocessor.schema_versions]
-command = "./scripts-dev/schema_versions.py"
+additional-js = ["docs/website_files/table-of-contents.js"]
+theme = "docs/website_files/theme"
changelog.d/15284.misc (new file; 1 change)
@@ -0,0 +1 @@
+Speedup tests by caching HomeServerConfig instances.

changelog.d/15417.bugfix (new file; 1 change)
@@ -0,0 +1 @@
+Fix a long-standing bug where cached key results which were directly fetched would not be properly re-used.

changelog.d/15418.misc (new file; 1 change)
@@ -0,0 +1 @@
+Always use multi-user device resync replication endpoints.

changelog.d/15492.misc (new file; 1 change)
@@ -0,0 +1 @@
+Add a Nix flake for use as a development environment.
@@ -70,10 +70,6 @@ redis:
     port: 6379
     # dbid: <redis_logical_db_id>
     # password: <secret_password>
-    # use_tls: True
-    # certificate_file: <path_to_certificate>
-    # private_key_file: <path_to_private_key>
-    # ca_file: <path_to_ca_certificate>
 ```
 
 This assumes that your Redis service is called `redis` in your Docker Compose file.
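The options removed above configure TLS for the Redis connection. As an illustration of what those fields correspond to on the client side, a sketch using the `redis` Python package (host and paths are placeholders, not values from this repository):

```python
# Illustration only: connecting to Redis over TLS with redis-py, using the
# same certificate/key/CA trio as the removed config options above.
import redis

r = redis.Redis(
    host="redis",                           # the service name from Docker Compose
    port=6379,
    ssl=True,                               # counterpart of `use_tls: True`
    ssl_certfile="/path/to/certificate",    # certificate_file
    ssl_keyfile="/path/to/private_key",     # private_key_file
    ssl_ca_certs="/path/to/ca_certificate", # ca_file
)
r.ping()  # raises if the TLS handshake or auth fails
```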
(File diff suppressed because it is too large.)
@@ -29,7 +29,7 @@
     "level": "error"
 },
 {
-    "line": "my-matrix-server-federation-sender-1  | 2023-01-25 20:56:20,995 - synapse.http.matrixfederationclient - 709 - WARNING - federation_transaction_transmission_loop-3 - {PUT-O-3} [example.com] Request failed: PUT matrix-federation://example.com/_matrix/federation/v1/send/1674680155797: HttpResponseException('403: Forbidden')",
+    "line": "my-matrix-server-federation-sender-1  | 2023-01-25 20:56:20,995 - synapse.http.matrixfederationclient - 709 - WARNING - federation_transaction_transmission_loop-3 - {PUT-O-3} [example.com] Request failed: PUT matrix://example.com/_matrix/federation/v1/send/1674680155797: HttpResponseException('403: Forbidden')",
     "level": "warning"
 },
 {
debian/changelog (vendored; 120 changes)
@@ -1,123 +1,3 @@
-matrix-synapse-py3 (1.89.0) stable; urgency=medium
-
-  * New Synapse release 1.89.0.
-
- -- Synapse Packaging team <packages@matrix.org>  Tue, 01 Aug 2023 11:07:15 +0100
-
-matrix-synapse-py3 (1.89.0~rc1) stable; urgency=medium
-
-  * New Synapse release 1.89.0rc1.
-
- -- Synapse Packaging team <packages@matrix.org>  Tue, 25 Jul 2023 14:31:07 +0200
-
-matrix-synapse-py3 (1.88.0) stable; urgency=medium
-
-  * New Synapse release 1.88.0.
-
- -- Synapse Packaging team <packages@matrix.org>  Tue, 18 Jul 2023 13:59:28 +0100
-
-matrix-synapse-py3 (1.88.0~rc1) stable; urgency=medium
-
-  * New Synapse release 1.88.0rc1.
-
- -- Synapse Packaging team <packages@matrix.org>  Tue, 11 Jul 2023 10:20:19 +0100
-
-matrix-synapse-py3 (1.87.0) stable; urgency=medium
-
-  * New Synapse release 1.87.0.
-
- -- Synapse Packaging team <packages@matrix.org>  Tue, 04 Jul 2023 16:24:00 +0100
-
-matrix-synapse-py3 (1.87.0~rc1) stable; urgency=medium
-
-  * New synapse release 1.87.0rc1.
-
- -- Synapse Packaging team <packages@matrix.org>  Tue, 27 Jun 2023 15:27:04 +0000
-
-matrix-synapse-py3 (1.86.0) stable; urgency=medium
-
-  * New Synapse release 1.86.0.
-
- -- Synapse Packaging team <packages@matrix.org>  Tue, 20 Jun 2023 17:22:46 +0200
-
-matrix-synapse-py3 (1.86.0~rc2) stable; urgency=medium
-
-  * New Synapse release 1.86.0rc2.
-
- -- Synapse Packaging team <packages@matrix.org>  Wed, 14 Jun 2023 12:16:27 +0200
-
-matrix-synapse-py3 (1.86.0~rc1) stable; urgency=medium
-
-  * New Synapse release 1.86.0rc1.
-
- -- Synapse Packaging team <packages@matrix.org>  Tue, 13 Jun 2023 14:30:45 +0200
-
-matrix-synapse-py3 (1.85.2) stable; urgency=medium
-
-  * New Synapse release 1.85.2.
-
- -- Synapse Packaging team <packages@matrix.org>  Thu, 08 Jun 2023 13:04:18 +0100
-
-matrix-synapse-py3 (1.85.1) stable; urgency=medium
-
-  * New Synapse release 1.85.1.
-
- -- Synapse Packaging team <packages@matrix.org>  Wed, 07 Jun 2023 10:51:12 +0100
-
-matrix-synapse-py3 (1.85.0) stable; urgency=medium
-
-  * New Synapse release 1.85.0.
-
- -- Synapse Packaging team <packages@matrix.org>  Tue, 06 Jun 2023 09:39:29 +0100
-
-matrix-synapse-py3 (1.85.0~rc2) stable; urgency=medium
-
-  * New Synapse release 1.85.0rc2.
-
- -- Synapse Packaging team <packages@matrix.org>  Thu, 01 Jun 2023 09:16:18 -0700
-
-matrix-synapse-py3 (1.85.0~rc1) stable; urgency=medium
-
-  * New Synapse release 1.85.0rc1.
-
- -- Synapse Packaging team <packages@matrix.org>  Tue, 30 May 2023 13:56:54 +0100
-
-matrix-synapse-py3 (1.84.1) stable; urgency=medium
-
-  * New Synapse release 1.84.1.
-
- -- Synapse Packaging team <packages@matrix.org>  Fri, 26 May 2023 16:15:30 +0100
-
-matrix-synapse-py3 (1.84.0) stable; urgency=medium
-
-  * New Synapse release 1.84.0.
-
- -- Synapse Packaging team <packages@matrix.org>  Tue, 23 May 2023 10:57:22 +0100
-
-matrix-synapse-py3 (1.84.0~rc1) stable; urgency=medium
-
-  * New Synapse release 1.84.0rc1.
-
- -- Synapse Packaging team <packages@matrix.org>  Tue, 16 May 2023 11:12:02 +0100
-
-matrix-synapse-py3 (1.83.0) stable; urgency=medium
-
-  * New Synapse release 1.83.0.
-
- -- Synapse Packaging team <packages@matrix.org>  Tue, 09 May 2023 18:13:37 +0200
-
-matrix-synapse-py3 (1.83.0~rc1) stable; urgency=medium
-
-  * New Synapse release 1.83.0rc1.
-
- -- Synapse Packaging team <packages@matrix.org>  Tue, 02 May 2023 15:56:38 +0100
-
-matrix-synapse-py3 (1.82.0) stable; urgency=medium
-
-  * New Synapse release 1.82.0.
-
- -- Synapse Packaging team <packages@matrix.org>  Tue, 25 Apr 2023 11:56:06 +0100
-
 matrix-synapse-py3 (1.82.0~rc1) stable; urgency=medium
 
   * New Synapse release 1.82.0rc1.
@@ -46,7 +46,7 @@ for port in 8080 8081 8082; do
     echo ''
 
     # Warning, this heredoc depends on the interaction of tabs and spaces.
-    # Please don't accidentally bork me with your fancy settings.
+    # Please don't accidentaly bork me with your fancy settings.
     listeners=$(cat <<-PORTLISTENERS
     # Configure server to listen on both $https_port and $port
     # This overides some of the default settings above
@@ -80,8 +80,12 @@ for port in 8080 8081 8082; do
     echo "tls_certificate_path: \"$DIR/$port/localhost:$port.tls.crt\""
     echo "tls_private_key_path: \"$DIR/$port/localhost:$port.tls.key\""
 
-    # Request keys directly from servers contacted over federation
-    echo 'trusted_key_servers: []'
+    # Ignore keys from the trusted keys server
+    echo '# Ignore keys from the trusted keys server'
+    echo 'trusted_key_servers:'
+    echo ' - server_name: "matrix.org"'
+    echo '   accept_keys_insecurely: true'
+    echo ''
 
     # Allow the servers to communicate over localhost.
     allow_list=$(cat <<-ALLOW_LIST
@@ -27,7 +27,7 @@ ARG PYTHON_VERSION=3.11
 ###
 # We hardcode the use of Debian bullseye here because this could change upstream
 # and other Dockerfiles used for testing are expecting bullseye.
-FROM docker.io/library/python:${PYTHON_VERSION}-slim-bullseye as requirements
+FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye as requirements
 
 # RUN --mount is specific to buildkit and is documented at
 # https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md#build-mounts-run---mount.
@@ -37,7 +37,7 @@ RUN \
   --mount=type=cache,target=/var/cache/apt,sharing=locked \
   --mount=type=cache,target=/var/lib/apt,sharing=locked \
   apt-get update -qq && apt-get install -yqq \
-    build-essential curl git libffi-dev libssl-dev pkg-config \
+    build-essential curl git libffi-dev libssl-dev \
   && rm -rf /var/lib/apt/lists/*
 
 # Install rust and ensure its in the PATH.
@@ -87,7 +87,7 @@ RUN if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
 ###
 ### Stage 1: builder
 ###
-FROM docker.io/library/python:${PYTHON_VERSION}-slim-bullseye as builder
+FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye as builder
 
 # install the OS build deps
 RUN \
@@ -158,7 +158,7 @@ RUN --mount=type=cache,target=/synapse/target,sharing=locked \
 ### Stage 2: runtime
 ###
 
-FROM docker.io/library/python:${PYTHON_VERSION}-slim-bullseye
+FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye
 
 LABEL org.opencontainers.image.url='https://matrix.org/docs/projects/server/synapse'
 LABEL org.opencontainers.image.documentation='https://github.com/matrix-org/synapse/blob/master/docker/README.md'
@@ -24,16 +24,16 @@ ARG distro=""
 # https://launchpad.net/~jyrki-pulliainen/+archive/ubuntu/dh-virtualenv, but
 # it's not obviously easier to use that than to build our own.)
 
-FROM docker.io/library/${distro} as builder
+FROM ${distro} as builder
 
 RUN apt-get update -qq -o Acquire::Languages=none
 RUN env DEBIAN_FRONTEND=noninteractive apt-get install \
-        -yqq --no-install-recommends \
-        build-essential \
-        ca-certificates \
-        devscripts \
-        equivs \
-        wget
+    -yqq --no-install-recommends \
+    build-essential \
+    ca-certificates \
+    devscripts \
+    equivs \
+    wget
 
 # fetch and unpack the package
 # We are temporarily using a fork of dh-virtualenv due to an incompatibility with Python 3.11, which ships with
@@ -55,36 +55,40 @@ RUN cd /dh-virtualenv && DEB_BUILD_OPTIONS=nodoc dpkg-buildpackage -us -uc -b
 ###
 ### Stage 1
 ###
-FROM docker.io/library/${distro}
+FROM ${distro}
 
 # Get the distro we want to pull from as a dynamic build variable
 # (We need to define it in each build stage)
 ARG distro=""
 ENV distro ${distro}
 
 # Python < 3.7 assumes LANG="C" means ASCII-only and throws on printing unicode
 # http://bugs.python.org/issue19846
 ENV LANG C.UTF-8
 
 # Install the build dependencies
 #
 # NB: keep this list in sync with the list of build-deps in debian/control
 # TODO: it would be nice to do that automatically.
 RUN apt-get update -qq -o Acquire::Languages=none \
     && env DEBIAN_FRONTEND=noninteractive apt-get install \
-        -yqq --no-install-recommends -o Dpkg::Options::=--force-unsafe-io \
-        build-essential \
-        curl \
-        debhelper \
-        devscripts \
-        libsystemd-dev \
-        lsb-release \
-        pkg-config \
-        python3-dev \
-        python3-pip \
-        python3-setuptools \
-        python3-venv \
-        sqlite3 \
-        libpq-dev \
-        libicu-dev \
-        pkg-config \
-        xmlsec1
+    -yqq --no-install-recommends -o Dpkg::Options::=--force-unsafe-io \
+    build-essential \
+    curl \
+    debhelper \
+    devscripts \
+    libsystemd-dev \
+    lsb-release \
+    pkg-config \
+    python3-dev \
+    python3-pip \
+    python3-setuptools \
+    python3-venv \
+    sqlite3 \
+    libpq-dev \
+    libicu-dev \
+    pkg-config \
+    xmlsec1
 
 # Install rust and ensure it's in the PATH
 ENV RUSTUP_HOME=/rust
@@ -7,7 +7,7 @@ ARG FROM=matrixdotorg/synapse:$SYNAPSE_VERSION
 # target image. For repeated rebuilds, this is much faster than apt installing
 # each time.
 
-FROM docker.io/library/debian:bullseye-slim AS deps_base
+FROM debian:bullseye-slim AS deps_base
     RUN \
        --mount=type=cache,target=/var/cache/apt,sharing=locked \
        --mount=type=cache,target=/var/lib/apt,sharing=locked \
@@ -21,7 +21,7 @@ FROM docker.io/library/debian:bullseye-slim AS deps_base
 # which makes it much easier to copy (but we need to make sure we use an image
 # based on the same debian version as the synapse image, to make sure we get
 # the expected version of libc.
-FROM docker.io/library/redis:7-bullseye AS redis_base
+FROM redis:6-bullseye AS redis_base
 
 # now build the final image, based on the the regular Synapse docker image
 FROM $FROM
@@ -73,8 +73,7 @@ The following environment variables are supported in `generate` mode:
   will log sensitive information such as access tokens.
   This should not be needed unless you are a developer attempting to debug something
   particularly tricky.
-* `SYNAPSE_LOG_TESTING`: if set, Synapse will log additional information useful
-  for testing.
 
 ## Postgres
@@ -7,7 +7,6 @@
 # https://github.com/matrix-org/synapse/blob/develop/docker/README-testing.md#testing-with-postgresql-and-single-or-multi-process-synapse
 
 ARG SYNAPSE_VERSION=latest
-# This is an intermediate image, to be built locally (not pulled from a registry).
 ARG FROM=matrixdotorg/synapse-workers:$SYNAPSE_VERSION
 
 FROM $FROM
@@ -20,8 +19,8 @@ FROM $FROM
     # the same debian version as Synapse's docker image (so the versions of the
     # shared libraries match).
     RUN adduser --system --uid 999 postgres --home /var/lib/postgresql
-    COPY --from=docker.io/library/postgres:13-bullseye /usr/lib/postgresql /usr/lib/postgresql
-    COPY --from=docker.io/library/postgres:13-bullseye /usr/share/postgresql /usr/share/postgresql
+    COPY --from=postgres:13-bullseye /usr/lib/postgresql /usr/lib/postgresql
+    COPY --from=postgres:13-bullseye /usr/share/postgresql /usr/share/postgresql
     RUN mkdir /var/run/postgresql && chown postgres /var/run/postgresql
     ENV PATH="${PATH}:/usr/lib/postgresql/13/bin"
     ENV PGDATA=/var/lib/postgresql/data
@@ -92,6 +92,8 @@ allow_device_name_lookup_over_federation: true
 ## Experimental Features ##
 
 experimental_features:
+  # Enable history backfilling support
+  msc2716_enabled: true
   # client-side support for partial state in /send_join responses
   faster_joins: true
   # Enable support for polls
@@ -35,11 +35,7 @@ server {
 
     # Send all other traffic to the main process
     location ~* ^(\\/_matrix|\\/_synapse) {
-{% if using_unix_sockets %}
-        proxy_pass http://unix:/run/main_public.sock;
-{% else %}
         proxy_pass http://localhost:8080;
-{% endif %}
         proxy_set_header X-Forwarded-For $remote_addr;
         proxy_set_header X-Forwarded-Proto $scheme;
         proxy_set_header Host $host;
@@ -6,9 +6,6 @@
 {% if enable_redis %}
 redis:
     enabled: true
-{% if using_unix_sockets %}
-    path: /tmp/redis.sock
-{% endif %}
 {% endif %}
 
 {% if appservice_registrations is not none %}
@@ -19,11 +19,7 @@ username=www-data
 autorestart=true
 
 [program:redis]
-{% if using_unix_sockets %}
-command=/usr/local/bin/prefix-log /usr/local/bin/redis-server --unixsocket /tmp/redis.sock
-{% else %}
 command=/usr/local/bin/prefix-log /usr/local/bin/redis-server
-{% endif %}
 priority=1
 stdout_logfile=/dev/stdout
 stdout_logfile_maxbytes=0
@@ -6,13 +6,13 @@
 worker_app: "{{ app }}"
 worker_name: "{{ name }}"
 
+# The replication listener on the main synapse process.
+worker_replication_host: 127.0.0.1
+worker_replication_http_port: 9093
+
 worker_listeners:
   - type: http
-{% if using_unix_sockets %}
-    path: "/run/worker.{{ port }}"
-{% else %}
     port: {{ port }}
-{% endif %}
 {% if listener_resources %}
   resources:
     - names:
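Templates like the one above are rendered with Jinja2 by the worker-configuration script. A minimal sketch of that rendering step (the template string here is a trimmed stand-in, not the shipped template):

```python
# Sketch: rendering a worker config template with Jinja2, as the
# configure-workers script does via its `convert` helper.
from jinja2 import Template

template = Template(
    'worker_app: "{{ app }}"\n'
    'worker_name: "{{ name }}"\n'
    "worker_listeners:\n"
    "  - type: http\n"
    "    port: {{ port }}\n"
)
print(template.render(app="synapse.app.generic_worker", name="worker1", port=18009))
```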
@@ -36,17 +36,12 @@ listeners:
 
   # Allow configuring in case we want to reverse proxy 8008
   # using another process in the same container
-{% if SYNAPSE_USE_UNIX_SOCKET %}
-  # Unix sockets don't care about TLS or IP addresses or ports
-  - path: '/run/main_public.sock'
-    type: http
-{% else %}
   - port: {{ SYNAPSE_HTTP_PORT or 8008 }}
     tls: false
     bind_addresses: ['::']
     type: http
     x_forwarded: false
-{% endif %}
 
     resources:
       - names: [client]
        compress: true
@@ -62,11 +57,8 @@ database:
     user: "{{ POSTGRES_USER or "synapse" }}"
     password: "{{ POSTGRES_PASSWORD }}"
     database: "{{ POSTGRES_DB or "synapse" }}"
-{% if not SYNAPSE_USE_UNIX_SOCKET %}
-{# Synapse will use a default unix socket for Postgres when host/port is not specified (behavior from `psycopg2`). #}
     host: "{{ POSTGRES_HOST or "db" }}"
     port: "{{ POSTGRES_PORT or "5432" }}"
-{% endif %}
     cp_min: 5
     cp_max: 10
 {% else %}
@@ -49,35 +49,17 @@ handlers:
     class: logging.StreamHandler
     formatter: precise
 
-loggers:
-    # This is just here so we can leave `loggers` in the config regardless of whether
-    # we configure other loggers below (avoid empty yaml dict error).
-    _placeholder:
-        level: "INFO"
-
 {% if not SYNAPSE_LOG_SENSITIVE %}
 {#
   If SYNAPSE_LOG_SENSITIVE is unset, then override synapse.storage.SQL to INFO
   so that DEBUG entries (containing sensitive information) are not emitted.
 #}
+loggers:
     synapse.storage.SQL:
         # beware: increasing this to DEBUG will make synapse log sensitive
         # information such as access tokens.
         level: INFO
 {% endif %}
 
-{% if SYNAPSE_LOG_TESTING %}
-{#
-  If Synapse is under test, log a few more useful things for a developer
-  attempting to debug something particularly tricky.
-
-  With `synapse.visibility.filtered_event_debug`, it logs when events are (maybe
-  unexpectedly) filtered out of responses in tests. It's just nice to be able to
-  look at the CI log and figure out why an event isn't being returned.
-#}
-    synapse.visibility.filtered_event_debug:
-        level: DEBUG
-{% endif %}
 
 root:
     level: {{ SYNAPSE_LOG_LEVEL or "INFO" }}
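The `SYNAPSE_LOG_SENSITIVE` branch above pins the `synapse.storage.SQL` logger to INFO so per-query DEBUG lines (which can include access tokens) are never emitted, whatever the root level. A hedged illustration of the same effect using Python's standard-library logging config (not the template itself):

```python
# Illustration: cap a single logger below the root level with dictConfig.
import logging
import logging.config

logging.config.dictConfig({
    "version": 1,
    # The named logger stops at INFO even though the root allows DEBUG.
    "loggers": {"synapse.storage.SQL": {"level": "INFO"}},
    "root": {"level": "DEBUG"},
})
assert not logging.getLogger("synapse.storage.SQL").isEnabledFor(logging.DEBUG)
```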
@@ -40,8 +40,6 @@
 #   log level. INFO is the default.
 # * SYNAPSE_LOG_SENSITIVE: If unset, SQL and SQL values won't be logged,
 #   regardless of the SYNAPSE_LOG_LEVEL setting.
-# * SYNAPSE_LOG_TESTING: if set, Synapse will log additional information useful
-#   for testing.
 #
 # NOTE: According to Complement's ENTRYPOINT expectations for a homeserver image (as defined
 # in the project's README), this script may be run multiple times, and functionality should
@@ -71,12 +69,6 @@ import yaml
 from jinja2 import Environment, FileSystemLoader
 
 MAIN_PROCESS_HTTP_LISTENER_PORT = 8080
-MAIN_PROCESS_INSTANCE_NAME = "main"
-MAIN_PROCESS_LOCALHOST_ADDRESS = "127.0.0.1"
-MAIN_PROCESS_REPLICATION_PORT = 9093
-# Obviously, these would only be used with the UNIX socket option
-MAIN_PROCESS_UNIX_SOCKET_PUBLIC_PATH = "/run/main_public.sock"
-MAIN_PROCESS_UNIX_SOCKET_PRIVATE_PATH = "/run/main_private.sock"
 
 # A simple name used as a placeholder in the WORKERS_CONFIG below. This will be replaced
 # during processing with the name of the worker.
@@ -247,6 +239,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
         "^/_matrix/client/(api/v1|r0|v3|unstable)/join/",
         "^/_matrix/client/(api/v1|r0|v3|unstable)/knock/",
         "^/_matrix/client/(api/v1|r0|v3|unstable)/profile/",
+        "^/_matrix/client/(v1|unstable/org.matrix.msc2716)/rooms/.*/batch_send",
     ],
     "shared_extra_conf": {},
     "worker_extra_conf": "",
@@ -410,15 +403,11 @@ def add_worker_roles_to_shared_config(
     )
 
     # Map of stream writer instance names to host/ports combos
-    if os.environ.get("SYNAPSE_USE_UNIX_SOCKET", False):
-        instance_map[worker_name] = {
-            "path": f"/run/worker.{worker_port}",
-        }
-    else:
-        instance_map[worker_name] = {
-            "host": "localhost",
-            "port": worker_port,
-        }
+    instance_map[worker_name] = {
+        "host": "localhost",
+        "port": worker_port,
+    }
 
     # Update the list of stream writers. It's convenient that the name of the worker
     # type is the same as the stream to write. Iterate over the whole list in case there
     # is more than one.
@@ -430,15 +419,10 @@ def add_worker_roles_to_shared_config(
 
     # Map of stream writer instance names to host/ports combos
     # For now, all stream writers need http replication ports
-    if os.environ.get("SYNAPSE_USE_UNIX_SOCKET", False):
-        instance_map[worker_name] = {
-            "path": f"/run/worker.{worker_port}",
-        }
-    else:
-        instance_map[worker_name] = {
-            "host": "localhost",
-            "port": worker_port,
-        }
+    instance_map[worker_name] = {
+        "host": "localhost",
+        "port": worker_port,
+    }
 
 
 def merge_worker_template_configs(
@@ -730,29 +714,17 @@ def generate_worker_files(
     # Note that yaml cares about indentation, so care should be taken to insert lines
     # into files at the correct indentation below.
 
-    # Convenience helper for if using unix sockets instead of host:port
-    using_unix_sockets = environ.get("SYNAPSE_USE_UNIX_SOCKET", False)
     # First read the original config file and extract the listeners block. Then we'll
     # add another listener for replication. Later we'll write out the result to the
     # shared config file.
-    listeners: List[Any]
-    if using_unix_sockets:
-        listeners = [
-            {
-                "path": MAIN_PROCESS_UNIX_SOCKET_PRIVATE_PATH,
-                "type": "http",
-                "resources": [{"names": ["replication"]}],
-            }
-        ]
-    else:
-        listeners = [
-            {
-                "port": MAIN_PROCESS_REPLICATION_PORT,
-                "bind_address": MAIN_PROCESS_LOCALHOST_ADDRESS,
-                "type": "http",
-                "resources": [{"names": ["replication"]}],
-            }
-        ]
+    listeners = [
+        {
+            "port": 9093,
+            "bind_address": "127.0.0.1",
+            "type": "http",
+            "resources": [{"names": ["replication"]}],
+        }
+    ]
     with open(config_path) as file_stream:
         original_config = yaml.safe_load(file_stream)
         original_listeners = original_config.get("listeners")
@@ -793,17 +765,7 @@ def generate_worker_files(
 
     # A list of internal endpoints to healthcheck, starting with the main process
     # which exists even if no workers do.
-    # This list ends up being part of the command line to curl, (curl added support for
-    # Unix sockets in version 7.40).
-    if using_unix_sockets:
-        healthcheck_urls = [
-            f"--unix-socket {MAIN_PROCESS_UNIX_SOCKET_PUBLIC_PATH} "
-            # The scheme and hostname from the following URL are ignored.
-            # The only thing that matters is the path `/health`
-            "http://localhost/health"
-        ]
-    else:
-        healthcheck_urls = ["http://localhost:8080/health"]
+    healthcheck_urls = ["http://localhost:8080/health"]
 
     # Get the set of all worker types that we have configured
     all_worker_types_in_use = set(chain(*requested_worker_types.values()))
@@ -840,12 +802,8 @@ def generate_worker_files(
         # given worker_type needs to stay assigned and not be replaced.
         worker_config["shared_extra_conf"].update(shared_config)
         shared_config = worker_config["shared_extra_conf"]
-        if using_unix_sockets:
-            healthcheck_urls.append(
-                f"--unix-socket /run/worker.{worker_port} http://localhost/health"
-            )
-        else:
-            healthcheck_urls.append("http://localhost:%d/health" % (worker_port,))
+        healthcheck_urls.append("http://localhost:%d/health" % (worker_port,))
 
         # Update the shared config with sharding-related options if necessary
         add_worker_roles_to_shared_config(
@@ -864,7 +822,6 @@ def generate_worker_files(
             "/conf/workers/{name}.yaml".format(name=worker_name),
             **worker_config,
             worker_log_config_filepath=log_config_filepath,
-            using_unix_sockets=using_unix_sockets,
         )
 
         # Save this worker's port number to the correct nginx upstreams
@@ -885,13 +842,8 @@ def generate_worker_files(
     nginx_upstream_config = ""
     for upstream_worker_base_name, upstream_worker_ports in nginx_upstreams.items():
         body = ""
-        if using_unix_sockets:
-            for port in upstream_worker_ports:
-                body += f"    server unix:/run/worker.{port};\n"
-
-        else:
-            for port in upstream_worker_ports:
-                body += f"    server localhost:{port};\n"
+        for port in upstream_worker_ports:
+            body += f"    server localhost:{port};\n"
 
         # Add to the list of configured upstreams
         nginx_upstream_config += NGINX_UPSTREAM_CONFIG_BLOCK.format(
@@ -918,19 +870,6 @@ def generate_worker_files(
 
     workers_in_use = len(requested_worker_types) > 0
 
-    # If there are workers, add the main process to the instance_map too.
-    if workers_in_use:
-        instance_map = shared_config.setdefault("instance_map", {})
-        if using_unix_sockets:
-            instance_map[MAIN_PROCESS_INSTANCE_NAME] = {
-                "path": MAIN_PROCESS_UNIX_SOCKET_PRIVATE_PATH,
-            }
-        else:
-            instance_map[MAIN_PROCESS_INSTANCE_NAME] = {
-                "host": MAIN_PROCESS_LOCALHOST_ADDRESS,
-                "port": MAIN_PROCESS_REPLICATION_PORT,
-            }
-
     # Shared homeserver config
     convert(
         "/conf/shared.yaml.j2",
@@ -939,7 +878,6 @@ def generate_worker_files(
         appservice_registrations=appservice_registrations,
         enable_redis=workers_in_use,
         workers_in_use=workers_in_use,
-        using_unix_sockets=using_unix_sockets,
     )
 
     # Nginx config
@@ -950,7 +888,6 @@ def generate_worker_files(
         upstream_directives=nginx_upstream_config,
         tls_cert_path=os.environ.get("SYNAPSE_TLS_CERT"),
         tls_key_path=os.environ.get("SYNAPSE_TLS_KEY"),
-        using_unix_sockets=using_unix_sockets,
     )
 
     # Supervisord config
@@ -960,7 +897,6 @@
         "/etc/supervisor/supervisord.conf",
         main_config_path=config_path,
         enable_redis=workers_in_use,
-        using_unix_sockets=using_unix_sockets,
     )
 
     convert(
@@ -1000,7 +936,6 @@ def generate_worker_log_config(
     extra_log_template_args["SYNAPSE_LOG_SENSITIVE"] = environ.get(
         "SYNAPSE_LOG_SENSITIVE"
    )
-    extra_log_template_args["SYNAPSE_LOG_TESTING"] = environ.get("SYNAPSE_LOG_TESTING")
 
     # Render and write the file
     log_config_filepath = f"/conf/workers/{worker_name}.log.config"
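Several hunks above collapse a unix-socket/TCP choice for `healthcheck_urls`; the list is ultimately joined into a curl command line, and the unix-socket form relies on curl's `--unix-socket` flag (supported since curl 7.40, as the removed comment notes). An illustrative sketch of both shapes, not the script itself:

```python
# Illustration: the two shapes of healthcheck entry built above, joined into
# a single curl invocation (requires curl >= 7.40 for --unix-socket).
import subprocess

using_unix_sockets = False
if using_unix_sockets:
    # Scheme and hostname are ignored; only the /health path matters.
    healthcheck_urls = ["--unix-socket /run/main_public.sock http://localhost/health"]
else:
    healthcheck_urls = ["http://localhost:8080/health"]

cmd = "curl -fSs " + " ".join(healthcheck_urls)
subprocess.run(cmd, shell=True, check=True)  # fails loudly if an endpoint is down
```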
@@ -10,7 +10,7 @@ ARG PYTHON_VERSION=3.9
 ###
 # We hardcode the use of Debian bullseye here because this could change upstream
 # and other Dockerfiles used for testing are expecting bullseye.
-FROM docker.io/library/python:${PYTHON_VERSION}-slim-bullseye
+FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye
 
 # Install Rust and other dependencies (stolen from normal Dockerfile)
 # install the OS build deps
@@ -57,7 +57,6 @@
   - [Account Validity](admin_api/account_validity.md)
   - [Background Updates](usage/administration/admin_api/background_updates.md)
   - [Event Reports](admin_api/event_reports.md)
-  - [Experimental Features](admin_api/experimental_features.md)
   - [Media](admin_api/media_admin_api.md)
   - [Purge History](admin_api/purge_history_api.md)
   - [Register Users](admin_api/register_api.md)
@@ -1,55 +0,0 @@
|
||||
# Experimental Features API
|
||||
|
||||
This API allows a server administrator to enable or disable some experimental features on a per-user
|
||||
basis. The currently supported features are:
|
||||
- [MSC3026](https://github.com/matrix-org/matrix-spec-proposals/pull/3026): busy
|
||||
presence state enabled
|
||||
- [MSC3881](https://github.com/matrix-org/matrix-spec-proposals/pull/3881): enable remotely toggling push notifications
|
||||
for another client
|
||||
- [MSC3967](https://github.com/matrix-org/matrix-spec-proposals/pull/3967): do not require
|
||||
UIA when first uploading cross-signing keys.
|
||||
|
||||
|
||||
To use it, you will need to authenticate by providing an `access_token`
|
||||
for a server admin: see [Admin API](../usage/administration/admin_api/).
|
||||
|
||||
## Enabling/Disabling Features
|
||||
|
||||
This API allows a server administrator to enable experimental features for a given user. The request must
|
||||
provide a body containing the user id and listing the features to enable/disable in the following format:
|
||||
```json
|
||||
{
|
||||
"features": {
|
||||
"msc3026":true,
|
||||
"msc3881":true
|
||||
}
|
||||
}
|
||||
```
|
||||
where true is used to enable the feature, and false is used to disable the feature.
|
||||
|
||||
|
||||
The API is:
|
||||
|
||||
```
|
||||
PUT /_synapse/admin/v1/experimental_features/<user_id>
|
||||
```
|
||||
|
||||
## Listing Enabled Features
|
||||
|
||||
To list which features are enabled/disabled for a given user send a request to the following API:
|
||||
|
||||
```
|
||||
GET /_synapse/admin/v1/experimental_features/<user_id>
|
||||
```
|
||||
|
||||
It will return a list of possible features and indicate whether they are enabled or disabled for the
|
||||
user like so:
|
||||
```json
|
||||
{
|
||||
"features": {
|
||||
"msc3026": true,
|
||||
"msc3881": false,
|
||||
"msc3967": false
|
||||
}
|
||||
}
|
||||
```
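
To make the request shapes concrete, here is a minimal sketch of driving both endpoints from Python with the `requests` library. The homeserver URL, admin token and user ID are placeholder assumptions; only the paths and body format come from the documentation above.

```python
import requests

# Assumed placeholders - substitute your own homeserver and admin credentials.
BASE_URL = "https://synapse.example.com"
USER_ID = "@alice:example.com"
HEADERS = {"Authorization": "Bearer <admin_access_token>"}


def set_experimental_features(features: dict) -> None:
    """Enable/disable the given experimental features for USER_ID."""
    resp = requests.put(
        f"{BASE_URL}/_synapse/admin/v1/experimental_features/{USER_ID}",
        headers=HEADERS,
        json={"features": features},
    )
    resp.raise_for_status()


def get_experimental_features() -> dict:
    """Return the feature -> enabled mapping for USER_ID."""
    resp = requests.get(
        f"{BASE_URL}/_synapse/admin/v1/experimental_features/{USER_ID}",
        headers=HEADERS,
    )
    resp.raise_for_status()
    return resp.json()["features"]


set_experimental_features({"msc3026": True, "msc3881": True})
print(get_experimental_features())
```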
@@ -419,7 +419,7 @@ The following query parameters are available:

* `from` (required) - The token to start returning events from. This token can be obtained from a prev_batch
  or next_batch token returned by the /sync endpoint, or from an end token returned by a previous request to this endpoint.
* `to` - The token to stop returning events at.
* `to` - The token to spot returning events at.
* `limit` - The maximum number of events to return. Defaults to `10`.
* `filter` - A JSON RoomEventFilter to filter returned events with.
* `dir` - The direction to return events from. Either `f` for forwards or `b` for backwards. Setting

@@ -81,52 +81,3 @@ The following fields are returned in the JSON response body:
- `user_id` - string - Fully-qualified user ID (ex. `@user:server.com`).
* `next_token` - integer - Opaque value used for pagination. See above.
* `total` - integer - Total number of users after filtering.

# Get largest rooms by size in database

Returns the 10 largest rooms and an estimate of how much space in the database
they are taking.

This does not include the size of any media associated with the room.

Returns an error on SQLite.

*Note:* This uses the planner statistics from PostgreSQL to do the estimates,
which means that the returned information can vary widely from reality. However,
it should be enough to get a rough idea of where database disk space is going.

The API is:

```
GET /_synapse/admin/v1/statistics/database/rooms
```

A response body like the following is returned:

```json
{
    "rooms": [
        {
            "room_id": "!OGEhHVWSdvArJzumhm:matrix.org",
            "estimated_size": 47325417353
        }
    ]
}
```

**Response**

The following fields are returned in the JSON response body:

* `rooms` - An array of objects, sorted by largest room first. Objects contain
  the following fields:
  - `room_id` - string - The room ID.
  - `estimated_size` - integer - Estimated disk space used in bytes by the room
    in the database.

*Added in Synapse 1.83.0*
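
A short, hedged sketch of consuming this endpoint from Python (the homeserver URL and token are assumed placeholders); it simply prints the documented `rooms` array in a human-readable form:

```python
import requests

# Assumed placeholders - substitute your own homeserver and admin credentials.
BASE_URL = "https://synapse.example.com"
HEADERS = {"Authorization": "Bearer <admin_access_token>"}

resp = requests.get(
    f"{BASE_URL}/_synapse/admin/v1/statistics/database/rooms", headers=HEADERS
)
resp.raise_for_status()

# `rooms` is already sorted largest-first; `estimated_size` is in bytes.
for room in resp.json()["rooms"]:
    print(f"{room['room_id']}: {room['estimated_size'] / 1024**3:.1f} GiB")
```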
@@ -62,7 +62,7 @@ URL parameters:

- `user_id`: fully-qualified user id: for example, `@user:server.com`.

## Create or modify account
## Create or modify Account

This API allows an administrator to create or modify a user account with a
specific `user_id`.
@@ -78,29 +78,28 @@ with a body of:
```json
{
    "password": "user_password",
    "logout_devices": false,
    "displayname": "Alice Marigold",
    "avatar_url": "mxc://example.com/abcde12345",
    "displayname": "User",
    "threepids": [
        {
            "medium": "email",
            "address": "alice@example.com"
            "address": "<user_mail_1>"
        },
        {
            "medium": "email",
            "address": "alice@domain.org"
            "address": "<user_mail_2>"
        }
    ],
    "external_ids": [
        {
            "auth_provider": "example",
            "external_id": "12345"
            "auth_provider": "<provider1>",
            "external_id": "<user_id_provider_1>"
        },
        {
            "auth_provider": "example2",
            "external_id": "abc54321"
            "auth_provider": "<provider2>",
            "external_id": "<user_id_provider_2>"
        }
    ],
    "avatar_url": "<avatar_url>",
    "admin": false,
    "deactivated": false,
    "user_type": null
@@ -113,51 +112,41 @@ Returns HTTP status code:

URL parameters:

- `user_id` - A fully-qualified user id. For example, `@user:server.com`.
- `user_id`: fully-qualified user id: for example, `@user:server.com`.

Body parameters:

- `password` - **string**, optional. If provided, the user's password is updated and all
- `password` - string, optional. If provided, the user's password is updated and all
  devices are logged out, unless `logout_devices` is set to `false`.
- `logout_devices` - **bool**, optional, defaults to `true`. If set to `false`, devices aren't
- `logout_devices` - bool, optional, defaults to `true`. If set to false, devices aren't
  logged out even when `password` is provided.
- `displayname` - **string**, optional. If set to an empty string (`""`), the user's display name
  will be removed.
- `avatar_url` - **string**, optional. Must be a
  [MXC URI](https://matrix.org/docs/spec/client_server/r0.6.0#matrix-content-mxc-uris).
  If set to an empty string (`""`), the user's avatar is removed.
- `threepids` - **array**, optional. If provided, the user's third-party IDs (email, msisdn) are
  entirely replaced with the given list. Each item in the array is an object with the following
  fields:
  - `medium` - **string**, required. The type of third-party ID, either `email` or `msisdn` (phone number).
  - `address` - **string**, required. The third-party ID itself, e.g. `alice@example.com` for `email` or
    `447470274584` (for a phone number with country code "44") and `19254857364` (for a phone number
    with country code "1") for `msisdn`.
  Note: If a threepid is removed from a user via this option, Synapse will also attempt to remove
  that threepid from any identity servers it is aware has a binding for it.
- `external_ids` - **array**, optional. Allow setting the identifier of the external identity
  provider for SSO (Single sign-on). More details are in the configuration manual under the
- `displayname` - string, optional, defaults to the value of `user_id`.
- `threepids` - array, optional, allows setting the third-party IDs (email, msisdn)
  - `medium` - string. Kind of third-party ID, either `email` or `msisdn`.
  - `address` - string. Value of third-party ID.
  belonging to a user.
- `external_ids` - array, optional. Allow setting the identifier of the external identity
  provider for SSO (Single sign-on). Details in the configuration manual under the
  sections [sso](../usage/configuration/config_documentation.md#sso) and [oidc_providers](../usage/configuration/config_documentation.md#oidc_providers).
  - `auth_provider` - **string**, required. The unique, internal ID of the external identity provider.
    The same as `idp_id` from the homeserver configuration. Note that no error is raised if the
    provided value is not in the homeserver configuration.
  - `external_id` - **string**, required. An identifier for the user in the external identity provider.
    When the user logs in to the identity provider, this must be the unique ID that they map to.
- `admin` - **bool**, optional, defaults to `false`. Whether the user is a homeserver administrator,
  granting them access to the Admin API, among other things.
- `deactivated` - **bool**, optional. If unspecified, deactivation state will be left unchanged.
  - `auth_provider` - string. ID of the external identity provider. Value of `idp_id`
    in the homeserver configuration. Note that no error is raised if the provided
    value is not in the homeserver configuration.
  - `external_id` - string, user ID in the external identity provider.
- `avatar_url` - string, optional, must be a
  [MXC URI](https://matrix.org/docs/spec/client_server/r0.6.0#matrix-content-mxc-uris).
- `admin` - bool, optional, defaults to `false`.
- `deactivated` - bool, optional. If unspecified, deactivation state will be left
  unchanged on existing accounts and set to `false` for new accounts.
  A user cannot be erased by deactivating with this API. For details on
  deactivating users see [Deactivate Account](#deactivate-account).
- `user_type` - string or null, optional. If provided, the user type will be
  adjusted. If `null` given, the user type will be cleared. Other
  allowed options are: `bot` and `support`.

Note: the `password` field must also be set if both of the following are true:
- `deactivated` is set to `false` and the user was previously deactivated (you are reactivating this user)
- Users are allowed to set their password on this homeserver (both `password_config.enabled` and
  `password_config.localdb_enabled` config options are set to `true`).
  Users' passwords are wiped upon account deactivation, hence the need to set a new one here.
If the user already exists then optional parameters default to the current value.

Note: a user cannot be erased with this API. For more details on
deactivating and erasing users see [Deactivate Account](#deactivate-account).
- `user_type` - **string** or null, optional. If not provided, the user type will be
  not be changed. If `null` is given, the user type will be cleared.
  Other allowed options are: `bot` and `support`.
In order to re-activate an account `deactivated` must be set to `false`. If
users do not login via single-sign-on, a new `password` must be provided.
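
Pulling the pieces above together, the following is an illustrative Python sketch of a create-or-modify call. The homeserver URL, token and body values are assumptions for the example; the endpoint, body fields and status-code semantics are as documented above.

```python
import requests

# Assumed placeholders - substitute your own homeserver and admin credentials.
BASE_URL = "https://synapse.example.com"
HEADERS = {"Authorization": "Bearer <admin_access_token>"}
USER_ID = "@alice:example.com"

body = {
    "password": "correct-horse-battery-staple",  # logs out all devices...
    "logout_devices": False,                     # ...unless disabled like this
    "displayname": "Alice Marigold",
    "admin": False,
}
resp = requests.put(
    f"{BASE_URL}/_synapse/admin/v2/users/{USER_ID}", headers=HEADERS, json=body
)
resp.raise_for_status()
# 201 indicates a newly created account; 200 indicates an existing one was modified.
print(resp.status_code)
```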
## List Accounts

@@ -242,9 +231,6 @@ The following parameters should be set in the URL:

- `dir` - Direction of media order. Either `f` for forwards or `b` for backwards.
Setting this value to `b` will reverse the above sort order. Defaults to `f`.
- `not_user_type` - Exclude certain user types, such as bot users, from the request.
Can be provided multiple times. Possible values are `bot`, `support` or "empty string".
"empty string" here means to exclude users without a type.

Caution. The database only has indexes on the columns `name` and `creation_ts`.
This means that if a different sort order is used (`is_guest`, `admin`,
@@ -732,8 +718,7 @@ POST /_synapse/admin/v1/users/<user_id>/login

An optional `valid_until_ms` field can be specified in the request body as an
integer timestamp that specifies when the token should expire. By default tokens
do not expire. Note that this API does not allow a user to login as themselves
(to create more tokens).
do not expire.

A response body like the following is returned:

@@ -817,33 +802,6 @@ The following fields are returned in the JSON response body:

- `total` - Total number of user's devices.

### Create a device

Creates a new device for a specific `user_id` and `device_id`. Does nothing if the `device_id`
exists already.

The API is:

```
POST /_synapse/admin/v2/users/<user_id>/devices

{
"device_id": "QBUAZIFURK"
}
```

An empty JSON dict is returned.

**Parameters**

The following parameters should be set in the URL:

- `user_id` - fully qualified: for example, `@user:server.com`.

The following fields are required in the JSON request body:

- `device_id` - The device ID to create.
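
For completeness, a minimal sketch of calling this endpoint from Python (the URL and token are assumed placeholders):

```python
import requests

# Assumed placeholders - substitute your own homeserver and admin credentials.
BASE_URL = "https://synapse.example.com"
HEADERS = {"Authorization": "Bearer <admin_access_token>"}
USER_ID = "@alice:example.com"

resp = requests.post(
    f"{BASE_URL}/_synapse/admin/v2/users/{USER_ID}/devices",
    headers=HEADERS,
    json={"device_id": "QBUAZIFURK"},
)
resp.raise_for_status()
print(resp.json())  # an empty JSON dict, per the description above
```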

### Delete multiple devices
Deletes the given devices for a specific `user_id`, and invalidates
any access token associated with them.
@@ -1184,7 +1142,7 @@ The following parameters should be set in the URL:
- `user_id` - The fully qualified MXID: for example, `@user:server.com`. The user must
  be local.

## Check username availability
### Check username availability

Checks to see if a username is available, and valid, for the server. See [the client-server
API](https://matrix.org/docs/spec/client_server/r0.6.0#get-matrix-client-r0-register-available)
@@ -1202,7 +1160,7 @@ GET /_synapse/admin/v1/username_available?username=$localpart
The request and response format is the same as the
[/_matrix/client/r0/register/available](https://matrix.org/docs/spec/client_server/r0.6.0#get-matrix-client-r0-register-available) API.

## Find a user based on their ID in an auth provider
### Find a user based on their ID in an auth provider

The API is:

@@ -1241,7 +1199,7 @@ Returns a `404` HTTP status code if no user was found, with a response body like
_Added in Synapse 1.68.0._

## Find a user based on their Third Party ID (ThreePID or 3PID)
### Find a user based on their Third Party ID (ThreePID or 3PID)

The API is:

File diff suppressed because it is too large
@@ -23,7 +23,7 @@ people building from source should ensure they can fetch recent versions of Rust
(e.g. by using [rustup](https://rustup.rs/)).

The oldest supported version of SQLite is the version
[provided](https://packages.debian.org/bullseye/libsqlite3-0) by
[provided](https://packages.debian.org/buster/libsqlite3-0) by
[Debian oldstable](https://wiki.debian.org/DebianOldStable).

Context

@@ -22,9 +22,6 @@ on Windows is not officially supported.

The code of Synapse is written in Python 3. To do pretty much anything, you'll need [a recent version of Python 3](https://www.python.org/downloads/). Your Python also needs support for [virtual environments](https://docs.python.org/3/library/venv.html). This is usually built-in, but some Linux distributions like Debian and Ubuntu split it out into its own package. Running `sudo apt install python3-venv` should be enough.

A recent version of the Rust compiler is needed to build the native modules. The
easiest way of installing the latest version is to use [rustup](https://rustup.rs/).

Synapse can connect to PostgreSQL via the [psycopg2](https://pypi.org/project/psycopg2/) Python library. Building this library from source requires access to PostgreSQL's C header files. On Debian or Ubuntu Linux, these can be installed with `sudo apt install libpq-dev`.

Synapse has an optional, improved user search with better Unicode support. For that you need the development package of `libicu`. On Debian or Ubuntu Linux, this can be installed with `sudo apt install libicu-dev`.
@@ -33,6 +30,9 @@ The source code of Synapse is hosted on GitHub. You will also need [a recent ver

For some tests, you will need [a recent version of Docker](https://docs.docker.com/get-docker/).

A recent version of the Rust compiler is needed to build the native modules. The
easiest way of installing the latest version is to use [rustup](https://rustup.rs/).

# 3. Get the source.

@@ -53,11 +53,6 @@ can find many good git tutorials on the web.

# 4. Install the dependencies

Before installing the Python dependencies, make sure you have installed a recent version
of Rust (see the "What do I need?" section above). The easiest way of installing the
latest version is to use [rustup](https://rustup.rs/).

Synapse uses the [poetry](https://python-poetry.org/) project to manage its dependencies
and development environment. Once you have installed Python 3 and added the
source, you should install `poetry`.
@@ -81,8 +76,7 @@ cd path/where/you/have/cloned/the/repository
poetry install --extras all
```

This will install the runtime and developer dependencies for the project. Be sure to check
that the `poetry install` step completed cleanly.
This will install the runtime and developer dependencies for the project.

## Running Synapse via poetry

@@ -90,31 +84,14 @@ To start a local instance of Synapse in the locked poetry environment, create a

```sh
cp docs/sample_config.yaml homeserver.yaml
cp docs/sample_log_config.yaml log_config.yaml
```

Now edit `homeserver.yaml`, things you might want to change include:

- Set a `server_name`
- Adjusting paths to be correct for your system like the `log_config` to point to the log config you just copied
- Using a [PostgreSQL database instead of SQLite](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#database)
- Adding a [`registration_shared_secret`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#registration_shared_secret) so you can use [`register_new_matrix_user` command](https://matrix-org.github.io/synapse/latest/setup/installation.html#registering-a-user).

And then run Synapse with the following command:
Now edit homeserver.yaml, and run Synapse with:

```sh
poetry run python -m synapse.app.homeserver -c homeserver.yaml
```

If you get an error like the following:

```
importlib.metadata.PackageNotFoundError: matrix-synapse
```

this probably indicates that the `poetry install` step did not complete cleanly - go back and
resolve any issues and re-run until successful.

# 5. Get in touch.

Join our developer community on Matrix: [#synapse-dev:matrix.org](https://matrix.to/#/#synapse-dev:matrix.org)!
@@ -322,7 +299,7 @@ The following command will let you run the integration test with the most common
configuration:

```sh
$ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:focal
$ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:buster
```
(Note that the paths must be full paths! You could also write `$(realpath relative/path)` if needed.)

@@ -369,8 +346,6 @@ The above will run a monolithic (single-process) Synapse with SQLite as the data
A safe example would be `WORKER_TYPES="federation_inbound, federation_sender, synchrotron"`.
See the [worker documentation](../workers.md) for additional information on workers.
- Passing `ASYNCIO_REACTOR=1` as an environment variable to use the Twisted asyncio reactor instead of the default one.
- Passing `PODMAN=1` will use the [podman](https://podman.io/) container runtime, instead of docker.
- Passing `UNIX_SOCKETS=1` will utilise Unix socket functionality for Synapse, Redis, and Postgres (when applicable).

To increase the log level for the tests, set `SYNAPSE_TEST_LOG_LEVEL`, e.g.:
```sh

@@ -155,11 +155,43 @@ def run_upgrade(
Boolean columns require special treatment, since SQLite treats booleans the
same as integers.

Any new boolean column must be added to the `BOOLEAN_COLUMNS` list in
There are three separate aspects to this:

* Any new boolean column must be added to the `BOOLEAN_COLUMNS` list in
  `synapse/_scripts/synapse_port_db.py`. This tells the port script to cast
  the integer value from SQLite to a boolean before writing the value to the
  postgres database.

* Before SQLite 3.23, `TRUE` and `FALSE` were not recognised as constants by
  SQLite, and the `IS [NOT] TRUE`/`IS [NOT] FALSE` operators were not
  supported. This makes it necessary to avoid using `TRUE` and `FALSE`
  constants in SQL commands.

  For example, to insert a `TRUE` value into the database, write:

  ```python
  txn.execute("INSERT INTO tbl(col) VALUES (?)", (True, ))
  ```

* Default values for new boolean columns present a particular
  difficulty. Generally it is best to create separate schema files for
  Postgres and SQLite. For example:

  ```sql
  -- in 00delta.sql.postgres:
  ALTER TABLE tbl ADD COLUMN col BOOLEAN DEFAULT FALSE;
  ```

  ```sql
  -- in 00delta.sql.sqlite:
  ALTER TABLE tbl ADD COLUMN col BOOLEAN DEFAULT 0;
  ```

  Note that there is a particularly insidious failure mode here: the Postgres
  flavour will be accepted by SQLite 3.22, but will give a column whose
  default value is the **string** `"FALSE"` - which, when cast back to a boolean
  in Python, evaluates to `True`.

## `event_id` global uniqueness

@@ -260,17 +260,15 @@ doesn't require poetry. (It's what we use in CI too). However, you could try

## ...handle a Dependabot pull request?

Synapse uses Dependabot to keep the `poetry.lock` and `Cargo.lock` files
up-to-date with the latest releases of our dependencies. The changelog check is
omitted for Dependabot PRs; the release script will include them in the
changelog.

When reviewing a dependabot PR, ensure that:
Synapse uses Dependabot to keep the `poetry.lock` file up-to-date. When it
creates a pull request a GitHub Action will run to automatically create a changelog
file. Ensure that:

* the lockfile changes look reasonable;
* the upstream changelog file (linked in the description) doesn't include any
  breaking changes;
* continuous integration passes.
* continuous integration passes (due to permissions, the GitHub Actions run on
  the changelog commit will fail, look at the initial commit of the pull request);

In particular, any updates to the type hints (usually packages which start with `types-`)
should be safe to merge if linting passes.

@@ -6,7 +6,7 @@ This is a work-in-progress set of notes with two goals:

See also [MSC3902](https://github.com/matrix-org/matrix-spec-proposals/pull/3902).

The key idea is described by [MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706). This allows servers to
The key idea is described by [MSC706](https://github.com/matrix-org/matrix-spec-proposals/pull/3902). This allows servers to
request a lightweight response to the federation `/send_join` endpoint.
This is called a **faster join**, also known as a **partial join**. In these
notes we'll usually use the word "partial" as it matches the database schema.

@@ -46,9 +46,6 @@ instead.

If the authentication is unsuccessful, the module must return `None`.

Note that the user is not automatically registered; the `register_user(..)` method of
the [module API](writing_a_module.html) can be used to lazily create users.

If multiple modules register an auth checker for the same login type but with different
fields, Synapse will refuse to start.
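
As a rough illustration of these rules, here is a hedged sketch of a module that registers an auth checker for a made-up `com.example.token_login` login type and lazily registers users on first successful login. The login type, field name and config format are invented for the example; the module-API calls used (`register_password_auth_provider_callbacks`, `check_user_exists`, `register_user`) follow the password-auth-provider module documentation.

```python
from typing import Callable, Optional, Tuple

import synapse.module_api


class ExampleTokenAuthProvider:
    def __init__(self, config: dict, api: synapse.module_api.ModuleApi):
        self._api = api
        # Invented config format: a map of secret tokens to localparts,
        # e.g. {"s3cret": "alice"}.
        self._tokens = config.get("tokens", {})

        api.register_password_auth_provider_callbacks(
            auth_checkers={
                # Invented login type and field name for the example.
                ("com.example.token_login", ("token",)): self.check_token,
            },
        )

    async def check_token(
        self, username: str, login_type: str, login_dict: dict
    ) -> Optional[Tuple[str, Optional[Callable]]]:
        localpart = self._tokens.get(login_dict.get("token"))
        if localpart is None:
            # Authentication failed: return None, as described above.
            return None

        user_id = self._api.get_qualified_user_id(localpart)
        # Lazily create the account on first login, as described above.
        if not await self._api.check_user_exists(user_id):
            user_id = await self._api.register_user(localpart)
        return user_id, None
```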

@@ -348,42 +348,6 @@ callback returns `False`, Synapse falls through to the next one. The value of th
callback that does not return `False` will be used. If this happens, Synapse will not call
any of the subsequent implementations of this callback.

### `check_login_for_spam`

_First introduced in Synapse v1.87.0_

```python
async def check_login_for_spam(
    user_id: str,
    device_id: Optional[str],
    initial_display_name: Optional[str],
    request_info: Collection[Tuple[Optional[str], str]],
    auth_provider_id: Optional[str] = None,
) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes"]
```

Called when a user logs in.

The arguments passed to this callback are:

* `user_id`: The user ID the user is logging in with
* `device_id`: The device ID the user is re-logging into.
* `initial_display_name`: The device display name, if any.
* `request_info`: A collection of tuples, whose first item is a user agent and whose
  second item is an IP address. These user agents and IP addresses are the ones that were
  used during the login process.
* `auth_provider_id`: The identifier of the SSO authentication provider, if any.

If multiple modules implement this callback, they will be considered in order. If a
callback returns `synapse.module_api.NOT_SPAM`, Synapse falls through to the next one.
The value of the first callback that does not return `synapse.module_api.NOT_SPAM` will
be used. If this happens, Synapse will not call any of the subsequent implementations of
this callback.

*Note:* This will not be called when a user registers.
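
For illustration, a minimal module sketch that registers this callback and rejects logins from a hard-coded set of IP addresses. The class name and blocked IPs are invented for the example; the callback signature and the `NOT_SPAM`/`Codes` return values follow the documentation above.

```python
from typing import Collection, Optional, Tuple, Union

import synapse.module_api
from synapse.module_api import NOT_SPAM
from synapse.module_api.errors import Codes


class BlockListedIPsAtLogin:
    # Invented example values: logins from these addresses are rejected.
    BLOCKED_IPS = {"198.51.100.1", "198.51.100.2"}

    def __init__(self, config: dict, api: synapse.module_api.ModuleApi):
        api.register_spam_checker_callbacks(
            check_login_for_spam=self.check_login_for_spam,
        )

    async def check_login_for_spam(
        self,
        user_id: str,
        device_id: Optional[str],
        initial_display_name: Optional[str],
        request_info: Collection[Tuple[Optional[str], str]],
        auth_provider_id: Optional[str] = None,
    ) -> Union["synapse.module_api.NOT_SPAM", Codes]:
        # Each tuple is (user agent, IP address), per the list above.
        if any(ip in self.BLOCKED_IPS for _user_agent, ip in request_info):
            return Codes.FORBIDDEN
        return NOT_SPAM
```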

## Example

The example below is a module that implements the spam checker callback

@@ -569,7 +569,7 @@ You should receive a response similar to the following. Make sure to save it.
{"client_id":"someclientid_123","client_secret":"someclientsecret_123","id":"12345","name":"my_synapse_app","redirect_uri":"https://[synapse_public_baseurl]/_synapse/client/oidc/callback","website":null,"vapid_key":"somerandomvapidkey_123"}
```

As the Synapse login mechanism needs an attribute to uniquely identify users, and Mastodon's endpoint does not return a `sub` property, an alternative `subject_template` has to be set. Your Synapse configuration should include the following:
As the Synapse login mechanism needs an attribute to uniquely identify users, and Mastodon's endpoint does not return a `sub` property, an alternative `subject_claim` has to be set. Your Synapse configuration should include the following:

```yaml
oidc_providers:
@@ -585,9 +585,7 @@ oidc_providers:
scopes: ["read"]
user_mapping_provider:
config:
subject_template: "{{ user.id }}"
localpart_template: "{{ user.username }}"
display_name_template: "{{ user.display_name }}"
subject_claim: "id"
```

Note that the fields `client_id` and `client_secret` are taken from the CURL response above.

@@ -30,6 +30,12 @@ minimal.

See [the TCP replication documentation](tcp_replication.md).

### The Slaved DataStore

There are read-only versions of the synapse storage layer in
`synapse/replication/slave/storage` that use the response of the
replication API to invalidate their caches.

### The TCP Replication Module
Information about how the tcp replication module is structured, including how
the classes interact, can be found in

@@ -68,7 +68,9 @@ root:
# Write logs to the `buffer` handler, which will buffer them together in memory,
# then write them to a file.
#
# Replace "buffer" with "console" to log to stderr instead.
# Replace "buffer" with "console" to log to stderr instead. (Note that you'll
# also need to update the configuration for the `twisted` logger above, in
# this case.)
#
handlers: [buffer]

@@ -135,8 +135,8 @@ Unofficial package are built for SLES 15 in the openSUSE:Backports:SLE-15 reposi

#### ArchLinux

The quickest way to get up and running with ArchLinux is probably with the package provided by ArchLinux
<https://archlinux.org/packages/extra/x86_64/matrix-synapse/>, which should pull in most of
The quickest way to get up and running with ArchLinux is probably with the community package
<https://archlinux.org/packages/community/x86_64/matrix-synapse/>, which should pull in most of
the necessary dependencies.

pip may be outdated (6.0.7-1 and needs to be upgraded to 6.0.8-1):
@@ -200,7 +200,7 @@ When following this route please make sure that the [Platform-specific prerequis
System requirements:

- POSIX-compliant system (tested on Linux & OS X)
- Python 3.8 or later, up to Python 3.11.
- Python 3.7 or later, up to Python 3.11.
- At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org

If building on an uncommon architecture for which pre-built wheels are

@@ -1,4 +1,8 @@
worker_app: synapse.app.generic_worker
worker_name: background_worker

# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093

worker_log_config: /etc/matrix-synapse/background-worker-log.yaml

@@ -1,5 +1,9 @@
worker_app: synapse.app.generic_worker
worker_name: event_persister1
worker_name: event_persister1

# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093

worker_listeners:
  - type: http

@@ -1,4 +1,8 @@
worker_app: synapse.app.federation_sender
worker_name: federation_sender1

# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093

worker_log_config: /etc/matrix-synapse/federation-sender-log.yaml

@@ -1,6 +1,10 @@
worker_app: synapse.app.generic_worker
worker_name: generic_worker1

# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093

worker_listeners:
  - type: http
    port: 8083

@@ -1,6 +1,10 @@
worker_app: synapse.app.media_repository
worker_name: media_worker

# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093

worker_listeners:
  - type: http
    port: 8085

@@ -1,4 +1,8 @@
worker_app: synapse.app.pusher
worker_name: pusher_worker1

# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093

worker_log_config: /etc/matrix-synapse/pusher-worker-log.yaml

145 docs/upgrade.md
@@ -88,151 +88,6 @@ process, for example:
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
```

# Upgrading to v1.89.0

## Removal of unspecced `user` property for `/register`

Application services can no longer call `/register` with a `user` property to create new users.
The standard `username` property should be used instead. See the
[Application Service specification](https://spec.matrix.org/v1.7/application-service-api/#server-admin-style-permissions)
for more information.

# Upgrading to v1.88.0

## Minimum supported Python version

The minimum supported Python version has been increased from v3.7 to v3.8.
You will need Python 3.8 to run Synapse v1.88.0 (due out July 18th, 2023).

If you use current versions of the Matrix.org-distributed Debian
packages or Docker images, no action is required.

## Removal of `worker_replication_*` settings

As mentioned previously in [Upgrading to v1.84.0](#upgrading-to-v1840), the following deprecated settings
are being removed in this release of Synapse:

* [`worker_replication_host`](https://matrix-org.github.io/synapse/v1.86/usage/configuration/config_documentation.html#worker_replication_host)
* [`worker_replication_http_port`](https://matrix-org.github.io/synapse/v1.86/usage/configuration/config_documentation.html#worker_replication_http_port)
* [`worker_replication_http_tls`](https://matrix-org.github.io/synapse/v1.86/usage/configuration/config_documentation.html#worker_replication_http_tls)

Please ensure that you have migrated to using `main` on your shared configuration's `instance_map`
(or create one if necessary). This is required if you have ***any*** workers at all;
administrators of single-process (monolith) installations don't need to do anything.

For an illustrative example, please see [Upgrading to v1.84.0](#upgrading-to-v1840) below.

# Upgrading to v1.86.0

## Minimum supported Rust version

The minimum supported Rust version has been increased from v1.58.1 to v1.60.0.
Users building from source will need to ensure their `rustc` version is up to
date.

# Upgrading to v1.85.0

## Application service registration with "user" property deprecation

Application services should ensure they call the `/register` endpoint with a
`username` property. The legacy `user` property is considered deprecated and
should no longer be included.

A future version of Synapse (v1.88.0 or later) will remove support for legacy
application service login.

# Upgrading to v1.84.0

## Deprecation of `worker_replication_*` configuration settings

When using workers,

* `worker_replication_host`
* `worker_replication_http_port`
* `worker_replication_http_tls`

should now be removed from individual worker YAML configurations and the main process should instead be added to the `instance_map`
in the shared YAML configuration, using the name `main`.

The old `worker_replication_*` settings are now considered deprecated and are expected to be removed in Synapse v1.88.0.

### Example change

#### Before:

Shared YAML
```yaml
instance_map:
  generic_worker1:
    host: localhost
    port: 5678
    tls: false
```

Worker YAML
```yaml
worker_app: synapse.app.generic_worker
worker_name: generic_worker1

worker_replication_host: localhost
worker_replication_http_port: 3456
worker_replication_http_tls: false

worker_listeners:
  - type: http
    port: 1234
    resources:
      - names: [client, federation]
  - type: http
    port: 5678
    resources:
      - names: [replication]

worker_log_config: /etc/matrix-synapse/generic-worker-log.yaml
```

#### After:

Shared YAML
```yaml
instance_map:
  main:
    host: localhost
    port: 3456
    tls: false
  generic_worker1:
    host: localhost
    port: 5678
    tls: false
```

Worker YAML
```yaml
worker_app: synapse.app.generic_worker
worker_name: generic_worker1

worker_listeners:
  - type: http
    port: 1234
    resources:
      - names: [client, federation]
  - type: http
    port: 5678
    resources:
      - names: [replication]

worker_log_config: /etc/matrix-synapse/generic-worker-log.yaml

```
Notes:
* `tls` is optional but mirrors the functionality of `worker_replication_http_tls`

# Upgrading to v1.81.0

## Application service path & authentication deprecations

@@ -27,8 +27,9 @@ What servers are currently participating in this room?
Run this sql query on your db:
```sql
SELECT DISTINCT split_part(state_key, ':', 2)
FROM current_state_events
WHERE room_id = '!cURbafjkfsMDVwdRDQ:matrix.org' AND membership = 'join';
FROM current_state_events AS c
INNER JOIN room_memberships AS m USING (room_id, event_id)
WHERE room_id = '!cURbafjkfsMDVwdRDQ:matrix.org' AND membership = 'join';
```

What users are registered on my server?

@@ -42,6 +42,11 @@ The following statistics are sent to the configured reporting endpoint:

| `daily_e2ee_messages` | int | The number of (state) events with the type `m.room.encrypted` seen in the last 24 hours. |
| `daily_sent_messages` | int | The number of (state) events sent by a local user with the type `m.room.message` seen in the last 24 hours. |
| `daily_sent_e2ee_messages` | int | The number of (state) events sent by a local user with the type `m.room.encrypted` seen in the last 24 hours. |
| `r30_users_all` | int | The number of 30 day retained users, defined as users who have created their accounts more than 30 days ago, where they were last seen at most 30 days ago and where those two timestamps are over 30 days apart. Includes clients that do not fit into the below r30 client types. |
| `r30_users_android` | int | The number of 30 day retained users, as defined above. Filtered only to clients with "Android" in the user agent string. |
| `r30_users_ios` | int | The number of 30 day retained users, as defined above. Filtered only to clients with "iOS" in the user agent string. |
| `r30_users_electron` | int | The number of 30 day retained users, as defined above. Filtered only to clients with "Electron" in the user agent string. |
| `r30_users_web` | int | The number of 30 day retained users, as defined above. Filtered only to clients with "Mozilla" or "Gecko" in the user agent string. |
| `r30v2_users_all` | int | The number of 30 day retained users, with a revised algorithm. Defined as users that appear more than once in the past 60 days, and have more than 30 days between the most and least recent appearances in the past 60 days. Includes clients that do not fit into the below r30 client types. |
| `r30v2_users_android` | int | The number of 30 day retained users, as defined above. Filtered only to clients with ("riot" or "element") and "android" (case-insensitive) in the user agent string. |
| `r30v2_users_ios` | int | The number of 30 day retained users, as defined above. Filtered only to clients with ("riot" or "element") and "ios" (case-insensitive) in the user agent string. |
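
The r30v2 definition above can be made concrete with a toy re-implementation. This is an illustrative sketch of the stated rule only, not Synapse's actual reporting query:

```python
from datetime import datetime, timedelta
from typing import List


def is_r30v2_retained(appearances: List[datetime], now: datetime) -> bool:
    """Toy version of the r30v2 rule: more than one appearance in the past
    60 days, with over 30 days between the most and least recent of them."""
    window = [t for t in appearances if now - t <= timedelta(days=60)]
    if len(window) < 2:
        return False
    return max(window) - min(window) > timedelta(days=30)


now = datetime(2023, 7, 1)
print(is_r30v2_retained([datetime(2023, 5, 5), datetime(2023, 6, 20)], now))  # True
print(is_r30v2_retained([datetime(2023, 6, 1), datetime(2023, 6, 20)], now))  # False
```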

@@ -462,20 +462,6 @@ See the docs [request log format](../administration/request_log.md).
* `additional_resources`: Only valid for an 'http' listener. A map of
  additional endpoints which should be loaded via dynamic modules.

Unix socket support (_Added in Synapse 1.89.0_):
* `path`: A path and filename for a Unix socket. Make sure it is located in a
  directory with read and write permissions, and that it already exists (the directory
  will not be created). Defaults to `None`.
  * **Note**: Using both the `path` and `port` options for the same `listener` is not
    supported.
  * The `x_forwarded` option defaults to true when using Unix sockets and can be omitted.
  * Other options that would not make sense to use with a UNIX socket, such as
    `bind_addresses` and `tls` will be ignored and can be removed.
* `mode`: The file permissions to set on the UNIX socket. Defaults to `666`
  * **Note:** Must be set as `type: http` (does not support `metrics` and `manhole`).
    Also make sure that `metrics` is not included in `resources` -> `names`
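
A listener configured with `path` as above can be smoke-tested with plain Python, speaking HTTP/1.1 over an `AF_UNIX` socket. The socket path below is an assumed example; match it to the `path` of your listener:

```python
import socket

# Assumed socket path - match it to the `path` of your http listener.
SOCK_PATH = "/var/run/synapse/main_public.sock"

request = (
    "GET /_matrix/client/versions HTTP/1.1\r\n"
    "Host: localhost\r\n"
    "Connection: close\r\n"
    "\r\n"
)

with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as sock:
    sock.connect(SOCK_PATH)
    sock.sendall(request.encode("ascii"))
    response = b""
    while chunk := sock.recv(4096):
        response += chunk

# The first line should be something like "HTTP/1.1 200 OK".
print(response.decode("utf-8", errors="replace").splitlines()[0])
```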

Valid resource names are:

* `client`: the client-server API (/_matrix/client), and the synapse admin API (/_synapse/admin). Also implies `media` and `static`.
@@ -488,7 +474,7 @@ Valid resource names are:

* `media`: the media API (/_matrix/media).

* `metrics`: the metrics interface. See [here](../../metrics-howto.md). (Not compatible with Unix sockets)
* `metrics`: the metrics interface. See [here](../../metrics-howto.md).

* `openid`: OpenID authentication. See [here](../../openid.md).

@@ -547,22 +533,6 @@ listeners:
bind_addresses: ['::1', '127.0.0.1']
type: manhole
```
Example configuration #3:
```yaml
listeners:
  # Unix socket listener: Ideal for Synapse deployments behind a reverse proxy, offering
  # lightweight interprocess communication without TCP/IP overhead, avoiding port
  # conflicts, and providing enhanced security through system file permissions.
  #
  # Note that x_forwarded will default to true when using a UNIX socket. Please see
  # https://matrix-org.github.io/synapse/latest/reverse_proxy.html.
  #
  - path: /var/run/synapse/main_public.sock
    type: http
    resources:
      - names: [client, federation]
```

---
### `manhole_settings`

@@ -1226,32 +1196,6 @@ Example configuration:
allow_device_name_lookup_over_federation: true
```
---
### `federation`

The federation section defines some sub-options related to federation.

The following options are related to configuring timeout and retry logic for one request,
independently of the others.
The short retry algorithm is used when something or someone will wait for the request to have an
answer, while the long retry algorithm is used for requests that happen in the background,
like sending a federation transaction.

* `client_timeout`: timeout for the federation requests. Defaults to 60s.
* `max_short_retry_delay`: maximum delay to be used for the short retry algo. Defaults to 2s.
* `max_long_retry_delay`: maximum delay to be used for the long retry algo. Defaults to 60s.
* `max_short_retries`: maximum number of retries for the short retry algo. Defaults to 3 attempts.
* `max_long_retries`: maximum number of retries for the long retry algo. Defaults to 10 attempts.

Example configuration:
```yaml
federation:
  client_timeout: 180s
  max_short_retry_delay: 7s
  max_long_retry_delay: 100s
  max_short_retries: 5
  max_long_retries: 20
```
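
As a rough mental model of how these knobs interact, here is a hedged sketch of capped exponential backoff with jitter; it is illustrative only and not Synapse's actual retry code:

```python
import random


def retry_delays(max_retries: int, max_delay_s: float, base_s: float = 0.5):
    """Illustrative capped exponential backoff: each attempt doubles the
    delay, clamps it at max_delay_s, and adds a little jitter."""
    for attempt in range(max_retries):
        yield min(max_delay_s, base_s * (2 ** attempt)) * random.uniform(0.8, 1.2)


# Short retry algo with the example config above: 5 attempts, capped at 7s.
print([round(d, 2) for d in retry_delays(max_retries=5, max_delay_s=7.0)])
```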
---
## Caching

Options related to caching.
@@ -1824,30 +1768,6 @@ Example configuration:
max_image_pixels: 35M
```
---
### `prevent_media_downloads_from`

A list of domains to never download media from. Media from these
domains that is already downloaded will not be deleted, but will be
inaccessible to users. This option does not affect admin APIs trying
to download/operate on media.

This will not prevent the listed domains from accessing media themselves.
It simply prevents users on this server from downloading media originating
from the listed servers.

This will have no effect on media originating from the local server.
This only affects media downloaded from other Matrix servers; to
block domains from URL previews see [`url_preview_url_blacklist`](#url_preview_url_blacklist).

Defaults to an empty list (nothing blocked).

Example configuration:
```yaml
prevent_media_downloads_from:
  - evil.example.org
  - evil2.example.org
```
---
### `dynamic_thumbnails`

Whether to generate new thumbnails on the fly to precisely match
@@ -2626,50 +2546,7 @@ Example configuration:
```yaml
nonrefreshable_access_token_lifetime: 24h
```
---
### `ui_auth`

The amount of time to allow a user-interactive authentication session to be active.

This defaults to 0, meaning the user is queried for their credentials
before every action, but this can be overridden to allow a single
validation to be re-used. This weakens the protections afforded by
the user-interactive authentication process, by allowing for multiple
(and potentially different) operations to use the same validation session.

This is ignored for potentially "dangerous" operations (including
deactivating an account, modifying an account password, adding a 3PID,
and minting additional login tokens).

Use the `session_timeout` sub-option here to change the time allowed for credential validation.

Example configuration:
```yaml
ui_auth:
  session_timeout: "15s"
```
---
### `login_via_existing_session`

Matrix supports the ability of an existing session to mint a login token for
another client.

Synapse disables this by default as it has security ramifications -- a malicious
client could use the mechanism to spawn more than one session.

The duration of time the generated token is valid for can be configured with the
`token_timeout` sub-option.

User-interactive authentication is required when this is enabled unless the
`require_ui_auth` sub-option is set to `False`.

Example configuration:
```yaml
login_via_existing_session:
  enabled: true
  require_ui_auth: false
  token_timeout: "5m"
```
---
## Metrics
Config options related to metrics.
@@ -3514,6 +3391,28 @@ password_config:
require_uppercase: true
```
---
### `ui_auth`

The amount of time to allow a user-interactive authentication session to be active.

This defaults to 0, meaning the user is queried for their credentials
before every action, but this can be overridden to allow a single
validation to be re-used. This weakens the protections afforded by
the user-interactive authentication process, by allowing for multiple
(and potentially different) operations to use the same validation session.

This is ignored for potentially "dangerous" operations (including
deactivating an account, modifying an account password, and
adding a 3PID).

Use the `session_timeout` sub-option here to change the time allowed for credential validation.

Example configuration:
```yaml
ui_auth:
  session_timeout: "15s"
```
---
## Push
Configuration settings related to push notifications

@@ -3543,9 +3442,6 @@ This option has a number of sub-options. They are as follows:
user has unread messages in. Defaults to true, meaning push clients will see the number of
rooms with unread messages in them. Set to false to instead send the number
of unread messages.
* `jitter_delay`: Delays push notifications by a random amount up to the given
  duration. Useful for mitigating timing attacks. Optional, defaults to no
  delay. _Added in Synapse 1.84.0._

Example configuration:
```yaml
@@ -3553,7 +3449,6 @@ push:
enabled: true
include_content: false
group_unread_count_by_room: false
jitter_delay: "10s"
```
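
As a sketch of what `jitter_delay` does conceptually (this is illustrative, not Synapse's pusher implementation): the delivery of each push is deferred by a random delay drawn from `[0, jitter_delay)`:

```python
import asyncio
import random


async def deliver_push(notification: str, jitter_delay_s: float = 10.0) -> None:
    # Sleep a random amount in [0, jitter_delay_s) before delivery, so the
    # arrival time of a push no longer reveals exactly when the event
    # reached the server.
    await asyncio.sleep(random.uniform(0, jitter_delay_s))
    print("push sent:", notification)


asyncio.run(deliver_push("@alice:example.com: hello", jitter_delay_s=10.0))
```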
---
## Rooms
@@ -3800,16 +3695,6 @@ default_power_level_content_override:
trusted_private_chat: null
public_chat: null
```
---
### `forget_rooms_on_leave`

Set to true to automatically forget rooms for users when they leave them, either
normally or via a kick or ban. Defaults to false.

Example configuration:
```yaml
forget_rooms_on_leave: false
```

---
## Opentracing
@@ -3960,34 +3845,20 @@ federation_sender_instances:
---
### `instance_map`

When using workers this should be a map from [`worker_name`](#worker_name) to the HTTP
replication listener of the worker, if configured, and to the main process. Each worker
declared under [`stream_writers`](../../workers.md#stream-writers) and
[`outbound_federation_restricted_to`](#outbound_federation_restricted_to) needs a HTTP
replication listener, and that listener should be included in the `instance_map`. The
main process also needs an entry on the `instance_map`, and it should be listed under
`main` **if even one other worker exists**. Ensure the port matches with what is
declared inside the `listener` block for a `replication` listener.

When using workers this should be a map from [`worker_name`](#worker_name) to the
HTTP replication listener of the worker, if configured.
Each worker declared under [`stream_writers`](../../workers.md#stream-writers) needs
a HTTP replication listener, and that listener should be included in the `instance_map`.
(The main process also needs an HTTP replication listener, but it should not be
listed in the `instance_map`.)

Example configuration:
```yaml
instance_map:
  main:
    host: localhost
    port: 8030
  worker1:
    host: localhost
    port: 8034
```
Example configuration (#2, for UNIX sockets):
```yaml
instance_map:
  main:
    path: /var/run/synapse/main_replication.sock
  worker1:
    path: /var/run/synapse/worker1_replication.sock
```
---
### `stream_writers`

@@ -4005,24 +3876,6 @@ stream_writers:
typing: worker1
```
---
### `outbound_federation_restricted_to`

When using workers, you can restrict outbound federation traffic to only go through a
specific subset of workers. Any worker specified here must also be in the
[`instance_map`](#instance_map).
[`worker_replication_secret`](#worker_replication_secret) must also be configured to
authorize inter-worker communication.

```yaml
outbound_federation_restricted_to:
  - federation_sender1
  - federation_sender2
```

Also see the [worker
documentation](../../workers.md#restrict-outbound-federation-traffic-to-a-specific-set-of-workers)
for more info.
---
### `run_background_tasks_on`

The [worker](../../workers.md#background-tasks) that is used to run
@@ -4083,22 +3936,11 @@ This setting has the following sub-options:
* `enabled`: whether to use Redis support. Defaults to false.
* `host` and `port`: Optional host and port to use to connect to redis. Defaults to
  localhost and 6379
* `path`: The full path to a local Unix socket file. **If this is used, `host` and
  `port` are ignored.** Defaults to `/tmp/redis.sock`
* `password`: Optional password if configured on the Redis instance.
* `dbid`: Optional redis dbid if you need to connect to a specific redis logical db.
* `use_tls`: Whether to use a TLS connection. Defaults to false.
* `certificate_file`: Optional path to the certificate file
* `private_key_file`: Optional path to the private key file
* `ca_file`: Optional path to the CA certificate file. Use this one or:
* `ca_path`: Optional path to the folder containing the CA certificate file

_Added in Synapse 1.78.0._

_Changed in Synapse 1.84.0: Added use\_tls, certificate\_file, private\_key\_file, ca\_file and ca\_path attributes_

_Changed in Synapse 1.85.0: Added path option to use a local Unix socket_

Example configuration:
```yaml
redis:
@@ -4107,10 +3949,6 @@ redis:
port: 6379
password: <secret_password>
dbid: <dbid>
#use_tls: True
#certificate_file: <path_to_the_certificate_file>
#private_key_file: <path_to_the_private_key_file>
#ca_file: <path_to_the_ca_certificate_file>
```
---
## Individual worker configuration
@@ -4147,15 +3985,57 @@ Example configuration:
worker_name: generic_worker1
```
---
### `worker_replication_host`

The HTTP replication endpoint that it should talk to on the main Synapse process.
The main Synapse process defines this with a `replication` resource in
[`listeners` option](#listeners).

Example configuration:
```yaml
worker_replication_host: 127.0.0.1
```
---
### `worker_replication_http_port`

The HTTP replication port that it should talk to on the main Synapse process.
The main Synapse process defines this with a `replication` resource in
[`listeners` option](#listeners).

Example configuration:
```yaml
worker_replication_http_port: 9093
```
---
### `worker_replication_http_tls`

Whether TLS should be used for talking to the HTTP replication port on the main
Synapse process.
The main Synapse process defines this with the `tls` option on its [listener](#listeners) that
has the `replication` resource enabled.

**Please note:** by default, it is not safe to expose replication ports to the
public Internet, even with TLS enabled.
See [`worker_replication_secret`](#worker_replication_secret).

Defaults to `false`.

*Added in Synapse 1.72.0.*

Example configuration:
```yaml
worker_replication_http_tls: true
```
---
### `worker_listeners`

A worker can handle HTTP requests. To do so, a `worker_listeners` option
must be declared, in the same way as the [`listeners` option](#listeners)
in the shared config.

Workers declared in [`stream_writers`](#stream_writers) and [`instance_map`](#instance_map)
will need to include a `replication` listener here, in order to accept internal HTTP
requests from other workers.
Workers declared in [`stream_writers`](#stream_writers) will need to include a
`replication` listener here, in order to accept internal HTTP requests from
other workers.

Example configuration:
```yaml
@@ -4165,18 +4045,6 @@ worker_listeners:
resources:
  - names: [client, federation]
```
Example configuration (#2, using UNIX sockets with a `replication` listener):
```yaml
worker_listeners:
  - type: http
    path: /var/run/synapse/worker_public.sock
    resources:
      - names: [client, federation]
  - type: http
    path: /var/run/synapse/worker_replication.sock
    resources:
      - names: [replication]
```
---
### `worker_manhole`

@@ -24,11 +24,6 @@ Finally, we also stylise the chapter titles in the left sidebar by indenting the
slightly so that they are more visually distinguishable from the section headers
(the bold titles). This is done through the `indent-section-headers.css` file.

In addition to these modifications, we have added a version picker to the documentation.
Users can switch between documentation for different versions of Synapse.
This functionality was implemented through the `version-picker.js` and
`version-picker.css` files.

More information can be found in mdbook's official documentation for
[injecting page JS/CSS](https://rust-lang.github.io/mdBook/format/config.html)
and

@@ -131,18 +131,6 @@
<i class="fa fa-search"></i>
</button>
{{/if}}
<div class="version-picker">
    <div class="dropdown">
        <div class="select">
            <span></span>
            <i class="fa fa-chevron-down"></i>
        </div>
        <input type="hidden" name="version">
        <ul class="dropdown-menu">
            <!-- Versions will be added dynamically in version-picker.js -->
        </ul>
    </div>
</div>
</div>

<h1 class="menu-title">{{ book_title }}</h1>
@@ -321,4 +309,4 @@
{{/if}}

</body>
</html>
</html>
|
||||
@@ -1,78 +0,0 @@
.version-picker {
    display: flex;
    align-items: center;
}

.version-picker .dropdown {
    width: 130px;
    max-height: 29px;
    margin-left: 10px;
    display: inline-block;
    border-radius: 4px;
    border: 1px solid var(--theme-popup-border);
    position: relative;
    font-size: 13px;
    color: var(--fg);
    height: 100%;
    text-align: left;
}
.version-picker .dropdown .select {
    cursor: pointer;
    display: block;
    padding: 5px 2px 5px 15px;
}
.version-picker .dropdown .select > i {
    font-size: 10px;
    color: var(--fg);
    cursor: pointer;
    float: right;
    line-height: 20px !important;
}
.version-picker .dropdown:hover {
    border: 1px solid var(--theme-popup-border);
}
.version-picker .dropdown:active {
    background-color: var(--theme-popup-bg);
}
.version-picker .dropdown.active:hover,
.version-picker .dropdown.active {
    border: 1px solid var(--theme-popup-border);
    border-radius: 2px 2px 0 0;
    background-color: var(--theme-popup-bg);
}
.version-picker .dropdown.active .select > i {
    transform: rotate(-180deg);
}
.version-picker .dropdown .dropdown-menu {
    position: absolute;
    background-color: var(--theme-popup-bg);
    width: 100%;
    left: -1px;
    right: 1px;
    margin-top: 1px;
    border: 1px solid var(--theme-popup-border);
    border-radius: 0 0 4px 4px;
    overflow: hidden;
    display: none;
    max-height: 300px;
    overflow-y: auto;
    z-index: 9;
}
.version-picker .dropdown .dropdown-menu li {
    font-size: 12px;
    padding: 6px 20px;
    cursor: pointer;
}
.version-picker .dropdown .dropdown-menu {
    padding: 0;
    list-style: none;
}
.version-picker .dropdown .dropdown-menu li:hover {
    background-color: var(--theme-hover);
}
.version-picker .dropdown .dropdown-menu li.active::before {
    display: inline-block;
    content: "✓";
    margin-inline-start: -14px;
    width: 14px;
}
@@ -1,127 +0,0 @@

const dropdown = document.querySelector('.version-picker .dropdown');
const dropdownMenu = dropdown.querySelector('.dropdown-menu');

fetchVersions(dropdown, dropdownMenu).then(() => {
    initializeVersionDropdown(dropdown, dropdownMenu);
});

/**
 * Initialize the dropdown functionality for version selection.
 *
 * @param {Element} dropdown - The dropdown element.
 * @param {Element} dropdownMenu - The dropdown menu element.
 */
function initializeVersionDropdown(dropdown, dropdownMenu) {
    // Toggle the dropdown menu on click
    dropdown.addEventListener('click', function () {
        this.setAttribute('tabindex', 1);
        this.classList.toggle('active');
        dropdownMenu.style.display = (dropdownMenu.style.display === 'block') ? 'none' : 'block';
    });

    // Remove the 'active' class and hide the dropdown menu on focusout
    dropdown.addEventListener('focusout', function () {
        this.classList.remove('active');
        dropdownMenu.style.display = 'none';
    });

    // Handle item selection within the dropdown menu
    const dropdownMenuItems = dropdownMenu.querySelectorAll('li');
    dropdownMenuItems.forEach(function (item) {
        item.addEventListener('click', function () {
            dropdownMenuItems.forEach(function (item) {
                item.classList.remove('active');
            });
            this.classList.add('active');
            dropdown.querySelector('span').textContent = this.textContent;
            dropdown.querySelector('input').value = this.getAttribute('id');

            window.location.href = changeVersion(window.location.href, this.textContent);
        });
    });
};

/**
 * This function fetches the available versions from a GitHub repository
 * and inserts them into the version picker.
 *
 * @param {Element} dropdown - The dropdown element.
 * @param {Element} dropdownMenu - The dropdown menu element.
 * @returns {Promise<Array<string>>} A promise that resolves with an array of available versions.
 */
function fetchVersions(dropdown, dropdownMenu) {
    return new Promise((resolve, reject) => {
        window.addEventListener("load", () => {

            fetch("https://api.github.com/repos/matrix-org/synapse/git/trees/gh-pages", {
                cache: "force-cache",
            }).then(res =>
                res.json()
            ).then(resObject => {
                const excluded = ['dev-docs', 'v1.91.0', 'v1.80.0', 'v1.69.0'];
                const tree = resObject.tree.filter(item => item.type === "tree" && !excluded.includes(item.path));
                const versions = tree.map(item => item.path).sort(sortVersions);

                // Create a list of <li> items for versions
                versions.forEach((version) => {
                    const li = document.createElement("li");
                    li.textContent = version;
                    li.id = version;

                    if (window.SYNAPSE_VERSION === version) {
                        li.classList.add('active');
                        dropdown.querySelector('span').textContent = version;
                        dropdown.querySelector('input').value = version;
                    }

                    dropdownMenu.appendChild(li);
                });

                resolve(versions);

            }).catch(ex => {
                console.error("Failed to fetch version data", ex);
                reject(ex);
            })
        });
    });
}

/**
 * Custom sorting function to sort an array of version strings.
 *
 * @param {string} a - The first version string to compare.
 * @param {string} b - The second version string to compare.
 * @returns {number} - A negative number if a should come before b, a positive number if b should come before a, or 0 if they are equal.
 */
function sortVersions(a, b) {
    // Put 'develop' and 'latest' at the top
    if (a === 'develop' || a === 'latest') return -1;
    if (b === 'develop' || b === 'latest') return 1;

    const versionA = (a.match(/v\d+(\.\d+)+/) || [])[0];
    const versionB = (b.match(/v\d+(\.\d+)+/) || [])[0];

    return versionB.localeCompare(versionA);
}

/**
 * Change the version in a URL path.
 *
 * @param {string} url - The original URL to be modified.
 * @param {string} newVersion - The new version to replace the existing version in the URL.
 * @returns {string} The updated URL with the new version.
 */
function changeVersion(url, newVersion) {
    const parsedURL = new URL(url);
    const pathSegments = parsedURL.pathname.split('/');

    // Modify the version
    pathSegments[2] = newVersion;

    // Reconstruct the URL
    parsedURL.pathname = pathSegments.join('/');

    return parsedURL.href;
}
@@ -1 +0,0 @@
window.SYNAPSE_VERSION = 'v1.89';
docs/workers.md (136 lines changed)
@@ -87,21 +87,12 @@ shared configuration file.

### Shared configuration

Normally, only a few changes are needed to make an existing configuration
file suitable for use with workers:
* First, you need to enable an
Normally, only a couple of changes are needed to make an existing configuration
file suitable for use with workers. First, you need to enable an
["HTTP replication listener"](usage/configuration/config_documentation.md#listeners)
for the main process
* Secondly, you need to enable
[redis-based replication](usage/configuration/config_documentation.md#redis)
* You will need to add an [`instance_map`](usage/configuration/config_documentation.md#instance_map)
with the `main` process defined, as well as the relevant connection information from
its HTTP `replication` listener (defined in step 1 above).
* Note that the `host` defined is the address the worker needs to look for the `main`
process at, not necessarily the same address that is bound to.
* If you are using Unix sockets for the `replication` resource, make sure to
use a `path` to the socket file instead of a `port`.
* Optionally, a [shared secret](usage/configuration/config_documentation.md#worker_replication_secret)
for the main process; and secondly, you need to enable
[redis-based replication](usage/configuration/config_documentation.md#redis).
Optionally, a [shared secret](usage/configuration/config_documentation.md#worker_replication_secret)
can be used to authenticate HTTP traffic between workers. For example:

```yaml
@@ -120,11 +111,6 @@ worker_replication_secret: ""

redis:
  enabled: true

instance_map:
  main:
    host: 'localhost'
    port: 9093
```
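
If you use Unix sockets for the `replication` resource instead, the
`instance_map` entry points at the socket with a `path` rather than a
`host`/`port` pair. A minimal sketch, assuming your Synapse version supports
Unix sockets here (the socket filename is illustrative):

```yaml
instance_map:
  main:
    path: /var/run/synapse/main_replication.sock
```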

See the [configuration manual](usage/configuration/config_documentation.md)
@@ -144,6 +130,9 @@ In the config file for each worker, you must specify:
* The type of worker ([`worker_app`](usage/configuration/config_documentation.md#worker_app)).
  The currently available worker applications are listed [below](#available-worker-applications).
* A unique name for the worker ([`worker_name`](usage/configuration/config_documentation.md#worker_name)).
* The HTTP replication endpoint that it should talk to on the main synapse process
  ([`worker_replication_host`](usage/configuration/config_documentation.md#worker_replication_host) and
  [`worker_replication_http_port`](usage/configuration/config_documentation.md#worker_replication_http_port)).
* If handling HTTP requests, a [`worker_listeners`](usage/configuration/config_documentation.md#worker_listeners) option
  with an `http` listener.
* **Synapse 1.72 and older:** if handling the `^/_matrix/client/v3/keys/upload` endpoint, the HTTP URI for
@@ -177,11 +166,11 @@ The following applies to Synapse installations that have been installed from source

You can start the main Synapse process with Poetry by running the following command:
```console
poetry run synapse_homeserver --config-file [your homeserver.yaml]
poetry run synapse_homeserver -c [your homeserver.yaml]
```
For worker setups, you can run the following command:
```console
poetry run synapse_worker --config-file [your homeserver.yaml] --config-file [your worker.yaml]
poetry run synapse_worker -c [your worker.yaml]
```
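
For reference, `[your worker.yaml]` above is a small file holding only the
worker-specific options described earlier. A minimal sketch (the worker name
and port are illustrative; on Synapse 1.72 and older you would also add the
`worker_replication_host`/`worker_replication_http_port` pair):

```yaml
worker_app: synapse.app.generic_worker
worker_name: generic_worker1

worker_listeners:
  - type: http
    port: 8083
    resources:
      - names: [client, federation]
```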

## Available worker applications

@@ -232,6 +221,7 @@ information.
    ^/_matrix/client/v1/rooms/.*/hierarchy$
    ^/_matrix/client/(v1|unstable)/rooms/.*/relations/
    ^/_matrix/client/v1/rooms/.*/threads$
    ^/_matrix/client/unstable/org.matrix.msc2716/rooms/.*/batch_send$
    ^/_matrix/client/unstable/im.nheko.summary/rooms/.*/summary$
    ^/_matrix/client/(r0|v3|unstable)/account/3pid$
    ^/_matrix/client/(r0|v3|unstable)/account/whoami$
@@ -335,7 +325,8 @@ load balancing can be done in different ways.

For `/sync` and `/initialSync` requests it will be more efficient if all
requests from a particular user are routed to a single instance. This can
be done in a reverse proxy by extracting the username part from the user's access token.
be done e.g. in nginx via IP `hash $http_x_forwarded_for;` or via
`hash $http_authorization consistent;` which contains the user's access token.

Admins may additionally wish to separate out `/sync`
requests that have a `since` query parameter from those that don't (and
@@ -344,69 +335,6 @@ when a user logs in on a new device and can be *very* resource intensive, so
isolating these requests will stop them from interfering with other users' ongoing
syncs.

Example `nginx` configuration snippet that handles the cases above. This is just an
example and probably requires some changes according to your particular setup:

```nginx
# Choose sync worker based on the existence of "since" query parameter
map $arg_since $sync {
    default synapse_sync;
    '' synapse_initial_sync;
}

# Extract username from access token passed as URL parameter
map $arg_access_token $accesstoken_from_urlparam {
    # Defaults to just passing back the whole accesstoken
    default $arg_access_token;
    # Try to extract username part from accesstoken URL parameter
    "~syt_(?<username>.*?)_.*" $username;
}

# Extract username from access token passed as authorization header
map $http_authorization $mxid_localpart {
    # Defaults to just passing back the whole accesstoken
    default $http_authorization;
    # Try to extract username part from accesstoken header
    "~Bearer syt_(?<username>.*?)_.*" $username;
    # if no authorization-header exist, try mapper for URL parameter "access_token"
    "" $accesstoken_from_urlparam;
}

upstream synapse_initial_sync {
    # Use the username mapper result for hash key
    hash $mxid_localpart consistent;
    server 127.0.0.1:8016;
    server 127.0.0.1:8036;
}

upstream synapse_sync {
    # Use the username mapper result for hash key
    hash $mxid_localpart consistent;
    server 127.0.0.1:8013;
    server 127.0.0.1:8037;
    server 127.0.0.1:8038;
    server 127.0.0.1:8039;
}

# Sync initial/normal
location ~ ^/_matrix/client/(r0|v3)/sync$ {
    proxy_pass http://$sync;
}

# Normal sync
location ~ ^/_matrix/client/(api/v1|r0|v3)/events$ {
    proxy_pass http://synapse_sync;
}

# Initial_sync
location ~ ^/_matrix/client/(api/v1|r0|v3)/initialSync$ {
    proxy_pass http://synapse_initial_sync;
}
location ~ ^/_matrix/client/(api/v1|r0|v3)/rooms/[^/]+/initialSync$ {
    proxy_pass http://synapse_initial_sync;
}
```

Federation and client requests can be balanced via simple round robin.

The inbound federation transaction request `^/_matrix/federation/v1/send/`
@@ -427,14 +355,11 @@ effects of bursts of events from that bridge on events sent by normal users.
Additionally, the writing of specific streams (such as events) can be moved off
of the main process to a particular worker.

To enable this, the worker must have:
* An [HTTP `replication` listener](usage/configuration/config_documentation.md#listeners) configured,
* Have a [`worker_name`](usage/configuration/config_documentation.md#worker_name)
To enable this, the worker must have a
[HTTP `replication` listener](usage/configuration/config_documentation.md#listeners) configured,
have a [`worker_name`](usage/configuration/config_documentation.md#worker_name)
and be listed in the [`instance_map`](usage/configuration/config_documentation.md#instance_map)
config.
* Have the main process declared on the [`instance_map`](usage/configuration/config_documentation.md#instance_map) as well.

Note: The same worker can handle multiple streams, but unless otherwise documented,
config. The same worker can handle multiple streams, but unless otherwise documented,
each stream can only have a single writer.

For example, to move event persistence off to a dedicated worker, the shared
@@ -442,9 +367,6 @@ configuration would include:

```yaml
instance_map:
  main:
    host: localhost
    port: 8030
  event_persister1:
    host: localhost
    port: 8034
@@ -531,30 +453,6 @@ the stream writer for the `presence` stream:

    ^/_matrix/client/(api/v1|r0|v3|unstable)/presence/

#### Restrict outbound federation traffic to a specific set of workers

The
[`outbound_federation_restricted_to`](usage/configuration/config_documentation.md#outbound_federation_restricted_to)
configuration is useful to make sure outbound federation traffic only goes through a
specified subset of workers. This allows you to set more strict access controls (like a
firewall) for all workers and only allow the `federation_sender`s to contact the
outside world.

```yaml
instance_map:
  main:
    host: localhost
    port: 8030
  federation_sender1:
    host: localhost
    port: 8034

outbound_federation_restricted_to:
  - federation_sender1

worker_replication_secret: "secret_secret"
```

#### Background tasks

There is also support for moving background tasks to a separate
flake.lock (generated; 286 lines changed)
@@ -1,271 +1,22 @@
{
  "nodes": {
    "devenv": {
      "inputs": {
        "flake-compat": "flake-compat",
        "nix": "nix",
        "nixpkgs": "nixpkgs",
        "pre-commit-hooks": "pre-commit-hooks"
      },
      "locked": {
        "lastModified": 1683102061,
        "narHash": "sha256-kOphT6V0uQUlFNBP3GBjs7DAU7fyZGGqCs9ue1gNY6E=",
        "owner": "cachix",
        "repo": "devenv",
        "rev": "ff1f29e41756553174d596cafe3a9fa77595100b",
        "type": "github"
      },
      "original": {
        "owner": "cachix",
        "ref": "main",
        "repo": "devenv",
        "type": "github"
      }
    },
    "flake-compat": {
      "flake": false,
      "locked": {
        "lastModified": 1673956053,
        "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=",
        "owner": "edolstra",
        "repo": "flake-compat",
        "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9",
        "type": "github"
      },
      "original": {
        "owner": "edolstra",
        "repo": "flake-compat",
        "type": "github"
      }
    },
    "flake-utils": {
      "locked": {
        "lastModified": 1667395993,
        "narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=",
        "owner": "numtide",
        "repo": "flake-utils",
        "rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f",
        "type": "github"
      },
      "original": {
        "owner": "numtide",
        "repo": "flake-utils",
        "type": "github"
      }
    },
    "flake-utils_2": {
      "inputs": {
        "systems": "systems"
      },
      "locked": {
        "lastModified": 1681202837,
        "narHash": "sha256-H+Rh19JDwRtpVPAWp64F+rlEtxUWBAQW28eAi3SRSzg=",
        "owner": "numtide",
        "repo": "flake-utils",
        "rev": "cfacdce06f30d2b68473a46042957675eebb3401",
        "type": "github"
      },
      "original": {
        "owner": "numtide",
        "repo": "flake-utils",
        "type": "github"
      }
    },
    "gitignore": {
      "inputs": {
        "nixpkgs": [
          "devenv",
          "pre-commit-hooks",
          "nixpkgs"
        ]
      },
      "locked": {
        "lastModified": 1660459072,
        "narHash": "sha256-8DFJjXG8zqoONA1vXtgeKXy68KdJL5UaXR8NtVMUbx8=",
        "owner": "hercules-ci",
        "repo": "gitignore.nix",
        "rev": "a20de23b925fd8264fd7fad6454652e142fd7f73",
        "type": "github"
      },
      "original": {
        "owner": "hercules-ci",
        "repo": "gitignore.nix",
        "type": "github"
      }
    },
    "lowdown-src": {
      "flake": false,
      "locked": {
        "lastModified": 1633514407,
        "narHash": "sha256-Dw32tiMjdK9t3ETl5fzGrutQTzh2rufgZV4A/BbxuD4=",
        "owner": "kristapsdz",
        "repo": "lowdown",
        "rev": "d2c2b44ff6c27b936ec27358a2653caaef8f73b8",
        "type": "github"
      },
      "original": {
        "owner": "kristapsdz",
        "repo": "lowdown",
        "type": "github"
      }
    },
    "nix": {
      "inputs": {
        "lowdown-src": "lowdown-src",
        "nixpkgs": [
          "devenv",
          "nixpkgs"
        ],
        "nixpkgs-regression": "nixpkgs-regression"
      },
      "locked": {
        "lastModified": 1676545802,
        "narHash": "sha256-EK4rZ+Hd5hsvXnzSzk2ikhStJnD63odF7SzsQ8CuSPU=",
        "owner": "domenkozar",
        "repo": "nix",
        "rev": "7c91803598ffbcfe4a55c44ac6d49b2cf07a527f",
        "type": "github"
      },
      "original": {
        "owner": "domenkozar",
        "ref": "relaxed-flakes",
        "repo": "nix",
        "type": "github"
      }
    },
    "nixpkgs": {
      "locked": {
        "lastModified": 1678875422,
        "narHash": "sha256-T3o6NcQPwXjxJMn2shz86Chch4ljXgZn746c2caGxd8=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "126f49a01de5b7e35a43fd43f891ecf6d3a51459",
        "type": "github"
        "lastModified": 1677075010,
        "narHash": "sha256-X+UmR1AkdR//lPVcShmLy8p1n857IGf7y+cyCArp8bU=",
        "path": "/nix/store/b1vy558z7lxph5mbg7n50b5njp393ia9-source",
        "rev": "c95bf18beba4290af25c60cbaaceea1110d0f727",
        "type": "path"
      },
      "original": {
        "owner": "NixOS",
        "ref": "nixpkgs-unstable",
        "repo": "nixpkgs",
        "type": "github"
      }
    },
    "nixpkgs-regression": {
      "locked": {
        "lastModified": 1643052045,
        "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
        "type": "github"
      },
      "original": {
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
        "type": "github"
      }
    },
    "nixpkgs-stable": {
      "locked": {
        "lastModified": 1673800717,
        "narHash": "sha256-SFHraUqLSu5cC6IxTprex/nTsI81ZQAtDvlBvGDWfnA=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "2f9fd351ec37f5d479556cd48be4ca340da59b8f",
        "type": "github"
      },
      "original": {
        "owner": "NixOS",
        "ref": "nixos-22.11",
        "repo": "nixpkgs",
        "type": "github"
      }
    },
    "nixpkgs_2": {
      "locked": {
        "lastModified": 1682519441,
        "narHash": "sha256-Vsq/8NOtvW1AoC6shCBxRxZyMQ+LhvPuJT6ltbzuv+Y=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "7a32a141db568abde9bc389845949dc2a454dfd3",
        "type": "github"
      },
      "original": {
        "owner": "NixOS",
        "ref": "master",
        "repo": "nixpkgs",
        "type": "github"
      }
    },
    "nixpkgs_3": {
      "locked": {
        "lastModified": 1681358109,
        "narHash": "sha256-eKyxW4OohHQx9Urxi7TQlFBTDWII+F+x2hklDOQPB50=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "96ba1c52e54e74c3197f4d43026b3f3d92e83ff9",
        "type": "github"
      },
      "original": {
        "owner": "NixOS",
        "ref": "nixpkgs-unstable",
        "repo": "nixpkgs",
        "type": "github"
      }
    },
    "pre-commit-hooks": {
      "inputs": {
        "flake-compat": [
          "devenv",
          "flake-compat"
        ],
        "flake-utils": "flake-utils",
        "gitignore": "gitignore",
        "nixpkgs": [
          "devenv",
          "nixpkgs"
        ],
        "nixpkgs-stable": "nixpkgs-stable"
      },
      "locked": {
        "lastModified": 1678376203,
        "narHash": "sha256-3tyYGyC8h7fBwncLZy5nCUjTJPrHbmNwp47LlNLOHSM=",
        "owner": "cachix",
        "repo": "pre-commit-hooks.nix",
        "rev": "1a20b9708962096ec2481eeb2ddca29ed747770a",
        "type": "github"
      },
      "original": {
        "owner": "cachix",
        "repo": "pre-commit-hooks.nix",
        "type": "github"
        "id": "nixpkgs",
        "type": "indirect"
      }
    },
    "root": {
      "inputs": {
        "devenv": "devenv",
        "nixpkgs": "nixpkgs_2",
        "rust-overlay": "rust-overlay",
        "systems": "systems_2"
      }
    },
    "rust-overlay": {
      "inputs": {
        "flake-utils": "flake-utils_2",
        "nixpkgs": "nixpkgs_3"
      },
      "locked": {
        "lastModified": 1689302058,
        "narHash": "sha256-yD74lcHTrw4niXcE9goJLbzsgyce48rQQoy5jK5ZK40=",
        "owner": "oxalica",
        "repo": "rust-overlay",
        "rev": "7b8dbbf4c67ed05a9bf3d9e658c12d4108bc24c8",
        "type": "github"
      },
      "original": {
        "owner": "oxalica",
        "repo": "rust-overlay",
        "type": "github"
        "nixpkgs": "nixpkgs",
        "utils": "utils"
      }
    },
    "systems": {
@@ -283,18 +34,21 @@
        "type": "github"
      }
    },
    "systems_2": {
    "utils": {
      "inputs": {
        "systems": "systems"
      },
      "locked": {
        "lastModified": 1681028828,
        "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
        "owner": "nix-systems",
        "repo": "default",
        "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
        "lastModified": 1681202837,
        "narHash": "sha256-H+Rh19JDwRtpVPAWp64F+rlEtxUWBAQW28eAi3SRSzg=",
        "owner": "numtide",
        "repo": "flake-utils",
        "rev": "cfacdce06f30d2b68473a46042957675eebb3401",
        "type": "github"
      },
      "original": {
        "owner": "nix-systems",
        "repo": "default",
        "owner": "numtide",
        "repo": "flake-utils",
        "type": "github"
      }
    }
flake.nix (251 lines changed)
@@ -1,244 +1,17 @@
# A Nix flake that sets up a complete Synapse development environment. Dependencies
# for the SyTest (https://github.com/matrix-org/sytest) and Complement
# (https://github.com/matrix-org/complement) Matrix homeserver test suites are also
# installed automatically.
#
# You must have already installed Nix (https://nixos.org) on your system to use this.
# Nix can be installed on Linux or MacOS; NixOS is not required. Windows is not
# directly supported, but Nix can be installed inside of WSL2 or even Docker
# containers. Please refer to https://nixos.org/download for details.
#
# You must also enable support for flakes in Nix. See the following for how to
# do so permanently: https://nixos.wiki/wiki/Flakes#Enable_flakes
#
# Be warned: you'll need over 3.75 GB of free space to download all the dependencies.
#
# Usage:
#
# With Nix installed, navigate to the directory containing this flake and run
# `nix develop --impure`. The `--impure` is necessary in order to store state
# locally from "services", such as PostgreSQL and Redis.
#
# You should now be dropped into a new shell with all programs and dependencies
# available to you!
#
# You can start up pre-configured local Synapse, PostgreSQL and Redis instances by
# running: `devenv up`. To stop them, use Ctrl-C.
#
# All state (the venv, postgres and redis data and config) are stored in
# .devenv/state. Deleting a file from here and then re-entering the shell
# will recreate these files from scratch.
#
# You can exit the development shell by typing `exit`, or using Ctrl-D.
#
# If you would like this development environment to activate automatically
# upon entering this directory in your terminal, first install `direnv`
# (https://direnv.net/). Then run `echo 'use flake . --impure' >> .envrc` at
# the root of the Synapse repo. Finally, run `direnv allow .` to allow the
# contents of '.envrc' to run every time you enter this directory. Voilà!

{
  description = "Synapse (development)";

  inputs = {
    # Use the master/unstable branch of nixpkgs. The latest stable, 22.11,
    # does not contain 'perl536Packages.NetAsyncHTTP', needed by Sytest.
    nixpkgs.url = "github:NixOS/nixpkgs/master";
    # Output a development shell for x86_64/aarch64 Linux/Darwin (MacOS).
    systems.url = "github:nix-systems/default";
    # A development environment manager built on Nix. See https://devenv.sh.
    devenv.url = "github:cachix/devenv/main";
    # Rust toolchain.
    rust-overlay.url = "github:oxalica/rust-overlay";
    utils.url = "github:numtide/flake-utils";
  };

  outputs = { self, nixpkgs, devenv, systems, rust-overlay, ... } @ inputs:
    let
      forEachSystem = nixpkgs.lib.genAttrs (import systems);
    in {
      devShells = forEachSystem (system:
        let
          overlays = [ (import rust-overlay) ];
          pkgs = import nixpkgs {
            inherit system overlays;
          };
        in {
          # Everything is configured via devenv - a Nix module for creating declarative
          # developer environments. See https://devenv.sh/reference/options/ for a list
          # of all possible options.
          default = devenv.lib.mkShell {
            inherit inputs pkgs;
            modules = [
              {
                # Make use of the Starship command prompt when this development environment
                # is manually activated (via `nix develop --impure`).
                # See https://starship.rs/ for details on the prompt itself.
                starship.enable = true;

                # Configure packages to install.
                # Search for package names at https://search.nixos.org/packages?channel=unstable
                packages = with pkgs; [
                  # The rust toolchain and related tools.
                  # This will install the "default" profile of rust components.
                  # https://rust-lang.github.io/rustup/concepts/profiles.html
                  #
                  # NOTE: We currently need to set the Rust version unnecessarily high
                  # in order to work around https://github.com/matrix-org/synapse/issues/15939
                  (rust-bin.stable."1.70.0".default.override {
                    # Additionally install the "rust-src" extension to allow diving into the
                    # Rust source code in an IDE (rust-analyzer will also make use of it).
                    extensions = [ "rust-src" ];
                  })
                  # The rust-analyzer language server implementation.
                  rust-analyzer

                  # Native dependencies for running Synapse.
                  icu
                  libffi
                  libjpeg
                  libpqxx
                  libwebp
                  libxml2
                  libxslt
                  sqlite

                  # Native dependencies for unit tests (SyTest also requires OpenSSL).
                  openssl
                  xmlsec

                  # Native dependencies for running Complement.
                  olm

                  # For building the Synapse documentation website.
                  mdbook

                  # For releasing Synapse
                  debian-devscripts # (`dch` for manipulating the Debian changelog)
                  libnotify # (the release script uses `notify-send` to tell you when CI jobs are done)
                ];

                # Install Python and manage a virtualenv with Poetry.
                languages.python.enable = true;
                languages.python.poetry.enable = true;
                # Automatically activate the poetry virtualenv upon entering the shell.
                languages.python.poetry.activate.enable = true;
                # Install all extra Python dependencies; this is needed to run the unit
                # tests and utilise all Synapse features.
                languages.python.poetry.install.arguments = ["--extras all"];
                # Install the 'matrix-synapse' package from the local checkout.
                languages.python.poetry.install.installRootPackage = true;

                # This is a work-around for NixOS systems. NixOS is special in
                # that you can have multiple versions of packages installed at
                # once, including your libc linker!
                #
                # Some binaries built for Linux expect those to be in a certain
                # filepath, but that is not the case on NixOS. In that case, we
                # force compiling those binaries locally instead.
                env.POETRY_INSTALLER_NO_BINARY = "ruff";

                # Install dependencies for the additional programming languages
                # involved with Synapse development.
                #
                # * Golang is needed to run the Complement test suite.
                # * Perl is needed to run the SyTest test suite.
                # * Rust is used for developing and running Synapse.
                #   It is installed manually with `packages` above.
                languages.go.enable = true;
                languages.perl.enable = true;

                # Postgres is needed to run Synapse with postgres support and
                # to run certain unit tests that require postgres.
                services.postgres.enable = true;

                # On the first invocation of `devenv up`, create a database for
                # Synapse to store data in.
                services.postgres.initdbArgs = ["--locale=C" "--encoding=UTF8"];
                services.postgres.initialDatabases = [
                  { name = "synapse"; }
                ];
                # Create a postgres user called 'synapse_user' which has ownership
                # over the 'synapse' database.
                services.postgres.initialScript = ''
                  CREATE USER synapse_user;
                  ALTER DATABASE synapse OWNER TO synapse_user;
                '';

                # Redis is needed in order to run Synapse in worker mode.
                services.redis.enable = true;

                # Configure and start Synapse. Before starting Synapse, this shell code:
                # * generates a default homeserver.yaml config file if one does not exist, and
                # * ensures a directory containing two additional homeserver config files exists;
                #   one to configure using the development environment's PostgreSQL as the
                #   database backend and another for enabling Redis support.
                process.before = ''
                  python -m synapse.app.homeserver -c homeserver.yaml --generate-config --server-name=synapse.dev --report-stats=no
                  mkdir -p homeserver-config-overrides.d
                  cat > homeserver-config-overrides.d/database.yaml << EOF
                  ## Do not edit this file. This file is generated by flake.nix
                  database:
                    name: psycopg2
                    args:
                      user: synapse_user
                      database: synapse
                      host: $PGHOST
                      cp_min: 5
                      cp_max: 10
                  EOF
                  cat > homeserver-config-overrides.d/redis.yaml << EOF
                  ## Do not edit this file. This file is generated by flake.nix
                  redis:
                    enabled: true
                  EOF
                '';
                # Start synapse when `devenv up` is run.
                processes.synapse.exec = "poetry run python -m synapse.app.homeserver -c homeserver.yaml -c homeserver-config-overrides.d";

                # Define the perl modules we require to run SyTest.
                #
                # This list was compiled by cross-referencing https://metacpan.org/
                # with the modules defined in './cpanfile' and then finding the
                # corresponding Nix packages on https://search.nixos.org/packages.
                #
                # This was done until `./install-deps.pl --dryrun` produced no output.
                env.PERL5LIB = "${with pkgs.perl536Packages; makePerlPath [
                  DBI
                  ClassMethodModifiers
                  CryptEd25519
                  DataDump
                  DBDPg
                  DigestHMAC
                  DigestSHA1
                  EmailAddressXS
                  EmailMIME
                  EmailSimple # required by Email::Mime
                  EmailMessageID # required by Email::Mime
                  EmailMIMEContentType # required by Email::Mime
                  TextUnidecode # required by Email::Mime
                  ModuleRuntime # required by Email::Mime
                  EmailMIMEEncodings # required by Email::Mime
                  FilePath
                  FileSlurper
                  Future
                  GetoptLong
                  HTTPMessage
                  IOAsync
                  IOAsyncSSL
                  IOSocketSSL
                  NetSSLeay
                  JSON
                  ListUtilsBy
                  ScalarListUtils
                  ModulePluggable
                  NetAsyncHTTP
                  MetricsAny # required by Net::Async::HTTP
                  NetAsyncHTTPServer
                  StructDumb
                  URI
                  YAMLLibYAML
                ]}";
              }
            ];
          };
        });
    };
  outputs = { self, nixpkgs, utils }:
    utils.lib.eachDefaultSystem (system: let
      pkgs = nixpkgs.legacyPackages."${system}";
    in rec {
      # `nix develop`
      devShell = pkgs.mkShell {
        nativeBuildInputs = with pkgs; [ rustc cargo python sqlite poetry postgresql icu clang ];
      };
    });
}
mypy.ini (51 lines changed)
@@ -2,32 +2,17 @@
namespace_packages = True
plugins = pydantic.mypy, mypy_zope:plugin, scripts-dev/mypy_synapse_plugin.py
follow_imports = normal
check_untyped_defs = True
show_error_codes = True
show_traceback = True
mypy_path = stubs
warn_unreachable = True
warn_unused_ignores = True
local_partial_types = True
no_implicit_optional = True

# Strict checks, see mypy --help
warn_unused_configs = True
# disallow_any_generics = True
disallow_subclassing_any = True
# disallow_untyped_calls = True
disallow_untyped_defs = True
disallow_incomplete_defs = True
# check_untyped_defs = True
# disallow_untyped_decorators = True
warn_redundant_casts = True
warn_unused_ignores = True
# warn_return_any = True
# no_implicit_reexport = True
strict_equality = True
strict_concatenate = True

# Run mypy type checking with the minimum supported Python version to catch new usage
# that isn't backwards-compatible (types, overloads, etc).
python_version = 3.8
warn_redundant_casts = True

files =
  docker/,
@@ -36,14 +21,35 @@ files =
  tests/,
  build_rust.py

# Note: Better exclusion syntax coming in mypy > 0.910
# https://github.com/python/mypy/pull/11329
#
# For now, set the (?x) flag to enable "verbose" regexes
# https://docs.python.org/3/library/re.html#re.X
exclude = (?x)
  ^(
  |synapse/storage/databases/__init__.py
  |synapse/storage/databases/main/cache.py
  |synapse/storage/schema/
  )$

[mypy-synapse.federation.transport.client]
disallow_untyped_defs = False

[mypy-synapse.http.matrixfederationclient]
disallow_untyped_defs = False

[mypy-synapse.metrics._reactor_metrics]
disallow_untyped_defs = False
# This module imports select.epoll. That exists on Linux, but doesn't on macOS.
# See https://github.com/matrix-org/synapse/pull/11771.
warn_unused_ignores = False

[mypy-synapse.util.caches.treecache]
disallow_untyped_defs = False
disallow_incomplete_defs = False

[mypy-tests.util.caches.test_descriptors]
disallow_untyped_defs = False

;; Dependencies without annotations
;; Before ignoring a module, check to see if type stubs are available.
@@ -53,18 +59,18 @@ disallow_incomplete_defs = False
;; which we can pull in as a dev dependency by adding to `pyproject.toml`'s
;; `[tool.poetry.dev-dependencies]` list.

# https://github.com/lepture/authlib/issues/460
[mypy-authlib.*]
ignore_missing_imports = True

[mypy-ijson.*]
ignore_missing_imports = True

# https://github.com/msgpack/msgpack-python/issues/448
[mypy-lxml]
ignore_missing_imports = True

[mypy-msgpack]
ignore_missing_imports = True

# https://github.com/wolever/parameterized/issues/143
[mypy-parameterized.*]
ignore_missing_imports = True

@@ -86,7 +92,6 @@ ignore_missing_imports = True
[mypy-srvlookup.*]
ignore_missing_imports = True

# https://github.com/twisted/treq/pull/366
[mypy-treq.*]
ignore_missing_imports = True

poetry.lock (generated; 1446 lines changed)
File diff suppressed because it is too large.
@@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml"

[tool.poetry]
name = "matrix-synapse"
version = "1.89.0"
version = "1.82.0rc1"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "Apache-2.0"
@@ -147,7 +147,7 @@ synapse_review_recent_signups = "synapse._scripts.review_recent_signups:main"
update_synapse_database = "synapse._scripts.update_synapse_database:main"

[tool.poetry.dependencies]
python = "^3.8.0"
python = "^3.7.1"

# Mandatory Dependencies
# ----------------------
@@ -203,9 +203,11 @@ ijson = ">=3.1.4"
matrix-common = "^1.3.0"
# We need packaging.requirements.Requirement, added in 16.1.
packaging = ">=16.1"
# At the time of writing, we only use functions from the version `importlib.metadata`
# which shipped in Python 3.8. This corresponds to version 1.4 of the backport.
importlib_metadata = { version = ">=1.4", python = "<3.8" }
# This is the most recent version of Pydantic available on common distros.
# We are currently incompatible with >=2.0.0: (https://github.com/matrix-org/synapse/issues/15858)
pydantic = "^1.7.4"
pydantic = ">=1.7.4"

# This is for building the rust components during "poetry install", which
# currently ignores the `build-system.requires` directive (c.f.
@@ -309,10 +311,9 @@ all = [
# We pin black so that our tests don't start failing on new releases.
isort = ">=5.10.1"
black = ">=22.3.0"
ruff = "0.0.277"
ruff = "0.0.259"

# Typechecking
lxml-stubs = ">=0.4.0"
mypy = "*"
mypy-zope = "*"
types-bleach = ">=4.1.0"
@@ -367,21 +368,13 @@ furo = ">=2022.12.7,<2024.0.0"
# system changes.
# We are happy to raise these upper bounds upon request,
# provided we check that it's safe to do so (i.e. that CI passes).
requires = ["poetry-core>=1.1.0,<=1.6.0", "setuptools_rust>=1.3,<=1.6.0"]
requires = ["poetry-core>=1.0.0,<=1.5.0", "setuptools_rust>=1.3,<=1.5.2"]
build-backend = "poetry.core.masonry.api"


[tool.cibuildwheel]
# Skip unsupported platforms (by us or by Rust).
# See https://cibuildwheel.readthedocs.io/en/stable/options/#build-skip for the list of build targets.
# We skip:
# - CPython 3.6 and 3.7: EOLed
# - PyPy 3.7: we only support Python 3.8+
# - musllinux i686: excluded to reduce number of wheels we build.
#   c.f. https://github.com/matrix-org/synapse/pull/12595#discussion_r963107677
# - PyPy on Aarch64 and musllinux on aarch64: too slow to build.
#   c.f. https://github.com/matrix-org/synapse/pull/14259
skip = "cp36* cp37* pp37* *-musllinux_i686 pp*aarch64 *-musllinux_aarch64"
skip = "cp36* *-musllinux_i686 pp*aarch64 *-musllinux_aarch64"

# We need a rust compiler
before-all = "curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain stable -y --profile minimal"

@@ -7,7 +7,7 @@ name = "synapse"
version = "0.1.0"

edition = "2021"
rust-version = "1.60.0"
rust-version = "1.58.1"

[lib]
name = "synapse"

@@ -13,6 +13,8 @@
// limitations under the License.

#![feature(test)]
use std::collections::BTreeSet;

use synapse::push::{
    evaluator::PushRuleEvaluator, Condition, EventMatchCondition, FilteredPushRules, JsonValue,
    PushRules, SimpleJsonValue,
@@ -195,6 +197,7 @@ fn bench_eval_message(b: &mut Bencher) {
        false,
        false,
        false,
        false,
    );

    b.iter(|| eval.run(&rules, Some("bob"), Some("person")));

@@ -57,7 +57,7 @@ pub const BASE_PREPEND_OVERRIDE_RULES: &[PushRule] = &[PushRule {
    rule_id: Cow::Borrowed("global/override/.m.rule.master"),
    priority_class: 5,
    conditions: Cow::Borrowed(&[]),
    actions: Cow::Borrowed(&[]),
    actions: Cow::Borrowed(&[Action::DontNotify]),
    default: true,
    default_enabled: false,
}];
@@ -88,7 +88,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
                pattern: Cow::Borrowed("m.notice"),
            },
        ))]),
        actions: Cow::Borrowed(&[]),
        actions: Cow::Borrowed(&[Action::DontNotify]),
        default: true,
        default_enabled: true,
    },
@@ -122,7 +122,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
                pattern: Cow::Borrowed("m.room.member"),
            },
        ))]),
        actions: Cow::Borrowed(&[]),
        actions: Cow::Borrowed(&[Action::DontNotify]),
        default: true,
        default_enabled: true,
    },
@@ -142,11 +142,11 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
        default_enabled: true,
    },
    PushRule {
        rule_id: Cow::Borrowed("global/override/.m.rule.is_user_mention"),
        rule_id: Cow::Borrowed(".org.matrix.msc3952.is_user_mention"),
        priority_class: 5,
        conditions: Cow::Borrowed(&[Condition::Known(
            KnownCondition::ExactEventPropertyContainsType(EventPropertyIsTypeCondition {
                key: Cow::Borrowed("content.m\\.mentions.user_ids"),
                key: Cow::Borrowed("content.org\\.matrix\\.msc3952\\.mentions.user_ids"),
                value_type: Cow::Borrowed(&EventMatchPatternType::UserId),
            }),
        )]),
@@ -163,11 +163,11 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
        default_enabled: true,
    },
    PushRule {
        rule_id: Cow::Borrowed("global/override/.m.rule.is_room_mention"),
        rule_id: Cow::Borrowed(".org.matrix.msc3952.is_room_mention"),
        priority_class: 5,
        conditions: Cow::Borrowed(&[
            Condition::Known(KnownCondition::EventPropertyIs(EventPropertyIsCondition {
                key: Cow::Borrowed("content.m\\.mentions.room"),
                key: Cow::Borrowed("content.org\\.matrix\\.msc3952\\.mentions.room"),
                value: Cow::Borrowed(&SimpleJsonValue::Bool(true)),
            })),
            Condition::Known(KnownCondition::SenderNotificationPermission {

@@ -70,9 +70,7 @@ pub struct PushRuleEvaluator {
    /// The "content.body", if any.
    body: String,

    /// True if the event has a m.mentions property. (Note that this is a separate
    /// flag instead of checking flattened_keys since the m.mentions property
    /// might be an empty map and not appear in flattened_keys.
    /// True if the event has a mentions property and MSC3952 support is enabled.
    has_mentions: bool,

    /// The number of users in the room.
@@ -142,7 +140,7 @@ impl PushRuleEvaluator {
    /// name.
    ///
    /// Returns the set of actions, if any, that match (filtering out any
    /// `dont_notify` and `coalesce` actions).
    /// `dont_notify` actions).
    pub fn run(
        &self,
        push_rules: &FilteredPushRules,
@@ -157,7 +155,9 @@ impl PushRuleEvaluator {
            let rule_id = &push_rule.rule_id().to_string();

            // For backwards-compatibility the legacy mention rules are disabled
            // if the event contains the 'm.mentions' property.
            // if the event contains the 'm.mentions' property (and if the
            // experimental feature is enabled, both of these are represented
            // by the has_mentions flag).
            if self.has_mentions
                && (rule_id == "global/override/.m.rule.contains_display_name"
                    || rule_id == "global/content/.m.rule.contains_user_name"
@@ -198,9 +198,8 @@ impl PushRuleEvaluator {
        let actions = push_rule
            .actions
            .iter()
            // Filter out "dont_notify" and "coalesce" actions, as we don't store them
            // (since they result in no action by the pushers).
            .filter(|a| **a != Action::DontNotify && **a != Action::Coalesce)
            // Filter out "dont_notify" actions, as we don't store them.
            .filter(|a| **a != Action::DontNotify)
            .cloned()
            .collect();

@@ -562,7 +561,7 @@ fn test_requires_room_version_supports_condition() {
    };
    let rules = PushRules::new(vec![custom_rule]);
    result = evaluator.run(
        &FilteredPushRules::py_new(rules, BTreeMap::new(), true, false, true, false),
        &FilteredPushRules::py_new(rules, BTreeMap::new(), true, false, true, false, false),
        None,
        None,
    );

@@ -164,12 +164,10 @@ impl PushRule {
/// The "action" Synapse should perform for a matching push rule.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Action {
    Notify,
    SetTweak(SetTweak),

    // Legacy actions that should be understood, but are equivalent to no-ops.
    DontNotify,
    Notify,
    Coalesce,
    SetTweak(SetTweak),

    // An unrecognized custom action.
    Unknown(Value),
@@ -527,6 +525,7 @@ pub struct FilteredPushRules {
    msc1767_enabled: bool,
    msc3381_polls_enabled: bool,
    msc3664_enabled: bool,
    msc3952_intentional_mentions: bool,
    msc3958_suppress_edits_enabled: bool,
}

@@ -539,6 +538,7 @@ impl FilteredPushRules {
        msc1767_enabled: bool,
        msc3381_polls_enabled: bool,
        msc3664_enabled: bool,
        msc3952_intentional_mentions: bool,
        msc3958_suppress_edits_enabled: bool,
    ) -> Self {
        Self {
@@ -547,6 +547,7 @@ impl FilteredPushRules {
            msc1767_enabled,
            msc3381_polls_enabled,
            msc3664_enabled,
            msc3952_intentional_mentions,
            msc3958_suppress_edits_enabled,
        }
    }
@@ -567,10 +568,7 @@ impl FilteredPushRules {
            .filter(|rule| {
                // Ignore disabled experimental push rules

                if !self.msc1767_enabled
                    && (rule.rule_id.contains("org.matrix.msc1767")
                        || rule.rule_id.contains("org.matrix.msc3933"))
                {
                if !self.msc1767_enabled && rule.rule_id.contains("org.matrix.msc1767") {
                    return false;
                }

@@ -584,6 +582,10 @@ impl FilteredPushRules {
                    return false;
                }

                if !self.msc3952_intentional_mentions && rule.rule_id.contains("org.matrix.msc3952")
                {
                    return false;
                }
                if !self.msc3958_suppress_edits_enabled
                    && rule.rule_id == "global/override/.com.beeper.suppress_edits"
                {

@@ -20,21 +20,15 @@ from concurrent.futures import ThreadPoolExecutor
from types import FrameType
from typing import Collection, Optional, Sequence, Set

# These are expanded inside the dockerfile to be a fully qualified image name.
# e.g. docker.io/library/debian:bullseye
#
# If an EOL is forced by a Python version and we're dropping support for it, make sure
# to remove references to the distribution across Synapse (search for "bullseye" for
# example)
DISTS = (
    "debian:bullseye",  # (EOL ~2024-07) (our EOL forced by Python 3.9 is 2025-10-05)
    "debian:bookworm",  # (EOL not specified yet) (our EOL forced by Python 3.11 is 2027-10-24)
    "debian:sid",  # (EOL not specified yet) (our EOL forced by Python 3.11 is 2027-10-24)
    "ubuntu:focal",  # 20.04 LTS (EOL 2025-04) (our EOL forced by Python 3.8 is 2024-10-14)
    "ubuntu:jammy",  # 22.04 LTS (EOL 2027-04) (our EOL forced by Python 3.10 is 2026-10-04)
    "ubuntu:kinetic",  # 22.10 (EOL 2023-07-20) (our EOL forced by Python 3.10 is 2026-10-04)
    "ubuntu:lunar",  # 23.04 (EOL 2024-01) (our EOL forced by Python 3.11 is 2027-10-24)
    "debian:trixie",  # (EOL not specified yet)
    "debian:buster",  # oldstable: EOL 2022-08
    "debian:bullseye",
    "debian:bookworm",
    "debian:sid",
    "ubuntu:focal",  # 20.04 LTS (our EOL forced by Py38 on 2024-10-14)
    "ubuntu:jammy",  # 22.04 LTS (EOL 2027-04)
    "ubuntu:kinetic",  # 22.10 (EOL 2023-07-20)
    "ubuntu:lunar",  # 23.04 (EOL 2024-01)
)

DESC = """\

@@ -40,32 +40,10 @@ def main(force_colors: bool) -> None:
    exec(r, locals)
    current_schema_version = locals["SCHEMA_VERSION"]

    diffs: List[git.Diff] = repo.remote().refs.develop.commit.diff(None)

    # Get the schema version of the local file to check against current schema on develop
    with open("synapse/storage/schema/__init__.py", "r") as file:
        local_schema = file.read()
    new_locals: Dict[str, Any] = {}
    exec(local_schema, new_locals)
    local_schema_version = new_locals["SCHEMA_VERSION"]

    if local_schema_version != current_schema_version:
        # local schema version must be +/-1 the current schema version on develop
        if abs(local_schema_version - current_schema_version) != 1:
            click.secho(
                "The proposed schema version has diverged more than one version from develop, please fix!",
                fg="red",
                bold=True,
                color=force_colors,
            )
            click.get_current_context().exit(1)

        # right, we've changed the schema version within the allowable tolerance so
        # let's now use the local version as the canonical version
        current_schema_version = local_schema_version

    click.secho(f"Current schema version: {current_schema_version}")

    diffs: List[git.Diff] = repo.remote().refs.develop.commit.diff(None)

    seen_deltas = False
    bad_files = []
    for diff in diffs:

@@ -11,11 +11,6 @@
|
||||
# filepath of a local Complement checkout or by setting the COMPLEMENT_REF
|
||||
# environment variable to pull a different branch or commit.
|
||||
#
|
||||
# To use the 'podman' command instead 'docker', set the PODMAN environment
|
||||
# variable. Example:
|
||||
#
|
||||
# PODMAN=1 ./complement.sh
|
||||
#
|
||||
# By default Synapse is run in monolith mode. This can be overridden by
|
||||
# setting the WORKERS environment variable.
|
||||
#
|
||||
@@ -35,6 +30,7 @@
# Exit if a line returns a non-zero exit code
set -e


# Helper to emit annotations that collapse portions of the log in GitHub Actions
echo_if_github() {
  if [[ -n "$GITHUB_WORKFLOW" ]]; then
@@ -104,16 +100,6 @@ done
# enable buildkit for the docker builds
export DOCKER_BUILDKIT=1

# Determine whether to use the docker or podman container runtime.
if [ -n "$PODMAN" ]; then
  export CONTAINER_RUNTIME=podman
  export DOCKER_HOST=unix://$XDG_RUNTIME_DIR/podman/podman.sock
  export BUILDAH_FORMAT=docker
  export COMPLEMENT_HOSTNAME_RUNNING_COMPLEMENT=host.containers.internal
else
  export CONTAINER_RUNTIME=docker
fi

# Change to the repository root
cd "$(dirname $0)/.."

@@ -140,16 +126,16 @@ if [ -n "$use_editable_synapse" ]; then
  editable_mount="$(realpath .):/editable-src:z"
  if [ -n "$rebuild_editable_synapse" ]; then
    unset skip_docker_build
  elif $CONTAINER_RUNTIME inspect complement-synapse-editable &>/dev/null; then
  elif docker inspect complement-synapse-editable &>/dev/null; then
    # complement-synapse-editable already exists: see if we can still use it:
    # - The Rust module must still be importable; it will fail to import if the Rust source has changed.
    # - The Poetry lock file must be the same (otherwise we assume dependencies have changed)

    # First set up the module in the right place for an editable installation.
    $CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'cp' complement-synapse-editable -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so
    docker run --rm -v $editable_mount --entrypoint 'cp' complement-synapse-editable -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so

    if ($CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'python' complement-synapse-editable -c 'import synapse.synapse_rust' \
      && $CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'diff' complement-synapse-editable --brief /editable-src/poetry.lock /poetry.lock.bak); then
    if (docker run --rm -v $editable_mount --entrypoint 'python' complement-synapse-editable -c 'import synapse.synapse_rust' \
      && docker run --rm -v $editable_mount --entrypoint 'diff' complement-synapse-editable --brief /editable-src/poetry.lock /poetry.lock.bak); then
      skip_docker_build=1
    else
      echo "Editable Synapse image is stale. Will rebuild."
@@ -163,25 +149,25 @@ if [ -z "$skip_docker_build" ]; then

    # Build a special image designed for use in development with editable
    # installs.
    $CONTAINER_RUNTIME build -t synapse-editable \
    docker build -t synapse-editable \
      -f "docker/editable.Dockerfile" .

    $CONTAINER_RUNTIME build -t synapse-workers-editable \
    docker build -t synapse-workers-editable \
      --build-arg FROM=synapse-editable \
      -f "docker/Dockerfile-workers" .

    $CONTAINER_RUNTIME build -t complement-synapse-editable \
    docker build -t complement-synapse-editable \
      --build-arg FROM=synapse-workers-editable \
      -f "docker/complement/Dockerfile" "docker/complement"

    # Prepare the Rust module
    $CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'cp' complement-synapse-editable -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so
    docker run --rm -v $editable_mount --entrypoint 'cp' complement-synapse-editable -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so

  else

    # Build the base Synapse image from the local checkout
    echo_if_github "::group::Build Docker image: matrixdotorg/synapse"
    $CONTAINER_RUNTIME build -t matrixdotorg/synapse \
    docker build -t matrixdotorg/synapse \
      --build-arg TEST_ONLY_SKIP_DEP_HASH_VERIFICATION \
      --build-arg TEST_ONLY_IGNORE_POETRY_LOCKFILE \
      -f "docker/Dockerfile" .
@@ -189,12 +175,12 @@ if [ -z "$skip_docker_build" ]; then

    # Build the workers docker image (from the base Synapse image we just built).
    echo_if_github "::group::Build Docker image: matrixdotorg/synapse-workers"
    $CONTAINER_RUNTIME build -t matrixdotorg/synapse-workers -f "docker/Dockerfile-workers" .
    docker build -t matrixdotorg/synapse-workers -f "docker/Dockerfile-workers" .
    echo_if_github "::endgroup::"

    # Build the unified Complement image (from the worker Synapse image we just built).
    echo_if_github "::group::Build Docker image: complement/Dockerfile"
    $CONTAINER_RUNTIME build -t complement-synapse \
    docker build -t complement-synapse \
      -f "docker/complement/Dockerfile" "docker/complement"
    echo_if_github "::endgroup::"

@@ -214,7 +200,7 @@ fi

extra_test_args=()

test_tags="synapse_blacklist,msc3874,msc3890,msc3391,msc3930,faster_joins"
test_tags="synapse_blacklist,msc3787,msc3874,msc3890,msc3391,msc3930,faster_joins"

# All environment variables starting with PASS_ will be shared.
# (The prefix is stripped off before reaching the container.)
@@ -246,6 +232,10 @@ else
  else
    export PASS_SYNAPSE_COMPLEMENT_DATABASE=sqlite
  fi

  # The tests for importing historical messages (MSC2716)
  # only pass with monoliths, currently.
  test_tags="$test_tags,msc2716"
fi

if [[ -n "$ASYNCIO_REACTOR" ]]; then
@@ -253,10 +243,6 @@ if [[ -n "$ASYNCIO_REACTOR" ]]; then
  export PASS_SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR=true
fi

if [[ -n "$UNIX_SOCKETS" ]]; then
  # Enable full on Unix socket mode for Synapse, Redis and Postgresql
  export PASS_SYNAPSE_USE_UNIX_SOCKET=1
fi

if [[ -n "$SYNAPSE_TEST_LOG_LEVEL" ]]; then
  # Set the log level to what is desired
@@ -269,10 +255,6 @@ if [[ -n "$SYNAPSE_TEST_LOG_LEVEL" ]]; then
  export PASS_SYNAPSE_LOG_SENSITIVE=1
fi

# Log a few more useful things for a developer attempting to debug something
# particularly tricky.
export PASS_SYNAPSE_LOG_TESTING=1

# Run the tests!
echo "Images built; running complement"
cd "$COMPLEMENT_DIR"

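Putting the environment variables above together, the script supports invocations along these lines (a sketch assuming the variables behave as the header comment describes; the `-run` filter is forwarded to `go test`, and the test name is a made-up placeholder):

# Full suite against a workers-mode Synapse on Postgres, with debug-level logs:
WORKERS=1 POSTGRES=1 SYNAPSE_TEST_LOG_LEVEL=DEBUG ./complement.sh

# A single test against a monolith, using podman instead of docker:
PODMAN=1 ./complement.sh -run TestSomething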
@@ -136,11 +136,11 @@ def request(
        authorization_headers.append(header)
        print("Authorization: %s" % header, file=sys.stderr)

    dest = "matrix-federation://%s%s" % (destination, path)
    dest = "matrix://%s%s" % (destination, path)
    print("Requesting %s" % dest, file=sys.stderr)

    s = requests.Session()
    s.mount("matrix-federation://", MatrixConnectionAdapter())
    s.mount("matrix://", MatrixConnectionAdapter())

    headers: Dict[str, str] = {
        "Authorization": authorization_headers[0],

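The `s.mount(...)` lines work because requests routes each URL through the transport adapter with the longest matching prefix, which is how a non-standard scheme such as `matrix://` can be given custom connection logic. A minimal sketch of the mechanism — the adapter below is a stand-in, not Synapse's `MatrixConnectionAdapter`:

import requests
from requests.adapters import HTTPAdapter


class LoggingAdapter(HTTPAdapter):
    # A toy adapter that logs each request before delegating to HTTPAdapter.
    def send(self, request, **kwargs):
        print("sending", request.method, request.url)
        return super().send(request, **kwargs)


s = requests.Session()
# requests picks the mounted adapter whose prefix matches the URL.
s.mount("https://", LoggingAdapter())
resp = s.get("https://example.com/")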
@@ -18,11 +18,10 @@ can crop up, e.g the cache descriptors.

from typing import Callable, Optional, Type

from mypy.erasetype import remove_instance_last_known_values
from mypy.nodes import ARG_NAMED_OPT
from mypy.plugin import MethodSigContext, Plugin
from mypy.typeops import bind_self
from mypy.types import CallableType, Instance, NoneType, UnionType
from mypy.types import CallableType, NoneType, UnionType


class SynapsePlugin(Plugin):
@@ -93,41 +92,10 @@ def cached_function_method_signature(ctx: MethodSigContext) -> CallableType:
    arg_names.append("on_invalidate")
    arg_kinds.append(ARG_NAMED_OPT)  # Arg is an optional kwarg.

    # Finally we ensure the return type is a Deferred.
    if (
        isinstance(signature.ret_type, Instance)
        and signature.ret_type.type.fullname == "twisted.internet.defer.Deferred"
    ):
        # If it is already a Deferred, nothing to do.
        ret_type = signature.ret_type
    else:
        ret_arg = None
        if isinstance(signature.ret_type, Instance):
            # If a coroutine, wrap the coroutine's return type in a Deferred.
            if signature.ret_type.type.fullname == "typing.Coroutine":
                ret_arg = signature.ret_type.args[2]

            # If an awaitable, wrap the awaitable's final value in a Deferred.
            elif signature.ret_type.type.fullname == "typing.Awaitable":
                ret_arg = signature.ret_type.args[0]

        # Otherwise, wrap the return value in a Deferred.
        if ret_arg is None:
            ret_arg = signature.ret_type

        # This should be able to use ctx.api.named_generic_type, but that doesn't seem
        # to find the correct symbol for anything more than 1 module deep.
        #
        # modules is not part of CheckerPluginInterface. The following is a combination
        # of TypeChecker.named_generic_type and TypeChecker.lookup_typeinfo.
        sym = ctx.api.modules["twisted.internet.defer"].names.get("Deferred")  # type: ignore[attr-defined]
        ret_type = Instance(sym.node, [remove_instance_last_known_values(ret_arg)])

    signature = signature.copy_modified(
        arg_types=arg_types,
        arg_names=arg_names,
        arg_kinds=arg_kinds,
        ret_type=ret_type,
    )

    return signature

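For orientation, the hook being trimmed above lives in an ordinary mypy plugin: a module exposing a `plugin(version)` factory, with signature hooks dispatched by fully qualified name. A skeletal sketch of that registration machinery (the `.__call__` match is illustrative, not Synapse's actual selection logic):

from typing import Callable, Optional, Type

from mypy.plugin import MethodSigContext, Plugin
from mypy.types import CallableType


def rewrite_signature(ctx: MethodSigContext) -> CallableType:
    # A no-op hook; a real plugin would return a modified copy via
    # ctx.default_signature.copy_modified(...), as in the diff above.
    return ctx.default_signature


class DemoPlugin(Plugin):
    def get_method_signature_hook(
        self, fullname: str
    ) -> Optional[Callable[[MethodSigContext], CallableType]]:
        if fullname.endswith(".__call__"):
            return rewrite_signature
        return None


def plugin(version: str) -> Type[Plugin]:
    # mypy calls this factory with its own version string.
    return DemoPlugin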
@@ -27,7 +27,7 @@ import time
import urllib.request
from os import path
from tempfile import TemporaryDirectory
from typing import Any, List, Match, Optional, Union
from typing import Any, List, Optional

import attr
import click
@@ -233,7 +233,7 @@ def _prepare() -> None:
    subprocess.check_output(["poetry", "version", new_version])

    # Generate changelogs.
    generate_and_write_changelog(synapse_repo, current_version, new_version)
    generate_and_write_changelog(current_version, new_version)

    # Generate debian changelogs
    if parsed_new_version.pre is not None:
@@ -814,7 +814,7 @@ def get_changes_for_version(wanted_version: version.Version) -> str:


def generate_and_write_changelog(
    repo: Repo, current_version: version.Version, new_version: str
    current_version: version.Version, new_version: str
) -> None:
    # We do this by getting a draft so that we can edit it before writing to the
    # changelog.
@@ -827,10 +827,6 @@ def generate_and_write_changelog(
    new_changes = new_changes.replace(
        "No significant changes.", f"No significant changes since {current_version}."
    )
    new_changes += build_dependabot_changelog(
        repo,
        current_version,
    )

    # Prepend changes to changelog
    with open("CHANGES.md", "r+") as f:
@@ -845,49 +841,5 @@ def generate_and_write_changelog(
    os.remove(filename)


def build_dependabot_changelog(repo: Repo, current_version: version.Version) -> str:
    """Summarise dependabot commits between `current_version` and `release_branch`.

    Returns an empty string if there have been no such commits; otherwise outputs a
    third-level markdown header followed by an unordered list."""
    last_release_commit = repo.tag("v" + str(current_version)).commit
    rev_spec = f"{last_release_commit.hexsha}.."
    commits = list(git.objects.Commit.iter_items(repo, rev_spec))
    messages = []
    for commit in reversed(commits):
        if commit.author.name == "dependabot[bot]":
            message: Union[str, bytes] = commit.message
            if isinstance(message, bytes):
                message = message.decode("utf-8")
            messages.append(message.split("\n", maxsplit=1)[0])

    if not messages:
        print(f"No dependabot commits in range {rev_spec}", file=sys.stderr)
        return ""

    messages.sort()

    def replacer(match: Match[str]) -> str:
        desc = match.group(1)
        number = match.group(2)
        return f"* {desc}. ([\\#{number}](https://github.com/matrix-org/synapse/issues/{number}))"

    for i, message in enumerate(messages):
        messages[i] = re.sub(r"(.*) \(#(\d+)\)$", replacer, message)
    messages.insert(0, "### Updates to locked dependencies\n")
    # Add an extra blank line to the bottom of the section
    messages.append("")
    return "\n".join(messages)


@cli.command()
@click.argument("since")
def test_dependabot_changelog(since: str) -> None:
    """Test building the dependabot changelog.

    Summarises all dependabot commits between the SINCE tag and the current git HEAD."""
    print(build_dependabot_changelog(git.Repo("."), version.Version(since)))


if __name__ == "__main__":
    cli()

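To make the `replacer` transformation above concrete, here is how a typical dependabot commit subject is rewritten into a changelog bullet (the subject line and PR number are invented for illustration):

import re
from typing import Match


def replacer(match: Match[str]) -> str:
    desc = match.group(1)
    number = match.group(2)
    return f"* {desc}. ([\\#{number}](https://github.com/matrix-org/synapse/issues/{number}))"


message = "Bump serde from 1.0.163 to 1.0.164 (#15730)"
print(re.sub(r"(.*) \(#(\d+)\)$", replacer, message))
# * Bump serde from 1.0.163 to 1.0.164. ([\#15730](https://github.com/matrix-org/synapse/issues/15730))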
@@ -46,6 +46,7 @@ class FilteredPushRules:
        msc1767_enabled: bool,
        msc3381_polls_enabled: bool,
        msc3664_enabled: bool,
        msc3952_intentional_mentions: bool,
        msc3958_suppress_edits_enabled: bool,
    ): ...
    def rules(self) -> Collection[Tuple[PushRule, bool]]: ...

@@ -61,9 +61,6 @@ def lazyConnection(
# most methods to it via ConnectionHandler.__getattr__.
class ConnectionHandler(RedisProtocol):
    def disconnect(self) -> "Deferred[None]": ...
    def __repr__(self) -> str: ...

class UnixConnectionHandler(ConnectionHandler): ...

class RedisFactory(protocol.ReconnectingClientFactory):
    continueTrying: bool

@@ -25,8 +25,8 @@ from synapse.util.rust import check_rust_lib_up_to_date
from synapse.util.stringutils import strtobool

# Check that we're not running on an unsupported Python version.
if sys.version_info < (3, 8):
    print("Synapse requires Python 3.8 or above.")
if sys.version_info < (3, 7):
    print("Synapse requires Python 3.7 or above.")
    sys.exit(1)

# Allow using the asyncio reactor via env var.

@@ -54,14 +54,13 @@ from synapse.logging.context import (
)
from synapse.notifier import ReplicationNotifier
from synapse.storage.database import DatabasePool, LoggingTransaction, make_conn
from synapse.storage.databases.main import FilteringWorkerStore, PushRuleStore
from synapse.storage.databases.main import PushRuleStore
from synapse.storage.databases.main.account_data import AccountDataWorkerStore
from synapse.storage.databases.main.client_ips import ClientIpBackgroundUpdateStore
from synapse.storage.databases.main.deviceinbox import DeviceInboxBackgroundUpdateStore
from synapse.storage.databases.main.devices import DeviceBackgroundUpdateStore
from synapse.storage.databases.main.e2e_room_keys import EndToEndRoomKeyBackgroundStore
from synapse.storage.databases.main.end_to_end_keys import EndToEndKeyBackgroundStore
from synapse.storage.databases.main.event_federation import EventFederationWorkerStore
from synapse.storage.databases.main.event_push_actions import EventPushActionsStore
from synapse.storage.databases.main.events_bg_updates import (
    EventsBackgroundUpdatesStore,
@@ -70,7 +69,6 @@ from synapse.storage.databases.main.media_repository import (
    MediaRepositoryBackgroundUpdateStore,
)
from synapse.storage.databases.main.presence import PresenceBackgroundUpdateStore
from synapse.storage.databases.main.profile import ProfileWorkerStore
from synapse.storage.databases.main.pusher import (
    PusherBackgroundUpdatesStore,
    PusherWorkerStore,
@@ -126,7 +124,6 @@ BOOLEAN_COLUMNS = {
    "users": ["shadow_banned", "approved"],
    "un_partial_stated_event_stream": ["rejection_status_changed"],
    "users_who_share_rooms": ["share_private"],
    "per_user_experimental_features": ["enabled"],
}


@@ -197,11 +194,6 @@ IGNORED_TABLES = {
    "ui_auth_sessions",
    "ui_auth_sessions_credentials",
    "ui_auth_sessions_ips",
    # Ignore the worker locks table, as a) there shouldn't be any acquired locks
    # after porting, and b) the circular foreign key constraints make it hard to
    # port.
    "worker_read_write_locks_mode",
    "worker_read_write_locks",
}


@@ -237,15 +229,12 @@ class Store(
    EndToEndRoomKeyBackgroundStore,
    StatsStore,
    AccountDataWorkerStore,
    FilteringWorkerStore,
    ProfileWorkerStore,
    PushRuleStore,
    PusherWorkerStore,
    PusherBackgroundUpdatesStore,
    PresenceBackgroundUpdateStore,
    ReceiptsBackgroundUpdateStore,
    RelationsWorkerStore,
    EventFederationWorkerStore,
):
    def execute(self, f: Callable[..., R], *args: Any, **kwargs: Any) -> Awaitable[R]:
        return self.db_pool.runInteraction(f.__name__, f, *args, **kwargs)
@@ -810,9 +799,7 @@ class Porter:
        )
        # Map from table name to args passed to `handle_table`, i.e. a tuple
        # of: `postgres_size`, `table_size`, `forward_chunk`, `backward_chunk`.
        tables_to_port_info_map = {
            r[0]: r[1:] for r in setup_res if r[0] not in IGNORED_TABLES
        }
        tables_to_port_info_map = {r[0]: r[1:] for r in setup_res}

        # Step 5. Do the copying.
        #
@@ -1378,9 +1365,6 @@ def main() -> None:
        sys.stderr.write("Database must use the 'psycopg2' connector.\n")
        sys.exit(3)

    # Don't run the background tasks that get started by the data stores.
    hs_config["run_background_tasks_on"] = "some_other_process"

    config = HomeServerConfig()
    config.parse_config_dict(hs_config, "", "")

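The `tables_to_port_info_map` change above is just a filtered dict comprehension: rows whose table name appears in `IGNORED_TABLES` are dropped before any copying starts. A toy illustration with made-up rows:

IGNORED_TABLES = {"worker_read_write_locks", "worker_read_write_locks_mode"}

# Each row: (table_name, postgres_size, table_size, forward_chunk, backward_chunk)
setup_res = [
    ("events", 0, 1000, 0, 0),
    ("worker_read_write_locks", 0, 5, 0, 0),
]

tables_to_port_info_map = {
    r[0]: r[1:] for r in setup_res if r[0] not in IGNORED_TABLES
}
assert "worker_read_write_locks" not in tables_to_port_info_map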
@@ -1,4 +1,4 @@
# Copyright 2023 The Matrix.org Foundation.
# Copyright 2014 - 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,6 +14,7 @@
import logging
from typing import TYPE_CHECKING, Optional, Tuple

import pymacaroons
from netaddr import IPAddress

from twisted.web.server import Request
@@ -23,11 +24,19 @@ from synapse.api.constants import EventTypes, HistoryVisibility, Membership
from synapse.api.errors import (
    AuthError,
    Codes,
    InvalidClientTokenError,
    MissingClientTokenError,
    UnstableSpecAuthError,
)
from synapse.appservice import ApplicationService
from synapse.logging.opentracing import trace
from synapse.http import get_request_user_agent
from synapse.http.site import SynapseRequest
from synapse.logging.opentracing import (
    active_span,
    force_tracing,
    start_active_span,
    trace,
)
from synapse.types import Requester, create_requester
from synapse.util.cancellation import cancellable

@@ -37,13 +46,26 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)


class BaseAuth:
    """Common base class for all auth implementations."""
# guests always get this device id.
GUEST_DEVICE_ID = "guest_device"


class Auth:
    """
    This class contains functions for authenticating users of our client-server API.
    """

    def __init__(self, hs: "HomeServer"):
        self.hs = hs
        self.clock = hs.get_clock()
        self.store = hs.get_datastores().main
        self._account_validity_handler = hs.get_account_validity_handler()
        self._storage_controllers = hs.get_storage_controllers()
        self._macaroon_generator = hs.get_macaroon_generator()

        self._track_appservice_user_ips = hs.config.appservice.track_appservice_user_ips
        self._track_puppeted_user_ips = hs.config.api.track_puppeted_user_ips
        self._force_tracing_for_users = hs.config.tracing.force_tracing_for_users

    async def check_user_in_room(
        self,
@@ -97,49 +119,139 @@ class BaseAuth:
                errcode=Codes.NOT_JOINED,
            )

    @trace
    async def check_user_in_room_or_world_readable(
        self, room_id: str, requester: Requester, allow_departed_users: bool = False
    ) -> Tuple[str, Optional[str]]:
        """Checks that the user is or was in the room or the room is world
        readable. If it isn't then an exception is raised.
    @cancellable
    async def get_user_by_req(
        self,
        request: SynapseRequest,
        allow_guest: bool = False,
        allow_expired: bool = False,
    ) -> Requester:
        """Get a registered user's ID.

        Args:
            room_id: room to check
            user_id: user to check
            allow_departed_users: if True, accept users that were previously
                members but have now departed
            request: An HTTP request with an access_token query parameter.
            allow_guest: If False, will raise an AuthError if the user making the
                request is a guest.
            allow_expired: If True, allow the request through even if the account
                is expired, or session token lifetime has ended. Note that
                /login will deliver access tokens regardless of expiration.

        Returns:
            Resolves to the current membership of the user in the room and the
            membership event ID of the user. If the user is not in the room and
            never has been, then `(Membership.JOIN, None)` is returned.
            Resolves to the requester
        Raises:
            InvalidClientCredentialsError if no user by that token exists or the token
                is invalid.
            AuthError if access is denied for the user in the access token
        """
        parent_span = active_span()
        with start_active_span("get_user_by_req"):
            requester = await self._wrapped_get_user_by_req(
                request, allow_guest, allow_expired
            )

            if parent_span:
                if requester.authenticated_entity in self._force_tracing_for_users:
                    # request tracing is enabled for this user, so we need to force it
                    # tracing on for the parent span (which will be the servlet span).
                    #
                    # It's too late for the get_user_by_req span to inherit the setting,
                    # so we also force it on for that.
                    force_tracing()
                    force_tracing(parent_span)
                parent_span.set_tag(
                    "authenticated_entity", requester.authenticated_entity
                )
                parent_span.set_tag("user_id", requester.user.to_string())
                if requester.device_id is not None:
                    parent_span.set_tag("device_id", requester.device_id)
                if requester.app_service is not None:
                    parent_span.set_tag("appservice_id", requester.app_service.id)
            return requester

    @cancellable
    async def _wrapped_get_user_by_req(
        self,
        request: SynapseRequest,
        allow_guest: bool,
        allow_expired: bool,
    ) -> Requester:
        """Helper for get_user_by_req

        Once get_user_by_req has set up the opentracing span, this does the actual work.
        """
        try:
            # check_user_in_room will return the most recent membership
            # event for the user if:
            # * The user is a non-guest user, and was ever in the room
            # * The user is a guest user, and has joined the room
            # else it will throw.
            return await self.check_user_in_room(
                room_id, requester, allow_departed_users=allow_departed_users
            )
        except AuthError:
            visibility = await self._storage_controllers.state.get_current_state_event(
                room_id, EventTypes.RoomHistoryVisibility, ""
            )
            if (
                visibility
                and visibility.content.get("history_visibility")
                == HistoryVisibility.WORLD_READABLE
            ip_addr = request.getClientAddress().host
            user_agent = get_request_user_agent(request)

            access_token = self.get_access_token_from_request(request)

            # First check if it could be a request from an appservice
            requester = await self._get_appservice_user(request)
            if not requester:
                # If not, it should be from a regular user
                requester = await self.get_user_by_access_token(
                    access_token, allow_expired=allow_expired
                )

                # Deny the request if the user account has expired.
                # This check is only done for regular users, not appservice ones.
                if not allow_expired:
                    if await self._account_validity_handler.is_user_expired(
                        requester.user.to_string()
                    ):
                        # Raise the error if either an account validity module has determined
                        # the account has expired, or the legacy account validity
                        # implementation is enabled and determined the account has expired
                        raise AuthError(
                            403,
                            "User account has expired",
                            errcode=Codes.EXPIRED_ACCOUNT,
                        )

            if ip_addr and (
                not requester.app_service or self._track_appservice_user_ips
            ):
                return Membership.JOIN, None
            raise AuthError(
                403,
                "User %r not in room %s, and room previews are disabled"
                % (requester.user, room_id),
            )
                # XXX(quenting): I'm 95% confident that we could skip setting the
                # device_id to "dummy-device" for appservices, and that the only impact
                # would be some rows which would not deduplicate in the 'user_ips'
                # table during the transition
                recorded_device_id = (
                    "dummy-device"
                    if requester.device_id is None and requester.app_service is not None
                    else requester.device_id
                )
                await self.store.insert_client_ip(
                    user_id=requester.authenticated_entity,
                    access_token=access_token,
                    ip=ip_addr,
                    user_agent=user_agent,
                    device_id=recorded_device_id,
                )

                # Track also the puppeted user client IP if enabled and the user is puppeting
                if (
                    requester.user.to_string() != requester.authenticated_entity
                    and self._track_puppeted_user_ips
                ):
                    await self.store.insert_client_ip(
                        user_id=requester.user.to_string(),
                        access_token=access_token,
                        ip=ip_addr,
                        user_agent=user_agent,
                        device_id=requester.device_id,
                    )

            if requester.is_guest and not allow_guest:
                raise AuthError(
                    403,
                    "Guest access not allowed",
                    errcode=Codes.GUEST_ACCESS_FORBIDDEN,
                )

            request.requester = requester
            return requester
        except KeyError:
            raise MissingClientTokenError()

    async def validate_appservice_can_control_user_id(
        self, app_service: ApplicationService, user_id: str
@@ -172,16 +284,184 @@ class BaseAuth:
                403, "Application service has not registered this user (%s)" % user_id
            )

    @cancellable
    async def _get_appservice_user(self, request: Request) -> Optional[Requester]:
        """
        Given a request, reads the request parameters to determine:
        - whether it's an application service that's making this request
        - what user the application service should be treated as controlling
          (the user_id URI parameter allows an application service to masquerade
          as any applicable user in its namespace)
        - what device the application service should be treated as controlling
          (the device_id[^1] URI parameter allows an application service to masquerade
          as any device that exists for the relevant user)

        [^1] Unstable and provided by MSC3202.
             Must use `org.matrix.msc3202.device_id` in place of `device_id` for now.

        Returns:
            the application service `Requester` of that request

        Postconditions:
        - The `app_service` field in the returned `Requester` is set
        - The `user_id` field in the returned `Requester` is either the application
          service sender or the controlled user set by the `user_id` URI parameter
        - The returned application service is permitted to control the returned user ID.
        - The returned device ID, if present, has been checked to be a valid device ID
          for the returned user ID.
        """
        DEVICE_ID_ARG_NAME = b"org.matrix.msc3202.device_id"

        app_service = self.store.get_app_service_by_token(
            self.get_access_token_from_request(request)
        )
        if app_service is None:
            return None

        if app_service.ip_range_whitelist:
            ip_address = IPAddress(request.getClientAddress().host)
            if ip_address not in app_service.ip_range_whitelist:
                return None

        # This will always be set by the time Twisted calls us.
        assert request.args is not None

        if b"user_id" in request.args:
            effective_user_id = request.args[b"user_id"][0].decode("utf8")
            await self.validate_appservice_can_control_user_id(
                app_service, effective_user_id
            )
        else:
            effective_user_id = app_service.sender

        effective_device_id: Optional[str] = None

        if (
            self.hs.config.experimental.msc3202_device_masquerading_enabled
            and DEVICE_ID_ARG_NAME in request.args
        ):
            effective_device_id = request.args[DEVICE_ID_ARG_NAME][0].decode("utf8")
            # We only just set this so it can't be None!
            assert effective_device_id is not None
            device_opt = await self.store.get_device(
                effective_user_id, effective_device_id
            )
            if device_opt is None:
                # For now, use 400 M_EXCLUSIVE if the device doesn't exist.
                # This is an open thread of discussion on MSC3202 as of 2021-12-09.
                raise AuthError(
                    400,
                    f"Application service trying to use a device that doesn't exist ('{effective_device_id}' for {effective_user_id})",
                    Codes.EXCLUSIVE,
                )

        return create_requester(
            effective_user_id, app_service=app_service, device_id=effective_device_id
        )

    async def get_user_by_access_token(
        self,
        token: str,
        allow_expired: bool = False,
    ) -> Requester:
        """Validate access token and get user_id from it

        Args:
            token: The access token to get the user by
            allow_expired: If False, raises an InvalidClientTokenError
                if the token is expired

        Raises:
            InvalidClientTokenError if a user by that token exists, but the token is
                expired
            InvalidClientCredentialsError if no user by that token exists or the token
                is invalid
        """

        # First look in the database to see if the access token is present
        # as an opaque token.
        user_info = await self.store.get_user_by_access_token(token)
        if user_info:
            valid_until_ms = user_info.valid_until_ms
            if (
                not allow_expired
                and valid_until_ms is not None
                and valid_until_ms < self.clock.time_msec()
            ):
                # there was a valid access token, but it has expired.
                # soft-logout the user.
                raise InvalidClientTokenError(
                    msg="Access token has expired", soft_logout=True
                )

            # Mark the token as used. This is used to invalidate old refresh
            # tokens after some time.
            await self.store.mark_access_token_as_used(user_info.token_id)

            requester = create_requester(
                user_id=user_info.user_id,
                access_token_id=user_info.token_id,
                is_guest=user_info.is_guest,
                shadow_banned=user_info.shadow_banned,
                device_id=user_info.device_id,
                authenticated_entity=user_info.token_owner,
            )

            return requester

        # If the token isn't found in the database, then it could still be a
        # macaroon for a guest, so we check that here.
        try:
            user_id = self._macaroon_generator.verify_guest_token(token)

            # Guest access tokens are not stored in the database (there can
            # only be one access token per guest, anyway).
            #
            # In order to prevent guest access tokens being used as regular
            # user access tokens (and hence getting around the invalidation
            # process), we look up the user id and check that it is indeed
            # a guest user.
            #
            # It would of course be much easier to store guest access
            # tokens in the database as well, but that would break existing
            # guest tokens.
            stored_user = await self.store.get_user_by_id(user_id)
            if not stored_user:
                raise InvalidClientTokenError("Unknown user_id %s" % user_id)
            if not stored_user["is_guest"]:
                raise InvalidClientTokenError(
                    "Guest access token used for regular user"
                )

            return create_requester(
                user_id=user_id,
                is_guest=True,
                # all guests get the same device id
                device_id=GUEST_DEVICE_ID,
                authenticated_entity=user_id,
            )
        except (
            pymacaroons.exceptions.MacaroonException,
            TypeError,
            ValueError,
        ) as e:
            logger.warning(
                "Invalid access token in auth: %s %s.",
                type(e),
                e,
            )
            raise InvalidClientTokenError("Invalid access token passed.")

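The guest-token fallback above relies on macaroons: self-authenticating tokens whose caveats are checked against the server's secret key rather than a database row. A compact, illustrative sketch with pymacaroons — the caveat strings mirror the general shape of Synapse's scheme, but the key and user ID are made up:

import pymacaroons

key = "server-signing-secret"  # made-up secret

# Mint a guest token as a macaroon carrying caveats.
m = pymacaroons.Macaroon(location="example.com", identifier="key", key=key)
m.add_first_party_caveat("gen = 1")
m.add_first_party_caveat("type = access")
m.add_first_party_caveat("user_id = @guest123:example.com")
m.add_first_party_caveat("guest = true")
token = m.serialize()

# Verification fails unless every caveat is satisfied and the signature matches.
v = pymacaroons.Verifier()
v.satisfy_exact("gen = 1")
v.satisfy_exact("type = access")
v.satisfy_general(lambda caveat: caveat.startswith("user_id = "))
v.satisfy_exact("guest = true")
assert v.verify(pymacaroons.Macaroon.deserialize(token), key)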
    async def is_server_admin(self, requester: Requester) -> bool:
        """Check if the given user is a local server admin.

        Args:
            requester: user to check
            requester: The user making the request, according to the access token.

        Returns:
            True if the user is an admin
        """
        raise NotImplementedError()
        return await self.store.is_server_admin(requester.user)

    async def check_can_change_room_list(
        self, room_id: str, requester: Requester
@@ -190,8 +470,8 @@ class BaseAuth:
        published room list.

        Args:
            room_id
            user
            room_id: The room to check.
            requester: The user making the request, according to the access token.
        """

        is_admin = await self.is_server_admin(requester)
@@ -238,6 +518,7 @@ class BaseAuth:
        return bool(query_params) or bool(auth_headers)

    @staticmethod
    @cancellable
    def get_access_token_from_request(request: Request) -> str:
        """Extracts the access_token from the request.

@@ -275,77 +556,47 @@ class BaseAuth:

        return query_params[0].decode("ascii")

    @cancellable
    async def get_appservice_user(
        self, request: Request, access_token: str
    ) -> Optional[Requester]:
        """
        Given a request, reads the request parameters to determine:
        - whether it's an application service that's making this request
        - what user the application service should be treated as controlling
          (the user_id URI parameter allows an application service to masquerade
          as any applicable user in its namespace)
        - what device the application service should be treated as controlling
          (the device_id[^1] URI parameter allows an application service to masquerade
          as any device that exists for the relevant user)
    @trace
    async def check_user_in_room_or_world_readable(
        self, room_id: str, requester: Requester, allow_departed_users: bool = False
    ) -> Tuple[str, Optional[str]]:
        """Checks that the user is or was in the room or the room is world
        readable. If it isn't then an exception is raised.

        [^1] Unstable and provided by MSC3202.
             Must use `org.matrix.msc3202.device_id` in place of `device_id` for now.
        Args:
            room_id: The room to check.
            requester: The user making the request, according to the access token.
            allow_departed_users: If True, accept users that were previously
                members but have now departed.

        Returns:
            the application service `Requester` of that request

        Postconditions:
        - The `app_service` field in the returned `Requester` is set
        - The `user_id` field in the returned `Requester` is either the application
          service sender or the controlled user set by the `user_id` URI parameter
        - The returned application service is permitted to control the returned user ID.
        - The returned device ID, if present, has been checked to be a valid device ID
          for the returned user ID.
            Resolves to the current membership of the user in the room and the
            membership event ID of the user. If the user is not in the room and
            never has been, then `(Membership.JOIN, None)` is returned.
        """
        DEVICE_ID_ARG_NAME = b"org.matrix.msc3202.device_id"

        app_service = self.store.get_app_service_by_token(access_token)
        if app_service is None:
            return None

        if app_service.ip_range_whitelist:
            ip_address = IPAddress(request.getClientAddress().host)
            if ip_address not in app_service.ip_range_whitelist:
                return None

        # This will always be set by the time Twisted calls us.
        assert request.args is not None

        if b"user_id" in request.args:
            effective_user_id = request.args[b"user_id"][0].decode("utf8")
            await self.validate_appservice_can_control_user_id(
                app_service, effective_user_id
        try:
            # check_user_in_room will return the most recent membership
            # event for the user if:
            # * The user is a non-guest user, and was ever in the room
            # * The user is a guest user, and has joined the room
            # else it will throw.
            return await self.check_user_in_room(
                room_id, requester, allow_departed_users=allow_departed_users
            )
        else:
            effective_user_id = app_service.sender

        effective_device_id: Optional[str] = None

        if (
            self.hs.config.experimental.msc3202_device_masquerading_enabled
            and DEVICE_ID_ARG_NAME in request.args
        ):
            effective_device_id = request.args[DEVICE_ID_ARG_NAME][0].decode("utf8")
            # We only just set this so it can't be None!
            assert effective_device_id is not None
            device_opt = await self.store.get_device(
                effective_user_id, effective_device_id
        except AuthError:
            visibility = await self._storage_controllers.state.get_current_state_event(
                room_id, EventTypes.RoomHistoryVisibility, ""
            )
            if (
                visibility
                and visibility.content.get("history_visibility")
                == HistoryVisibility.WORLD_READABLE
            ):
                return Membership.JOIN, None
            raise UnstableSpecAuthError(
                403,
                "User %s not in room %s, and room previews are disabled"
                % (requester.user, room_id),
                errcode=Codes.NOT_JOINED,
            )
            if device_opt is None:
                # For now, use 400 M_EXCLUSIVE if the device doesn't exist.
                # This is an open thread of discussion on MSC3202 as of 2021-12-09.
                raise AuthError(
                    400,
                    f"Application service trying to use a device that doesn't exist ('{effective_device_id}' for {effective_user_id})",
                    Codes.EXCLUSIVE,
                )

        return create_requester(
            effective_user_id, app_service=app_service, device_id=effective_device_id
        )
@@ -1,175 +0,0 @@
# Copyright 2023 The Matrix.org Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple

from typing_extensions import Protocol

from twisted.web.server import Request

from synapse.appservice import ApplicationService
from synapse.http.site import SynapseRequest
from synapse.types import Requester

# guests always get this device id.
GUEST_DEVICE_ID = "guest_device"


class Auth(Protocol):
    """The interface that an auth provider must implement."""

    async def check_user_in_room(
        self,
        room_id: str,
        requester: Requester,
        allow_departed_users: bool = False,
    ) -> Tuple[str, Optional[str]]:
        """Check if the user is in the room, or was at some point.
        Args:
            room_id: The room to check.

            user_id: The user to check.

            current_state: Optional map of the current state of the room.
                If provided then that map is used to check whether they are a
                member of the room. Otherwise the current membership is
                loaded from the database.

            allow_departed_users: if True, accept users that were previously
                members but have now departed.

        Raises:
            AuthError if the user is/was not in the room.
        Returns:
            The current membership of the user in the room and the
            membership event ID of the user.
        """

    async def get_user_by_req(
        self,
        request: SynapseRequest,
        allow_guest: bool = False,
        allow_expired: bool = False,
    ) -> Requester:
        """Get a registered user's ID.

        Args:
            request: An HTTP request with an access_token query parameter.
            allow_guest: If False, will raise an AuthError if the user making the
                request is a guest.
            allow_expired: If True, allow the request through even if the account
                is expired, or session token lifetime has ended. Note that
                /login will deliver access tokens regardless of expiration.

        Returns:
            Resolves to the requester
        Raises:
            InvalidClientCredentialsError if no user by that token exists or the token
                is invalid.
            AuthError if access is denied for the user in the access token
        """

    async def validate_appservice_can_control_user_id(
        self, app_service: ApplicationService, user_id: str
    ) -> None:
        """Validates that the app service is allowed to control
        the given user.

        Args:
            app_service: The app service that controls the user
            user_id: The author MXID that the app service is controlling

        Raises:
            AuthError: If the application service is not allowed to control the user
                (user namespace regex does not match, wrong homeserver, etc)
                or if the user has not been registered yet.
        """

    async def get_user_by_access_token(
        self,
        token: str,
        allow_expired: bool = False,
    ) -> Requester:
        """Validate access token and get user_id from it

        Args:
            token: The access token to get the user by
            allow_expired: If False, raises an InvalidClientTokenError
                if the token is expired

        Raises:
            InvalidClientTokenError if a user by that token exists, but the token is
                expired
            InvalidClientCredentialsError if no user by that token exists or the token
                is invalid
        """

    async def is_server_admin(self, requester: Requester) -> bool:
        """Check if the given user is a local server admin.

        Args:
            requester: user to check

        Returns:
            True if the user is an admin
        """

    async def check_can_change_room_list(
        self, room_id: str, requester: Requester
    ) -> bool:
        """Determine whether the user is allowed to edit the room's entry in the
        published room list.

        Args:
            room_id
            user
        """

    @staticmethod
    def has_access_token(request: Request) -> bool:
        """Checks if the request has an access_token.

        Returns:
            False if no access_token was given, True otherwise.
        """

    @staticmethod
    def get_access_token_from_request(request: Request) -> str:
        """Extracts the access_token from the request.

        Args:
            request: The http request.
        Returns:
            The access_token
        Raises:
            MissingClientTokenError: If there isn't a single access_token in the
                request
        """

    async def check_user_in_room_or_world_readable(
        self, room_id: str, requester: Requester, allow_departed_users: bool = False
    ) -> Tuple[str, Optional[str]]:
        """Checks that the user is or was in the room or the room is world
        readable. If it isn't then an exception is raised.

        Args:
            room_id: room to check
            user_id: user to check
            allow_departed_users: if True, accept users that were previously
                members but have now departed

        Returns:
            Resolves to the current membership of the user in the room and the
            membership event ID of the user. If the user is not in the room and
            never has been, then `(Membership.JOIN, None)` is returned.
        """
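Because the interface deleted above is a `typing_extensions.Protocol`, conformance is structural: any class whose methods match the declared signatures satisfies it without inheriting from it. A tiny illustration of that mechanism (all names invented):

from typing_extensions import Protocol


class Greeter(Protocol):
    def greet(self, name: str) -> str: ...


class EnglishGreeter:
    # No explicit subclassing of Greeter required.
    def greet(self, name: str) -> str:
        return f"hello {name}"


def welcome(g: Greeter) -> None:
    # Type-checks: EnglishGreeter structurally matches Greeter.
    print(g.greet("alice"))


welcome(EnglishGreeter())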
@@ -1,291 +0,0 @@
# Copyright 2023 The Matrix.org Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING

import pymacaroons

from synapse.api.errors import (
    AuthError,
    Codes,
    InvalidClientTokenError,
    MissingClientTokenError,
)
from synapse.http import get_request_user_agent
from synapse.http.site import SynapseRequest
from synapse.logging.opentracing import active_span, force_tracing, start_active_span
from synapse.types import Requester, create_requester
from synapse.util.cancellation import cancellable

from . import GUEST_DEVICE_ID
from .base import BaseAuth

if TYPE_CHECKING:
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)


class InternalAuth(BaseAuth):
    """
    This class contains functions for authenticating users of our client-server API.
    """

    def __init__(self, hs: "HomeServer"):
        super().__init__(hs)
        self.clock = hs.get_clock()
        self._account_validity_handler = hs.get_account_validity_handler()
        self._macaroon_generator = hs.get_macaroon_generator()

        self._track_appservice_user_ips = hs.config.appservice.track_appservice_user_ips
        self._track_puppeted_user_ips = hs.config.api.track_puppeted_user_ips
        self._force_tracing_for_users = hs.config.tracing.force_tracing_for_users

    @cancellable
    async def get_user_by_req(
        self,
        request: SynapseRequest,
        allow_guest: bool = False,
        allow_expired: bool = False,
    ) -> Requester:
        """Get a registered user's ID.

        Args:
            request: An HTTP request with an access_token query parameter.
            allow_guest: If False, will raise an AuthError if the user making the
                request is a guest.
            allow_expired: If True, allow the request through even if the account
                is expired, or session token lifetime has ended. Note that
                /login will deliver access tokens regardless of expiration.

        Returns:
            Resolves to the requester
        Raises:
            InvalidClientCredentialsError if no user by that token exists or the token
                is invalid.
            AuthError if access is denied for the user in the access token
        """
        parent_span = active_span()
        with start_active_span("get_user_by_req"):
            requester = await self._wrapped_get_user_by_req(
                request, allow_guest, allow_expired
            )

            if parent_span:
                if requester.authenticated_entity in self._force_tracing_for_users:
                    # request tracing is enabled for this user, so we need to force it
                    # tracing on for the parent span (which will be the servlet span).
                    #
                    # It's too late for the get_user_by_req span to inherit the setting,
                    # so we also force it on for that.
                    force_tracing()
                    force_tracing(parent_span)
                parent_span.set_tag(
                    "authenticated_entity", requester.authenticated_entity
                )
                parent_span.set_tag("user_id", requester.user.to_string())
                if requester.device_id is not None:
                    parent_span.set_tag("device_id", requester.device_id)
                if requester.app_service is not None:
                    parent_span.set_tag("appservice_id", requester.app_service.id)
            return requester

    @cancellable
    async def _wrapped_get_user_by_req(
        self,
        request: SynapseRequest,
        allow_guest: bool,
        allow_expired: bool,
    ) -> Requester:
        """Helper for get_user_by_req

        Once get_user_by_req has set up the opentracing span, this does the actual work.
        """
        try:
            ip_addr = request.getClientAddress().host
            user_agent = get_request_user_agent(request)

            access_token = self.get_access_token_from_request(request)

            # First check if it could be a request from an appservice
            requester = await self.get_appservice_user(request, access_token)
            if not requester:
                # If not, it should be from a regular user
                requester = await self.get_user_by_access_token(
                    access_token, allow_expired=allow_expired
                )

                # Deny the request if the user account has expired.
                # This check is only done for regular users, not appservice ones.
                if not allow_expired:
                    if await self._account_validity_handler.is_user_expired(
                        requester.user.to_string()
                    ):
                        # Raise the error if either an account validity module has determined
                        # the account has expired, or the legacy account validity
                        # implementation is enabled and determined the account has expired
                        raise AuthError(
                            403,
                            "User account has expired",
                            errcode=Codes.EXPIRED_ACCOUNT,
                        )

            if ip_addr and (
                not requester.app_service or self._track_appservice_user_ips
            ):
                # XXX(quenting): I'm 95% confident that we could skip setting the
                # device_id to "dummy-device" for appservices, and that the only impact
                # would be some rows which would not deduplicate in the 'user_ips'
                # table during the transition
                recorded_device_id = (
                    "dummy-device"
                    if requester.device_id is None and requester.app_service is not None
                    else requester.device_id
                )
                await self.store.insert_client_ip(
                    user_id=requester.authenticated_entity,
                    access_token=access_token,
                    ip=ip_addr,
                    user_agent=user_agent,
                    device_id=recorded_device_id,
                )

                # Track also the puppeted user client IP if enabled and the user is puppeting
                if (
                    requester.user.to_string() != requester.authenticated_entity
                    and self._track_puppeted_user_ips
                ):
                    await self.store.insert_client_ip(
                        user_id=requester.user.to_string(),
                        access_token=access_token,
                        ip=ip_addr,
                        user_agent=user_agent,
                        device_id=requester.device_id,
                    )

            if requester.is_guest and not allow_guest:
                raise AuthError(
                    403,
                    "Guest access not allowed",
                    errcode=Codes.GUEST_ACCESS_FORBIDDEN,
                )

            request.requester = requester
            return requester
        except KeyError:
            raise MissingClientTokenError()

    async def get_user_by_access_token(
        self,
        token: str,
        allow_expired: bool = False,
    ) -> Requester:
        """Validate access token and get user_id from it

        Args:
            token: The access token to get the user by
            allow_expired: If False, raises an InvalidClientTokenError
                if the token is expired

        Raises:
            InvalidClientTokenError if a user by that token exists, but the token is
                expired
            InvalidClientCredentialsError if no user by that token exists or the token
                is invalid
        """

        # First look in the database to see if the access token is present
        # as an opaque token.
        user_info = await self.store.get_user_by_access_token(token)
        if user_info:
            valid_until_ms = user_info.valid_until_ms
            if (
                not allow_expired
                and valid_until_ms is not None
                and valid_until_ms < self.clock.time_msec()
            ):
                # there was a valid access token, but it has expired.
                # soft-logout the user.
                raise InvalidClientTokenError(
                    msg="Access token has expired", soft_logout=True
                )

            # Mark the token as used. This is used to invalidate old refresh
            # tokens after some time.
            await self.store.mark_access_token_as_used(user_info.token_id)

            requester = create_requester(
                user_id=user_info.user_id,
                access_token_id=user_info.token_id,
                is_guest=user_info.is_guest,
                shadow_banned=user_info.shadow_banned,
                device_id=user_info.device_id,
                authenticated_entity=user_info.token_owner,
            )

            return requester

        # If the token isn't found in the database, then it could still be a
        # macaroon for a guest, so we check that here.
        try:
            user_id = self._macaroon_generator.verify_guest_token(token)

            # Guest access tokens are not stored in the database (there can
            # only be one access token per guest, anyway).
            #
            # In order to prevent guest access tokens being used as regular
            # user access tokens (and hence getting around the invalidation
            # process), we look up the user id and check that it is indeed
            # a guest user.
            #
            # It would of course be much easier to store guest access
            # tokens in the database as well, but that would break existing
            # guest tokens.
            stored_user = await self.store.get_user_by_id(user_id)
            if not stored_user:
                raise InvalidClientTokenError("Unknown user_id %s" % user_id)
            if not stored_user["is_guest"]:
                raise InvalidClientTokenError(
                    "Guest access token used for regular user"
                )

            return create_requester(
                user_id=user_id,
                is_guest=True,
                # all guests get the same device id
                device_id=GUEST_DEVICE_ID,
                authenticated_entity=user_id,
            )
        except (
            pymacaroons.exceptions.MacaroonException,
            TypeError,
            ValueError,
        ) as e:
            logger.warning(
                "Invalid access token in auth: %s %s.",
                type(e),
                e,
            )
            raise InvalidClientTokenError("Invalid access token passed.")

    async def is_server_admin(self, requester: Requester) -> bool:
        """Check if the given user is a local server admin.

        Args:
            requester: The user making the request, according to the access token.

        Returns:
            True if the user is an admin
        """
        return await self.store.is_server_admin(requester.user)
@@ -1,352 +0,0 @@
# Copyright 2023 The Matrix.org Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Any, Dict, List, Optional
from urllib.parse import urlencode

from authlib.oauth2 import ClientAuth
from authlib.oauth2.auth import encode_client_secret_basic, encode_client_secret_post
from authlib.oauth2.rfc7523 import ClientSecretJWT, PrivateKeyJWT, private_key_jwt_sign
from authlib.oauth2.rfc7662 import IntrospectionToken
from authlib.oidc.discovery import OpenIDProviderMetadata, get_well_known_url

from twisted.web.client import readBody
from twisted.web.http_headers import Headers

from synapse.api.auth.base import BaseAuth
from synapse.api.errors import (
    AuthError,
    HttpResponseException,
    InvalidClientTokenError,
    OAuthInsufficientScopeError,
    StoreError,
    SynapseError,
)
from synapse.http.site import SynapseRequest
from synapse.logging.context import make_deferred_yieldable
from synapse.types import Requester, UserID, create_requester
from synapse.util import json_decoder
from synapse.util.caches.cached_call import RetryOnExceptionCachedCall

if TYPE_CHECKING:
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)

# Scope as defined by MSC2967
# https://github.com/matrix-org/matrix-spec-proposals/pull/2967
SCOPE_MATRIX_API = "urn:matrix:org.matrix.msc2967.client:api:*"
SCOPE_MATRIX_GUEST = "urn:matrix:org.matrix.msc2967.client:api:guest"
SCOPE_MATRIX_DEVICE_PREFIX = "urn:matrix:org.matrix.msc2967.client:device:"

# Scope which allows access to the Synapse admin API
SCOPE_SYNAPSE_ADMIN = "urn:synapse:admin:*"


def scope_to_list(scope: str) -> List[str]:
    """Convert a scope string to a list of scope tokens"""
    return scope.strip().split(" ")


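`scope_to_list` is a plain split of the space-delimited OAuth scope string, so checking what a token grants reduces to a membership test. A short standalone illustration using the constants defined above:

from typing import List

SCOPE_MATRIX_API = "urn:matrix:org.matrix.msc2967.client:api:*"
SCOPE_SYNAPSE_ADMIN = "urn:synapse:admin:*"


def scope_to_list(scope: str) -> List[str]:
    """Convert a scope string to a list of scope tokens."""
    return scope.strip().split(" ")


granted = scope_to_list(f"{SCOPE_MATRIX_API} {SCOPE_SYNAPSE_ADMIN}")
assert SCOPE_MATRIX_API in granted
assert SCOPE_SYNAPSE_ADMIN in granted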
class PrivateKeyJWTWithKid(PrivateKeyJWT):  # type: ignore[misc]
    """An implementation of the private_key_jwt client auth method that includes a kid header.

    This is needed because some providers (Keycloak) require the kid header to figure
    out which key to use to verify the signature.
    """

    def sign(self, auth: Any, token_endpoint: str) -> bytes:
        return private_key_jwt_sign(
            auth.client_secret,
            client_id=auth.client_id,
            token_endpoint=token_endpoint,
            claims=self.claims,
            header={"kid": auth.client_secret["kid"]},
        )

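A quick illustration of what the `kid` header buys us; the key below is hypothetical and its cryptographic fields are elided:

```python
# Hypothetical JWK registered for the client; real keys also carry "n"/"e" etc.
jwk = {"kty": "RSA", "kid": "synapse-client-key-1"}

# The JOSE header the sign() override adds. Without the explicit "kid",
# providers such as Keycloak cannot tell which of the client's registered
# keys verifies the client_assertion JWT.
jose_header = {"alg": "RS256", "kid": jwk["kid"]}
assert jose_header["kid"] == "synapse-client-key-1"
```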
class MSC3861DelegatedAuth(BaseAuth):
    AUTH_METHODS = {
        "client_secret_post": encode_client_secret_post,
        "client_secret_basic": encode_client_secret_basic,
        "client_secret_jwt": ClientSecretJWT(),
        "private_key_jwt": PrivateKeyJWTWithKid(),
    }

    EXTERNAL_ID_PROVIDER = "oauth-delegated"

    def __init__(self, hs: "HomeServer"):
        super().__init__(hs)

        self._config = hs.config.experimental.msc3861
        auth_method = MSC3861DelegatedAuth.AUTH_METHODS.get(
            self._config.client_auth_method.value, None
        )
        # Those assertions are already checked when parsing the config
        assert self._config.enabled, "OAuth delegation is not enabled"
        assert self._config.issuer, "No issuer provided"
        assert self._config.client_id, "No client_id provided"
        assert auth_method is not None, "Invalid client_auth_method provided"

        self._http_client = hs.get_proxied_http_client()
        self._hostname = hs.hostname
        self._admin_token = self._config.admin_token

        self._issuer_metadata = RetryOnExceptionCachedCall(self._load_metadata)

        if isinstance(auth_method, PrivateKeyJWTWithKid):
            # Use the JWK as the client secret when using the private_key_jwt method
            assert self._config.jwk, "No JWK provided"
            self._client_auth = ClientAuth(
                self._config.client_id, self._config.jwk, auth_method
            )
        else:
            # Else use the client secret
            assert self._config.client_secret, "No client_secret provided"
            self._client_auth = ClientAuth(
                self._config.client_id, self._config.client_secret, auth_method
            )

    async def _load_metadata(self) -> OpenIDProviderMetadata:
        if self._config.issuer_metadata is not None:
            return OpenIDProviderMetadata(**self._config.issuer_metadata)
        url = get_well_known_url(self._config.issuer, external=True)
        response = await self._http_client.get_json(url)
        metadata = OpenIDProviderMetadata(**response)
        # metadata.validate_introspection_endpoint()
        return metadata

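For context, a minimal sketch of the discovery step (the issuer URL is invented; assumes authlib is available):

```python
from authlib.oidc.discovery import get_well_known_url

# For a hypothetical issuer, the metadata URL is expected to resolve to the
# standard OpenID Connect discovery path; the fetched document should include
# an "introspection_endpoint" for _introspect_token to use.
url = get_well_known_url("https://auth.example.com", external=True)
print(url)  # https://auth.example.com/.well-known/openid-configuration
```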
    async def _introspect_token(self, token: str) -> IntrospectionToken:
        """
        Sends a token to the introspection endpoint and returns the introspection response

        Parameters:
            token: The token to introspect

        Raises:
            HttpResponseException: If the introspection endpoint returns a non-2xx response
            ValueError: If the introspection endpoint returns an invalid JSON response
            JSONDecodeError: If the introspection endpoint returns a non-JSON response
            Exception: If the HTTP request fails

        Returns:
            The introspection response
        """
        metadata = await self._issuer_metadata.get()
        introspection_endpoint = metadata.get("introspection_endpoint")
        raw_headers: Dict[str, str] = {
            "Content-Type": "application/x-www-form-urlencoded",
            "User-Agent": str(self._http_client.user_agent, "utf-8"),
            "Accept": "application/json",
        }

        args = {"token": token, "token_type_hint": "access_token"}
        body = urlencode(args, True)

        # Fill the body/headers with credentials
        uri, raw_headers, body = self._client_auth.prepare(
            method="POST", uri=introspection_endpoint, headers=raw_headers, body=body
        )
        headers = Headers({k: [v] for (k, v) in raw_headers.items()})

        # Do the actual request
        # We're not using the SimpleHttpClient util methods as we don't want to
        # check the HTTP status code, and we do the body encoding ourselves.
        response = await self._http_client.request(
            method="POST",
            uri=uri,
            data=body.encode("utf-8"),
            headers=headers,
        )

        resp_body = await make_deferred_yieldable(readBody(response))

        if response.code < 200 or response.code >= 300:
            raise HttpResponseException(
                response.code,
                response.phrase.decode("ascii", errors="replace"),
                resp_body,
            )

        resp = json_decoder.decode(resp_body.decode("utf-8"))

        if not isinstance(resp, dict):
            raise ValueError(
                "The introspection endpoint returned an invalid JSON response."
            )

        return IntrospectionToken(**resp)

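To make the exchange concrete, here is a hedged sketch of what a successful introspection round-trip looks like; every value is invented, and the field names follow RFC 7662 plus the MSC2967 scope convention used above:

```python
# Request body before ClientAuth.prepare() adds the client credentials:
#   token=<opaque access token>&token_type_hint=access_token
example_introspection_response = {
    "active": True,
    "sub": "an-opaque-stable-subject-id",
    "username": "alice",
    "scope": (
        "urn:matrix:org.matrix.msc2967.client:api:* "
        "urn:matrix:org.matrix.msc2967.client:device:AABBCCDD"
    ),
}
assert example_introspection_response["active"]
```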
    async def is_server_admin(self, requester: Requester) -> bool:
        return "urn:synapse:admin:*" in requester.scope

    async def get_user_by_req(
        self,
        request: SynapseRequest,
        allow_guest: bool = False,
        allow_expired: bool = False,
    ) -> Requester:
        access_token = self.get_access_token_from_request(request)

        requester = await self.get_appservice_user(request, access_token)
        if not requester:
            # TODO: we probably want to assert the allow_guest inside this call
            # so that we don't provision the user if they don't have enough permission:
            requester = await self.get_user_by_access_token(access_token, allow_expired)

        if not allow_guest and requester.is_guest:
            raise OAuthInsufficientScopeError([SCOPE_MATRIX_API])

        request.requester = requester

        return requester

    async def get_user_by_access_token(
        self,
        token: str,
        allow_expired: bool = False,
    ) -> Requester:
        if self._admin_token is not None and token == self._admin_token:
            # XXX: This is a temporary solution so that the admin API can be called by
            # the OIDC provider. This will be removed once we have OIDC client
            # credentials grant support in matrix-authentication-service.
            logger.info("Admin token used")
            # XXX: that user doesn't exist and won't be provisioned.
            # This is mostly fine for admin calls, but we should also think about
            # supporting requesters without a user_id.
            admin_user = UserID("__oidc_admin", self._hostname)
            return create_requester(
                user_id=admin_user,
                scope=["urn:synapse:admin:*"],
            )

        try:
            introspection_result = await self._introspect_token(token)
        except Exception:
            logger.exception("Failed to introspect token")
            raise SynapseError(503, "Unable to introspect the access token")

        logger.info(f"Introspection result: {introspection_result!r}")

        # TODO: introspection verification should be more extensive, especially:
        #   - verify the audience
        if not introspection_result.get("active"):
            raise InvalidClientTokenError("Token is not active")

        # Let's look at the scope
        scope: List[str] = scope_to_list(introspection_result.get("scope", ""))

        # Determine type of user based on presence of particular scopes
        has_user_scope = SCOPE_MATRIX_API in scope
        has_guest_scope = SCOPE_MATRIX_GUEST in scope

        if not has_user_scope and not has_guest_scope:
            raise InvalidClientTokenError("No scope in token granting user rights")

        # Match via the sub claim
        sub: Optional[str] = introspection_result.get("sub")
        if sub is None:
            raise InvalidClientTokenError(
                "Invalid sub claim in the introspection result"
            )

        user_id_str = await self.store.get_user_by_external_id(
            MSC3861DelegatedAuth.EXTERNAL_ID_PROVIDER, sub
        )
        if user_id_str is None:
            # If we could not find a user via the external_id, it either does not exist,
            # or the external_id was never recorded

            # TODO: claim mapping should be configurable
            username: Optional[str] = introspection_result.get("username")
            if username is None or not isinstance(username, str):
                raise AuthError(
                    500,
                    "Invalid username claim in the introspection result",
                )
            user_id = UserID(username, self._hostname)

            # First try to find a user from the username claim
            user_info = await self.store.get_userinfo_by_id(user_id=user_id.to_string())
            if user_info is None:
                # If the user does not exist, we should create it on the fly
                # TODO: we could use SCIM to provision users ahead of time and listen
                # for SCIM SET events if those ever become standard:
                # https://datatracker.ietf.org/doc/html/draft-hunt-scim-notify-00

                # TODO: claim mapping should be configurable
                # If present, use the name claim as the displayname
                name: Optional[str] = introspection_result.get("name")

                await self.store.register_user(
                    user_id=user_id.to_string(), create_profile_with_displayname=name
                )

            # And record the sub as external_id
            await self.store.record_user_external_id(
                MSC3861DelegatedAuth.EXTERNAL_ID_PROVIDER, sub, user_id.to_string()
            )
        else:
            user_id = UserID.from_string(user_id_str)

        # Find device_ids in scope
        # We only allow a single device_id in the scope, so we find them all in the
        # scope list, and raise if there are more than one. The OIDC server should be
        # the one enforcing valid scopes, so we raise a 500 if we find an invalid scope.
        device_ids = [
            tok[len(SCOPE_MATRIX_DEVICE_PREFIX) :]
            for tok in scope
            if tok.startswith(SCOPE_MATRIX_DEVICE_PREFIX)
        ]

        if len(device_ids) > 1:
            raise AuthError(
                500,
                "Multiple device IDs in scope",
            )

        device_id = device_ids[0] if device_ids else None
        if device_id is not None:
            # Sanity check the device_id
            if len(device_id) > 255 or len(device_id) < 1:
                raise AuthError(
                    500,
                    "Invalid device ID in scope",
                )

            # Create the device on the fly if it does not exist
            try:
                await self.store.get_device(
                    user_id=user_id.to_string(), device_id=device_id
                )
            except StoreError:
                await self.store.store_device(
                    user_id=user_id.to_string(),
                    device_id=device_id,
                    initial_device_display_name="OIDC-native client",
                )

        # TODO: there are a few things missing in the requester here, which still need
        # to be figured out, like:
        #   - impersonation, with the `authenticated_entity`, which is used for
        #     rate-limiting, MAU limits, etc.
        #   - shadow-banning, with the `shadow_banned` flag
        #   - a proper solution for appservices, which still needs to be figured out in
        #     the context of MSC3861
        return create_requester(
            user_id=user_id,
            device_id=device_id,
            scope=scope,
            is_guest=(has_guest_scope and not has_user_scope),
        )
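The scope-to-user-type decision above is compact enough to demonstrate standalone; this sketch copies the two constants and reimplements just the classification step:

```python
SCOPE_MATRIX_API = "urn:matrix:org.matrix.msc2967.client:api:*"
SCOPE_MATRIX_GUEST = "urn:matrix:org.matrix.msc2967.client:api:guest"

def classify(scope: list) -> str:
    """Mirror of the has_user_scope/has_guest_scope decision above."""
    has_user_scope = SCOPE_MATRIX_API in scope
    has_guest_scope = SCOPE_MATRIX_GUEST in scope
    if not has_user_scope and not has_guest_scope:
        raise ValueError("No scope in token granting user rights")
    return "guest" if (has_guest_scope and not has_user_scope) else "user"

assert classify([SCOPE_MATRIX_API]) == "user"
assert classify([SCOPE_MATRIX_GUEST]) == "guest"
assert classify([SCOPE_MATRIX_API, SCOPE_MATRIX_GUEST]) == "user"
```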
@@ -39,7 +39,7 @@ class AuthBlocking:
        self._mau_limits_reserved_threepids = (
            hs.config.server.mau_limits_reserved_threepids
        )
        self._is_mine_server_name = hs.is_mine_server_name
        self._server_name = hs.hostname
        self._track_appservice_user_ips = hs.config.appservice.track_appservice_user_ips

    async def check_auth_blocking(
@@ -77,7 +77,7 @@ class AuthBlocking:
        if requester:
            if requester.authenticated_entity.startswith("@"):
                user_id = requester.authenticated_entity
            elif self._is_mine_server_name(requester.authenticated_entity):
            elif requester.authenticated_entity == self._server_name:
                # We never block the server from doing actions on behalf of
                # users.
                return
@@ -123,6 +123,10 @@ class EventTypes:
    SpaceChild: Final = "m.space.child"
    SpaceParent: Final = "m.space.parent"

    MSC2716_INSERTION: Final = "org.matrix.msc2716.insertion"
    MSC2716_BATCH: Final = "org.matrix.msc2716.batch"
    MSC2716_MARKER: Final = "org.matrix.msc2716.marker"

    Reaction: Final = "m.reaction"


@@ -218,11 +222,21 @@ class EventContentFields:
    # Used in m.room.guest_access events.
    GUEST_ACCESS: Final = "guest_access"

    # Used on normal messages to indicate they were historically imported after the fact
    MSC2716_HISTORICAL: Final = "org.matrix.msc2716.historical"
    # For "insertion" events to indicate what the next batch ID should be in
    # order to connect to it
    MSC2716_NEXT_BATCH_ID: Final = "next_batch_id"
    # Used on "batch" events to indicate which insertion event it connects to
    MSC2716_BATCH_ID: Final = "batch_id"
    # For "marker" events
    MSC2716_INSERTION_EVENT_REFERENCE: Final = "insertion_event_reference"

    # The authorising user for joining a restricted room.
    AUTHORISING_USER: Final = "join_authorised_via_users_server"

    # Use for mentioning users.
    MENTIONS: Final = "m.mentions"
    MSC3952_MENTIONS: Final = "org.matrix.msc3952.mentions"

    # an unspecced field added to to-device messages to identify them uniquely-ish
    TO_DEVICE_MSGID: Final = "org.matrix.msgid"
@@ -243,7 +257,6 @@ class AccountDataTypes:
    DIRECT: Final = "m.direct"
    IGNORED_USER_LIST: Final = "m.ignored_user_list"
    TAG: Final = "m.tag"
    PUSH_RULES: Final = "m.push_rules"


class HistoryVisibility:
@@ -108,10 +108,10 @@ class Codes(str, Enum):

    USER_AWAITING_APPROVAL = "ORG.MATRIX.MSC3866_USER_AWAITING_APPROVAL"

    AS_PING_URL_NOT_SET = "M_URL_NOT_SET"
    AS_PING_BAD_STATUS = "M_BAD_STATUS"
    AS_PING_CONNECTION_TIMEOUT = "M_CONNECTION_TIMEOUT"
    AS_PING_CONNECTION_FAILED = "M_CONNECTION_FAILED"
    AS_PING_URL_NOT_SET = "FI.MAU.MSC2659_URL_NOT_SET"
    AS_PING_BAD_STATUS = "FI.MAU.MSC2659_BAD_STATUS"
    AS_PING_CONNECTION_TIMEOUT = "FI.MAU.MSC2659_CONNECTION_TIMEOUT"
    AS_PING_CONNECTION_FAILED = "FI.MAU.MSC2659_CONNECTION_FAILED"

    # Attempt to send a second annotation with the same event type & annotation key
    # MSC2677
@@ -119,20 +119,14 @@ class Codes(str, Enum):


class CodeMessageException(RuntimeError):
    """An exception with integer code and message string attributes, plus optional headers.
    """An exception with integer code and message string attributes.

    Attributes:
        code: HTTP error code
        msg: string describing the error
        headers: optional response headers to send
    """

    def __init__(
        self,
        code: Union[int, HTTPStatus],
        msg: str,
        headers: Optional[Dict[str, str]] = None,
    ):
    def __init__(self, code: Union[int, HTTPStatus], msg: str):
        super().__init__("%d: %s" % (code, msg))

        # Some calls to this method pass instances of http.HTTPStatus for `code`.
@@ -143,7 +137,6 @@ class CodeMessageException(RuntimeError):
        # To eliminate this behaviour, we convert them to their integer equivalents here.
        self.code = int(code)
        self.msg = msg
        self.headers = headers


class RedirectException(CodeMessageException):
@@ -189,7 +182,6 @@ class SynapseError(CodeMessageException):
        msg: str,
        errcode: str = Codes.UNKNOWN,
        additional_fields: Optional[Dict] = None,
        headers: Optional[Dict[str, str]] = None,
    ):
        """Constructs a synapse error.

@@ -198,7 +190,7 @@ class SynapseError(CodeMessageException):
            msg: The human-readable error message.
            errcode: The matrix error code, e.g. 'M_FORBIDDEN'
        """
        super().__init__(code, msg, headers)
        super().__init__(code, msg)
        self.errcode = errcode
        if additional_fields is None:
            self._additional_fields: Dict = {}
@@ -217,13 +209,6 @@ class InvalidAPICallError(SynapseError):
        super().__init__(HTTPStatus.BAD_REQUEST, msg, Codes.BAD_JSON)


class InvalidProxyCredentialsError(SynapseError):
    """Error raised when the proxy credentials are invalid."""

    def __init__(self, msg: str, errcode: str = Codes.UNKNOWN):
        super().__init__(401, msg, errcode)


class ProxiedRequestError(SynapseError):
    """An error from a general matrix endpoint, e.g. from a proxied Matrix API call.

@@ -350,20 +335,6 @@ class AuthError(SynapseError):
        super().__init__(code, msg, errcode, additional_fields)


class OAuthInsufficientScopeError(SynapseError):
    """An error raised when the caller does not have sufficient scope to perform the requested action"""

    def __init__(
        self,
        required_scopes: List[str],
    ):
        headers = {
            "WWW-Authenticate": 'Bearer error="insufficient_scope", scope="%s"'
            % (" ".join(required_scopes))
        }
        super().__init__(401, "Insufficient scope", Codes.FORBIDDEN, None, headers)

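As a quick check on the header construction above, this standalone snippet reproduces the WWW-Authenticate value for a hypothetical scope list:

```python
required_scopes = ["urn:matrix:org.matrix.msc2967.client:api:*"]
header_value = 'Bearer error="insufficient_scope", scope="%s"' % (
    " ".join(required_scopes)
)
print(header_value)
# Bearer error="insufficient_scope", scope="urn:matrix:org.matrix.msc2967.client:api:*"
```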
class UnstableSpecAuthError(AuthError):
    """An error raised when a new error code is being proposed to replace a previous one.
    This error will return an "org.matrix.unstable.errcode" property with the new error code,
@@ -128,7 +128,20 @@ USER_FILTER_SCHEMA = {
        "account_data": {"$ref": "#/definitions/filter"},
        "room": {"$ref": "#/definitions/room_filter"},
        "event_format": {"type": "string", "enum": ["client", "federation"]},
        "event_fields": {"type": "array", "items": {"type": "string"}},
        "event_fields": {
            "type": "array",
            "items": {
                "type": "string",
                # Don't allow '\\' in event field filters. This makes matching
                # events a lot easier as we can then use a negative lookbehind
                # assertion to split '\.' If we allowed \\ then it would
                # incorrectly split '\\.' See synapse.events.utils.serialize_event
                #
                # Note that because this is a regular expression, we have to escape
                # each backslash in the pattern.
                "pattern": r"^((?!\\\\).)*$",
            },
        },
    },
    "additionalProperties": True,  # Allow new fields for forward compatibility
}
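The pattern is easy to misread, so here is a standalone check of what it accepts (the field names are invented examples):

```python
import re

pattern = re.compile(r"^((?!\\\\).)*$")

assert pattern.match("content.body")        # plain dotted path: allowed
assert pattern.match(r"content\.body")      # escaped dot: allowed
assert not pattern.match(r"content\\body")  # literal double backslash: rejected
```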
@@ -152,14 +165,16 @@ class Filtering:
        self.DEFAULT_FILTER_COLLECTION = FilterCollection(hs, {})

    async def get_user_filter(
        self, user_id: UserID, filter_id: Union[int, str]
        self, user_localpart: str, filter_id: Union[int, str]
    ) -> "FilterCollection":
        result = await self.store.get_user_filter(user_id, filter_id)
        result = await self.store.get_user_filter(user_localpart, filter_id)
        return FilterCollection(self._hs, result)

    def add_user_filter(self, user_id: UserID, user_filter: JsonDict) -> Awaitable[int]:
    def add_user_filter(
        self, user_localpart: str, user_filter: JsonDict
    ) -> Awaitable[int]:
        self.check_valid_filter(user_filter)
        return self.store.add_user_filter(user_id, user_filter)
        return self.store.add_user_filter(user_localpart, user_filter)

    # TODO(paul): surely we should probably add a delete_user_filter or
    # replace_user_filter at some point? There's no REST API specified for
@@ -78,29 +78,37 @@ class RoomVersion:
    # MSC2209: Check 'notifications' key while verifying
    # m.room.power_levels auth rules.
    limit_notifications_power_levels: bool
    # No longer include the creator in m.room.create events.
    implicit_room_creator: bool
    # Apply updated redaction rules algorithm from room version 11.
    updated_redaction_rules: bool
    # Support the 'restricted' join rule.
    restricted_join_rule: bool
    # Support for the proper redaction rules for the restricted join rule. This requires
    # restricted_join_rule to be enabled.
    restricted_join_rule_fix: bool
    # Support the 'knock' join rule.
    knock_join_rule: bool
    # MSC3389: Protect relation information from redaction.
    msc3389_relation_redactions: bool
    # Support the 'knock_restricted' join rule.
    knock_restricted_join_rule: bool
    # Enforce integer power levels
    enforce_int_power_levels: bool
    # MSC2175: No longer include the creator in m.room.create events.
    msc2175_implicit_room_creator: bool
    # MSC2174/MSC2176: Apply updated redaction rules algorithm, move redacts to
    # content property.
    msc2176_redaction_rules: bool
    # MSC3083: Support the 'restricted' join_rule.
    msc3083_join_rules: bool
    # MSC3375: Support for the proper redaction rules for MSC3083. This mustn't
    # be enabled if MSC3083 is not.
    msc3375_redaction_rules: bool
    # MSC2403: Allows join_rules to be set to 'knock', changes auth rules to allow sending
    # m.room.membership event with membership 'knock'.
    msc2403_knocking: bool
    # MSC2716: Adds m.room.power_levels -> content.historical field to control
    # whether "insertion", "chunk", "marker" events can be sent
    msc2716_historical: bool
    # MSC2716: Adds support for redacting "insertion", "chunk", and "marker" events
    msc2716_redactions: bool
    # MSC3787: Adds support for a `knock_restricted` join rule, mixing concepts of
    # knocks and restricted join rules into the same join condition.
    msc3787_knock_restricted_join_rule: bool
    # MSC3667: Enforce integer power levels
    msc3667_int_only_power_levels: bool
    # MSC3931: Adds a push rule condition for "room version feature flags", making
    # some push rules room version dependent. Note that adding a flag to this list
    # is not enough to mark it "supported": the push rule evaluator also needs to
    # support the flag. Unknown flags are ignored by the evaluator, making conditions
    # fail if used.
    msc3931_push_features: Tuple[str, ...]  # values from PushRuleRoomFlag
    # MSC3989: Redact the origin field.
    msc3989_redaction_rules: bool

class RoomVersions:
@@ -113,15 +121,17 @@ class RoomVersions:
        special_case_aliases_auth=True,
        strict_canonicaljson=False,
        limit_notifications_power_levels=False,
        implicit_room_creator=False,
        updated_redaction_rules=False,
        restricted_join_rule=False,
        restricted_join_rule_fix=False,
        knock_join_rule=False,
        msc3389_relation_redactions=False,
        knock_restricted_join_rule=False,
        enforce_int_power_levels=False,
        msc2175_implicit_room_creator=False,
        msc2176_redaction_rules=False,
        msc3083_join_rules=False,
        msc3375_redaction_rules=False,
        msc2403_knocking=False,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3931_push_features=(),
        msc3989_redaction_rules=False,
    )
    V2 = RoomVersion(
        "2",
@@ -132,15 +142,17 @@ class RoomVersions:
        special_case_aliases_auth=True,
        strict_canonicaljson=False,
        limit_notifications_power_levels=False,
        implicit_room_creator=False,
        updated_redaction_rules=False,
        restricted_join_rule=False,
        restricted_join_rule_fix=False,
        knock_join_rule=False,
        msc3389_relation_redactions=False,
        knock_restricted_join_rule=False,
        enforce_int_power_levels=False,
        msc2175_implicit_room_creator=False,
        msc2176_redaction_rules=False,
        msc3083_join_rules=False,
        msc3375_redaction_rules=False,
        msc2403_knocking=False,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3931_push_features=(),
        msc3989_redaction_rules=False,
    )
    V3 = RoomVersion(
        "3",
@@ -151,15 +163,17 @@ class RoomVersions:
        special_case_aliases_auth=True,
        strict_canonicaljson=False,
        limit_notifications_power_levels=False,
        implicit_room_creator=False,
        updated_redaction_rules=False,
        restricted_join_rule=False,
        restricted_join_rule_fix=False,
        knock_join_rule=False,
        msc3389_relation_redactions=False,
        knock_restricted_join_rule=False,
        enforce_int_power_levels=False,
        msc2175_implicit_room_creator=False,
        msc2176_redaction_rules=False,
        msc3083_join_rules=False,
        msc3375_redaction_rules=False,
        msc2403_knocking=False,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3931_push_features=(),
        msc3989_redaction_rules=False,
    )
    V4 = RoomVersion(
        "4",
@@ -170,15 +184,17 @@ class RoomVersions:
        special_case_aliases_auth=True,
        strict_canonicaljson=False,
        limit_notifications_power_levels=False,
        implicit_room_creator=False,
        updated_redaction_rules=False,
        restricted_join_rule=False,
        restricted_join_rule_fix=False,
        knock_join_rule=False,
        msc3389_relation_redactions=False,
        knock_restricted_join_rule=False,
        enforce_int_power_levels=False,
        msc2175_implicit_room_creator=False,
        msc2176_redaction_rules=False,
        msc3083_join_rules=False,
        msc3375_redaction_rules=False,
        msc2403_knocking=False,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3931_push_features=(),
        msc3989_redaction_rules=False,
    )
    V5 = RoomVersion(
        "5",
@@ -189,15 +205,17 @@ class RoomVersions:
        special_case_aliases_auth=True,
        strict_canonicaljson=False,
        limit_notifications_power_levels=False,
        implicit_room_creator=False,
        updated_redaction_rules=False,
        restricted_join_rule=False,
        restricted_join_rule_fix=False,
        knock_join_rule=False,
        msc3389_relation_redactions=False,
        knock_restricted_join_rule=False,
        enforce_int_power_levels=False,
        msc2175_implicit_room_creator=False,
        msc2176_redaction_rules=False,
        msc3083_join_rules=False,
        msc3375_redaction_rules=False,
        msc2403_knocking=False,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3931_push_features=(),
        msc3989_redaction_rules=False,
    )
    V6 = RoomVersion(
        "6",
@@ -208,15 +226,38 @@ class RoomVersions:
        special_case_aliases_auth=False,
        strict_canonicaljson=True,
        limit_notifications_power_levels=True,
        implicit_room_creator=False,
        updated_redaction_rules=False,
        restricted_join_rule=False,
        restricted_join_rule_fix=False,
        knock_join_rule=False,
        msc3389_relation_redactions=False,
        knock_restricted_join_rule=False,
        enforce_int_power_levels=False,
        msc2175_implicit_room_creator=False,
        msc2176_redaction_rules=False,
        msc3083_join_rules=False,
        msc3375_redaction_rules=False,
        msc2403_knocking=False,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3931_push_features=(),
        msc3989_redaction_rules=False,
    )
    MSC2176 = RoomVersion(
        "org.matrix.msc2176",
        RoomDisposition.UNSTABLE,
        EventFormatVersions.ROOM_V4_PLUS,
        StateResolutionVersions.V2,
        enforce_key_validity=True,
        special_case_aliases_auth=False,
        strict_canonicaljson=True,
        limit_notifications_power_levels=True,
        msc2175_implicit_room_creator=False,
        msc2176_redaction_rules=True,
        msc3083_join_rules=False,
        msc3375_redaction_rules=False,
        msc2403_knocking=False,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3931_push_features=(),
        msc3989_redaction_rules=False,
    )
    V7 = RoomVersion(
        "7",
@@ -227,15 +268,17 @@ class RoomVersions:
        special_case_aliases_auth=False,
        strict_canonicaljson=True,
        limit_notifications_power_levels=True,
        implicit_room_creator=False,
        updated_redaction_rules=False,
        restricted_join_rule=False,
        restricted_join_rule_fix=False,
        knock_join_rule=True,
        msc3389_relation_redactions=False,
        knock_restricted_join_rule=False,
        enforce_int_power_levels=False,
        msc2175_implicit_room_creator=False,
        msc2176_redaction_rules=False,
        msc3083_join_rules=False,
        msc3375_redaction_rules=False,
        msc2403_knocking=True,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3931_push_features=(),
        msc3989_redaction_rules=False,
    )
    V8 = RoomVersion(
        "8",
@@ -246,15 +289,17 @@ class RoomVersions:
        special_case_aliases_auth=False,
        strict_canonicaljson=True,
        limit_notifications_power_levels=True,
        implicit_room_creator=False,
        updated_redaction_rules=False,
        restricted_join_rule=True,
        restricted_join_rule_fix=False,
        knock_join_rule=True,
        msc3389_relation_redactions=False,
        knock_restricted_join_rule=False,
        enforce_int_power_levels=False,
        msc2175_implicit_room_creator=False,
        msc2176_redaction_rules=False,
        msc3083_join_rules=True,
        msc3375_redaction_rules=False,
        msc2403_knocking=True,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3931_push_features=(),
        msc3989_redaction_rules=False,
    )
    V9 = RoomVersion(
        "9",
@@ -265,15 +310,38 @@ class RoomVersions:
        special_case_aliases_auth=False,
        strict_canonicaljson=True,
        limit_notifications_power_levels=True,
        implicit_room_creator=False,
        updated_redaction_rules=False,
        restricted_join_rule=True,
        restricted_join_rule_fix=True,
        knock_join_rule=True,
        msc3389_relation_redactions=False,
        knock_restricted_join_rule=False,
        enforce_int_power_levels=False,
        msc2175_implicit_room_creator=False,
        msc2176_redaction_rules=False,
        msc3083_join_rules=True,
        msc3375_redaction_rules=True,
        msc2403_knocking=True,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3931_push_features=(),
        msc3989_redaction_rules=False,
    )
    MSC3787 = RoomVersion(
        "org.matrix.msc3787",
        RoomDisposition.UNSTABLE,
        EventFormatVersions.ROOM_V4_PLUS,
        StateResolutionVersions.V2,
        enforce_key_validity=True,
        special_case_aliases_auth=False,
        strict_canonicaljson=True,
        limit_notifications_power_levels=True,
        msc2175_implicit_room_creator=False,
        msc2176_redaction_rules=False,
        msc3083_join_rules=True,
        msc3375_redaction_rules=True,
        msc2403_knocking=True,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=True,
        msc3667_int_only_power_levels=False,
        msc3931_push_features=(),
        msc3989_redaction_rules=False,
    )
    V10 = RoomVersion(
        "10",
@@ -284,15 +352,38 @@ class RoomVersions:
        special_case_aliases_auth=False,
        strict_canonicaljson=True,
        limit_notifications_power_levels=True,
        implicit_room_creator=False,
        updated_redaction_rules=False,
        restricted_join_rule=True,
        restricted_join_rule_fix=True,
        knock_join_rule=True,
        msc3389_relation_redactions=False,
        knock_restricted_join_rule=True,
        enforce_int_power_levels=True,
        msc2175_implicit_room_creator=False,
        msc2176_redaction_rules=False,
        msc3083_join_rules=True,
        msc3375_redaction_rules=True,
        msc2403_knocking=True,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=True,
        msc3667_int_only_power_levels=True,
        msc3931_push_features=(),
        msc3989_redaction_rules=False,
    )
    MSC2716v4 = RoomVersion(
        "org.matrix.msc2716v4",
        RoomDisposition.UNSTABLE,
        EventFormatVersions.ROOM_V4_PLUS,
        StateResolutionVersions.V2,
        enforce_key_validity=True,
        special_case_aliases_auth=False,
        strict_canonicaljson=True,
        limit_notifications_power_levels=True,
        msc2175_implicit_room_creator=False,
        msc2176_redaction_rules=False,
        msc3083_join_rules=False,
        msc3375_redaction_rules=False,
        msc2403_knocking=True,
        msc2716_historical=True,
        msc2716_redactions=True,
        msc3787_knock_restricted_join_rule=False,
        msc3667_int_only_power_levels=False,
        msc3931_push_features=(),
        msc3989_redaction_rules=False,
    )
    MSC1767v10 = RoomVersion(
        # MSC1767 (Extensible Events) based on room version "10"
@@ -304,34 +395,38 @@ class RoomVersions:
        special_case_aliases_auth=False,
        strict_canonicaljson=True,
        limit_notifications_power_levels=True,
        implicit_room_creator=False,
        updated_redaction_rules=False,
        restricted_join_rule=True,
        restricted_join_rule_fix=True,
        knock_join_rule=True,
        msc3389_relation_redactions=False,
        knock_restricted_join_rule=True,
        enforce_int_power_levels=True,
        msc2175_implicit_room_creator=False,
        msc2176_redaction_rules=False,
        msc3083_join_rules=True,
        msc3375_redaction_rules=True,
        msc2403_knocking=True,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=True,
        msc3667_int_only_power_levels=True,
        msc3931_push_features=(PushRuleRoomFlag.EXTENSIBLE_EVENTS,),
        msc3989_redaction_rules=False,
    )
    V11 = RoomVersion(
        "11",
        RoomDisposition.STABLE,
    MSC3989 = RoomVersion(
        "org.matrix.msc3989",
        RoomDisposition.UNSTABLE,
        EventFormatVersions.ROOM_V4_PLUS,
        StateResolutionVersions.V2,
        enforce_key_validity=True,
        special_case_aliases_auth=False,
        strict_canonicaljson=True,
        limit_notifications_power_levels=True,
        implicit_room_creator=True,  # Used by MSC3820
        updated_redaction_rules=True,  # Used by MSC3820
        restricted_join_rule=True,
        restricted_join_rule_fix=True,
        knock_join_rule=True,
        msc3389_relation_redactions=False,
        knock_restricted_join_rule=True,
        enforce_int_power_levels=True,
        msc2175_implicit_room_creator=False,
        msc2176_redaction_rules=False,
        msc3083_join_rules=True,
        msc3375_redaction_rules=True,
        msc2403_knocking=True,
        msc2716_historical=False,
        msc2716_redactions=False,
        msc3787_knock_restricted_join_rule=True,
        msc3667_int_only_power_levels=True,
        msc3931_push_features=(),
        msc3989_redaction_rules=True,
    )

@@ -344,11 +439,14 @@ KNOWN_ROOM_VERSIONS: Dict[str, RoomVersion] = {
        RoomVersions.V4,
        RoomVersions.V5,
        RoomVersions.V6,
        RoomVersions.MSC2176,
        RoomVersions.V7,
        RoomVersions.V8,
        RoomVersions.V9,
        RoomVersions.MSC3787,
        RoomVersions.V10,
        RoomVersions.V11,
        RoomVersions.MSC2716v4,
        RoomVersions.MSC3989,
    )
}

@@ -377,12 +475,12 @@ MSC3244_CAPABILITIES = {
        RoomVersionCapability(
            "knock",
            RoomVersions.V7,
            lambda room_version: room_version.knock_join_rule,
            lambda room_version: room_version.msc2403_knocking,
        ),
        RoomVersionCapability(
            "restricted",
            RoomVersions.V9,
            lambda room_version: room_version.restricted_join_rule,
            lambda room_version: room_version.msc3083_join_rules,
        ),
    )
}
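The capability predicates above only changed names, not behaviour; this throwaway check (using a stand-in object, not a real RoomVersion) shows the renamed flag and its MSC-era spelling agree:

```python
from types import SimpleNamespace

# Stand-in for a RoomVersion carrying both the old and new flag names.
room_version = SimpleNamespace(knock_join_rule=True, msc2403_knocking=True)
assert room_version.knock_join_rule == room_version.msc2403_knocking
```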
@@ -21,7 +21,6 @@ import socket
import sys
import traceback
import warnings
from textwrap import indent
from typing import (
    TYPE_CHECKING,
    Any,
@@ -65,6 +64,7 @@ from synapse.config.homeserver import HomeServerConfig
from synapse.config.server import ListenerConfig, ManholeConfig, TCPListenerConfig
from synapse.crypto import context_factory
from synapse.events.presence_router import load_legacy_presence_router
from synapse.events.third_party_rules import load_legacy_third_party_event_rules
from synapse.handlers.auth import load_legacy_password_auth_providers
from synapse.http.site import SynapseSite
from synapse.logging.context import PreserveLoggingContext
@@ -73,9 +73,6 @@ from synapse.metrics import install_gc_manager, register_threadpool
from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.metrics.jemalloc import setup_jemalloc_stats
from synapse.module_api.callbacks.spamchecker_callbacks import load_legacy_spam_checkers
from synapse.module_api.callbacks.third_party_event_rules_callbacks import (
    load_legacy_third_party_event_rules,
)
from synapse.types import ISynapseReactor
from synapse.util import SYNAPSE_VERSION
from synapse.util.caches.lrucache import setup_expire_lru_cache_entries
@@ -213,12 +210,8 @@ def handle_startup_exception(e: Exception) -> NoReturn:
    # Exceptions that occur between setting up the logging and forking or starting
    # the reactor are written to the logs, followed by a summary to stderr.
    logger.exception("Exception during startup")

    error_string = "".join(traceback.format_exception(type(e), e, e.__traceback__))
    indented_error_string = indent(error_string, "    ")

    quit_with_error(
        f"Error during initialisation:\n{indented_error_string}\nThere may be more information in the logs."
        f"Error during initialisation:\n {e}\nThere may be more information in the logs."
    )

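For reference, a self-contained sketch of the richer formatting being removed in this hunk (the RuntimeError is a stand-in for a real startup failure):

```python
import traceback
from textwrap import indent

try:
    raise RuntimeError("boom")  # stand-in for a startup failure
except RuntimeError as e:
    error_string = "".join(traceback.format_exception(type(e), e, e.__traceback__))
    print(f"Error during initialisation:\n{indent(error_string, '    ')}")
```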
@@ -386,7 +379,6 @@ def listen_unix(


def listen_http(
    hs: "HomeServer",
    listener_config: ListenerConfig,
    root_resource: Resource,
    version_string: str,
@@ -407,7 +399,6 @@ def listen_http(
        version_string,
        max_request_body_size=max_request_body_size,
        reactor=reactor,
        hs=hs,
    )

    if isinstance(listener_config, TCPListenerConfig):