
Compare commits


1 commit

Author: Action Bot
SHA1: 22a9305c53
Message: Version picker added for v1.85 docs
Date: 2023-12-11 14:51:44 +00:00
495 changed files with 12939 additions and 26676 deletions


@@ -29,12 +29,11 @@ IS_PR = os.environ["GITHUB_REF"].startswith("refs/pull/")
# First calculate the various trial jobs.
#
# For PRs, we only run each type of test with the oldest Python version supported (which
# is Python 3.8 right now)
# For each type of test we only run on Py3.7 on PRs
trial_sqlite_tests = [
{
"python-version": "3.8",
"python-version": "3.7",
"database": "sqlite",
"extras": "all",
}
@@ -47,12 +46,13 @@ if not IS_PR:
"database": "sqlite",
"extras": "all",
}
for version in ("3.9", "3.10", "3.11", "3.12.0-rc.2")
for version in ("3.8", "3.9", "3.10", "3.11")
)
trial_postgres_tests = [
{
"python-version": "3.8",
"python-version": "3.7",
"database": "postgres",
"postgres-version": "11",
"extras": "all",
@@ -64,14 +64,14 @@ if not IS_PR:
{
"python-version": "3.11",
"database": "postgres",
"postgres-version": "16",
"postgres-version": "15",
"extras": "all",
}
)
trial_no_extra_tests = [
{
"python-version": "3.8",
"python-version": "3.7",
"database": "sqlite",
"extras": "",
}
@@ -133,6 +133,11 @@ if not IS_PR:
"sytest-tag": "testing",
"postgres": "postgres",
},
{
"sytest-tag": "buster",
"postgres": "multi-postgres",
"workers": "workers",
},
]
)
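
For context, the script being diffed above emits JSON job matrices that the workflow later consumes with `fromJson(...)`. A minimal sketch of the pattern, using values from the hunks above (the printed variable name is illustrative):

```python
import json
import os

# On PRs, only the oldest supported Python is tested; merges fan out.
IS_PR = os.environ.get("GITHUB_REF", "").startswith("refs/pull/")

trial_sqlite_tests = [
    {"python-version": "3.7", "database": "sqlite", "extras": "all"}
]
if not IS_PR:
    trial_sqlite_tests.extend(
        {"python-version": version, "database": "sqlite", "extras": "all"}
        for version in ("3.8", "3.9", "3.10", "3.11")
    )

# Emitted as a single line so a workflow step can parse it with fromJson(...).
print("trial_test_matrix=" + json.dumps(trial_sqlite_tests))
```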


@@ -18,35 +18,25 @@ jobs:
steps:
- name: Set up QEMU
id: qemu
uses: docker/setup-qemu-action@v3
uses: docker/setup-qemu-action@v2
with:
platforms: arm64
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v3
uses: docker/setup-buildx-action@v2
- name: Inspect builder
run: docker buildx inspect
- name: Checkout repository
uses: actions/checkout@v4
- name: Extract version from pyproject.toml
# Note: explicitly requesting bash will mean bash is invoked with `-eo pipefail`, see
# https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsshell
shell: bash
run: |
echo "SYNAPSE_VERSION=$(grep "^version" pyproject.toml | sed -E 's/version\s*=\s*["]([^"]*)["]/\1/')" >> $GITHUB_ENV
- name: Log in to DockerHub
uses: docker/login-action@v3
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Log in to GHCR
uses: docker/login-action@v3
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
@@ -68,12 +58,10 @@ jobs:
type=pep440,pattern={{raw}}
- name: Build and push all platforms
uses: docker/build-push-action@v5
uses: docker/build-push-action@v4
with:
push: true
labels: |
gitsha1=${{ github.sha }}
org.opencontainers.image.version=${{ env.SYNAPSE_VERSION }}
labels: "gitsha1=${{ github.sha }}"
tags: "${{ steps.set-tag.outputs.tags }}"
file: "docker/Dockerfile"
platforms: linux/amd64,linux/arm64
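
A side note on the `Extract version from pyproject.toml` step above: the grep/sed pipeline pulls the `version = "..."` line out of pyproject.toml and appends it to `$GITHUB_ENV`. The same extraction sketched in Python (illustrative, not part of the workflow):

```python
import re

# Equivalent of:
#   grep "^version" pyproject.toml | sed -E 's/version\s*=\s*["]([^"]*)["]/\1/'
with open("pyproject.toml") as f:
    for line in f:
        match = re.match(r'version\s*=\s*"([^"]*)"', line)
        if match:
            print(f"SYNAPSE_VERSION={match.group(1)}")
            break
```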


@@ -14,7 +14,7 @@ jobs:
# There's a 'download artifact' action, but it hasn't been updated for the workflow_run action
# (https://github.com/actions/download-artifact/issues/60) so instead we get this mess:
- name: 📥 Download artifact
uses: dawidd6/action-download-artifact@268677152d06ba59fcec7a7f0b5d961b6ccd7e1e # v2.28.0
uses: dawidd6/action-download-artifact@246dbf436b23d7c49e21a7ab8204ca9ecd1fe615 # v2.27.0
with:
workflow: docs-pr.yaml
run_id: ${{ github.event.workflow_run.id }}


@@ -12,7 +12,7 @@ jobs:
name: GitHub Pages
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- name: Setup mdbook
uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0
@@ -39,7 +39,7 @@ jobs:
name: Check links in documentation
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- name: Setup mdbook
uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0


@@ -50,7 +50,7 @@ jobs:
needs:
- pre
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- name: Setup mdbook
uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0
@@ -80,7 +80,7 @@ jobs:
needs:
- pre
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- name: "Set up Sphinx"
uses: matrix-org/setup-python-poetry@v1


@@ -22,24 +22,10 @@ concurrency:
cancel-in-progress: true
jobs:
check_repo:
# Prevent this workflow from running on any fork of Synapse other than matrix-org/synapse, as it is
# only useful to the Synapse core team.
# All other workflow steps depend on this one, thus if 'should_run_workflow' is not 'true', the rest
# of the workflow will be skipped as well.
runs-on: ubuntu-latest
outputs:
should_run_workflow: ${{ steps.check_condition.outputs.should_run_workflow }}
steps:
- id: check_condition
run: echo "should_run_workflow=${{ github.repository == 'matrix-org/synapse' }}" >> "$GITHUB_OUTPUT"
mypy:
needs: check_repo
if: needs.check_repo.outputs.should_run_workflow == 'true'
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@stable
- uses: Swatinem/rust-cache@v2
@@ -57,12 +43,10 @@ jobs:
# `pip install matrix-synapse[all]` as closely as possible.
- run: poetry update --no-dev
- run: poetry run pip list > after.txt && (diff -u before.txt after.txt || true)
- name: Remove unhelpful options from mypy config
run: sed -e '/warn_unused_ignores = True/d' -e '/warn_redundant_casts = True/d' -i mypy.ini
- name: Remove warn_unused_ignores from mypy config
run: sed '/warn_unused_ignores = True/d' -i mypy.ini
- run: poetry run mypy
trial:
needs: check_repo
if: needs.check_repo.outputs.should_run_workflow == 'true'
runs-on: ubuntu-latest
strategy:
matrix:
@@ -72,7 +56,7 @@ jobs:
postgres-version: "14"
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@stable
@@ -121,8 +105,6 @@ jobs:
sytest:
needs: check_repo
if: needs.check_repo.outputs.should_run_workflow == 'true'
runs-on: ubuntu-latest
container:
image: matrixdotorg/sytest-synapse:testing
@@ -145,7 +127,7 @@ jobs:
BLACKLIST: ${{ matrix.workers && 'synapse-blacklist-with-workers' }}
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@stable
@@ -174,8 +156,7 @@ jobs:
complement:
needs: check_repo
if: "!failure() && !cancelled() && needs.check_repo.outputs.should_run_workflow == 'true'"
if: "${{ !failure() && !cancelled() }}"
runs-on: ubuntu-latest
strategy:
@@ -192,8 +173,8 @@ jobs:
database: Postgres
steps:
- name: Run actions/checkout@v4 for synapse
uses: actions/checkout@v4
- name: Run actions/checkout@v3 for synapse
uses: actions/checkout@v3
with:
path: synapse
@@ -211,7 +192,7 @@ jobs:
# Open an issue if the build fails, so we know about it.
# Only do this if we're not experimenting with this action in a PR.
open-issue:
if: "failure() && github.event_name != 'push' && github.event_name != 'pull_request' && needs.check_repo.outputs.should_run_workflow == 'true'"
if: "failure() && github.event_name != 'push' && github.event_name != 'pull_request'"
needs:
# TODO: should mypy be included here? It feels more brittle than the others.
- mypy
@@ -222,7 +203,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- uses: JasonEtco/create-an-issue@e27dddc79c92bc6e4562f268fffa5ed752639abd # v2.9.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
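
The removed `check_repo` job above shows the standard gating pattern: a step writes a key/value pair to the file named by `$GITHUB_OUTPUT`, and downstream jobs test `needs.check_repo.outputs.should_run_workflow`. A sketch of the step's effect (repository name from the hunk; the script form is illustrative):

```python
import os

# Equivalent of:
#   echo "should_run_workflow=${{ github.repository == 'matrix-org/synapse' }}" >> "$GITHUB_OUTPUT"
should_run = os.environ.get("GITHUB_REPOSITORY") == "matrix-org/synapse"
with open(os.environ["GITHUB_OUTPUT"], "a") as out:
    out.write(f"should_run_workflow={str(should_run).lower()}\n")
```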


@@ -16,7 +16,7 @@ jobs:
name: "Check locked dependencies have sdists"
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: '3.x'


@@ -33,29 +33,29 @@ jobs:
packages: write
steps:
- name: Checkout specific branch (debug build)
uses: actions/checkout@v4
uses: actions/checkout@v3
if: github.event_name == 'workflow_dispatch'
with:
ref: ${{ inputs.branch }}
- name: Checkout clean copy of develop (scheduled build)
uses: actions/checkout@v4
uses: actions/checkout@v3
if: github.event_name == 'schedule'
with:
ref: develop
- name: Checkout clean copy of master (on-push)
uses: actions/checkout@v4
uses: actions/checkout@v3
if: github.event_name == 'push'
with:
ref: master
- name: Login to registry
uses: docker/login-action@v3
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Work out labels for complement image
id: meta
uses: docker/metadata-action@v5
uses: docker/metadata-action@v4
with:
images: ghcr.io/${{ github.repository }}/complement-synapse
tags: |


@@ -27,14 +27,13 @@ jobs:
name: "Calculate list of debian distros"
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: '3.x'
- id: set-distros
run: |
# if we're running from a tag, get the full list of distros; otherwise just use debian:sid
# NOTE: inside the actual Dockerfile-dhvirtualenv, the image name is expanded into its full image path
dists='["debian:sid"]'
if [[ $GITHUB_REF == refs/tags/* ]]; then
dists=$(scripts-dev/build_debian_packages.py --show-dists-json)
@@ -55,13 +54,13 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
with:
path: src
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v3
uses: docker/setup-buildx-action@v2
with:
install: true
@@ -121,7 +120,7 @@ jobs:
arch: aarch64
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
@@ -134,7 +133,7 @@ jobs:
- name: Set up QEMU to emulate aarch64
if: matrix.arch == 'aarch64'
uses: docker/setup-qemu-action@v3
uses: docker/setup-qemu-action@v2
with:
platforms: arm64
@@ -144,7 +143,7 @@ jobs:
- name: Only build a single wheel on PR
if: startsWith(github.ref, 'refs/pull/')
run: echo "CIBW_BUILD="cp38-manylinux_${{ matrix.arch }}"" >> $GITHUB_ENV
run: echo "CIBW_BUILD="cp37-manylinux_${{ matrix.arch }}"" >> $GITHUB_ENV
- name: Build wheels
run: python -m cibuildwheel --output-dir wheelhouse
@@ -167,7 +166,7 @@ jobs:
if: ${{ !startsWith(github.ref, 'refs/pull/') }}
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: '3.10'


@@ -12,19 +12,12 @@ concurrency:
cancel-in-progress: true
jobs:
check-signoff:
if: "github.event_name == 'pull_request'"
uses: "matrix-org/backend-meta/.github/workflows/sign-off.yml@v2"
# Job to detect what has changed so we don't run e.g. Rust checks on PRs that
# don't modify Rust code.
changes:
runs-on: ubuntu-latest
outputs:
rust: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.rust }}
trial: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.trial }}
integration: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.integration }}
linting: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.linting }}
steps:
- uses: dorny/paths-filter@v2
id: filter
@@ -36,49 +29,13 @@ jobs:
- 'rust/**'
- 'Cargo.toml'
- 'Cargo.lock'
- '.rustfmt.toml'
trial:
- 'synapse/**'
- 'tests/**'
- 'rust/**'
- 'Cargo.toml'
- 'Cargo.lock'
- 'pyproject.toml'
- 'poetry.lock'
integration:
- 'synapse/**'
- 'rust/**'
- 'docker/**'
- 'Cargo.toml'
- 'Cargo.lock'
- 'pyproject.toml'
- 'poetry.lock'
- 'docker/**'
linting:
- 'synapse/**'
- 'docker/**'
- 'tests/**'
- 'scripts-dev/**'
- 'contrib/**'
- 'synmark/**'
- 'stubs/**'
- '.ci/**'
- 'mypy.ini'
- 'pyproject.toml'
- 'poetry.lock'
check-sampleconfig:
runs-on: ubuntu-latest
needs: changes
if: ${{ needs.changes.outputs.linting == 'true' }}
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@1.61.0
uses: dtolnay/rust-toolchain@1.58.1
- uses: Swatinem/rust-cache@v2
- uses: matrix-org/setup-python-poetry@v1
with:
@@ -90,11 +47,8 @@ jobs:
check-schema-delta:
runs-on: ubuntu-latest
needs: changes
if: ${{ needs.changes.outputs.linting == 'true' }}
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: "3.x"
@@ -104,7 +58,7 @@ jobs:
check-lockfile:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: "3.x"
@@ -112,12 +66,9 @@ jobs:
lint:
runs-on: ubuntu-latest
needs: changes
if: ${{ needs.changes.outputs.linting == 'true' }}
steps:
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: Setup Poetry
uses: matrix-org/setup-python-poetry@v1
@@ -137,16 +88,9 @@ jobs:
lint-mypy:
runs-on: ubuntu-latest
name: Typechecking
needs: changes
if: ${{ needs.changes.outputs.linting == 'true' }}
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Install Rust
uses: dtolnay/rust-toolchain@1.61.0
- uses: Swatinem/rust-cache@v2
uses: actions/checkout@v3
- name: Setup Poetry
uses: matrix-org/setup-python-poetry@v1
@@ -159,6 +103,10 @@ jobs:
# To make CI green, err towards caution and install the project.
install-project: "true"
- name: Install Rust
uses: dtolnay/rust-toolchain@1.58.1
- uses: Swatinem/rust-cache@v2
# Cribbed from
# https://github.com/AustinScola/mypy-cache-github-action/blob/85ea4f2972abed39b33bd02c36e341b28ca59213/src/restore.ts#L10-L17
- name: Restore/persist mypy's cache
@@ -175,7 +123,7 @@ jobs:
lint-crlf:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- name: Check line endings
run: scripts-dev/check_line_terminators.sh
@@ -183,7 +131,7 @@ jobs:
if: ${{ (github.base_ref == 'develop' || contains(github.base_ref, 'release-')) && github.actor != 'dependabot[bot]' }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
with:
ref: ${{ github.event.pull_request.head.sha }}
fetch-depth: 0
@@ -197,15 +145,12 @@ jobs:
lint-pydantic:
runs-on: ubuntu-latest
needs: changes
if: ${{ needs.changes.outputs.linting == 'true' }}
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Install Rust
uses: dtolnay/rust-toolchain@1.61.0
uses: dtolnay/rust-toolchain@1.58.1
- uses: Swatinem/rust-cache@v2
- uses: matrix-org/setup-python-poetry@v1
with:
@@ -219,10 +164,10 @@ jobs:
if: ${{ needs.changes.outputs.rust == 'true' }}
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@1.61.0
uses: dtolnay/rust-toolchain@1.58.1
with:
components: clippy
- uses: Swatinem/rust-cache@v2
@@ -237,7 +182,7 @@ jobs:
if: ${{ needs.changes.outputs.rust == 'true' }}
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@master
@@ -254,7 +199,7 @@ jobs:
if: ${{ needs.changes.outputs.rust == 'true' }}
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@master
@@ -280,7 +225,6 @@ jobs:
- check-lockfile
- lint-clippy
- lint-rustfmt
- check-signoff
runs-on: ubuntu-latest
steps:
- run: "true"
@@ -290,7 +234,7 @@ jobs:
needs: linting-done
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: "3.x"
@@ -301,17 +245,15 @@ jobs:
sytest_test_matrix: ${{ steps.get-matrix.outputs.sytest_test_matrix }}
trial:
if: ${{ !cancelled() && !failure() && needs.changes.outputs.trial == 'true' }} # Allow previous steps to be skipped, but not fail
needs:
- calculate-test-jobs
- changes
if: ${{ !cancelled() && !failure() }} # Allow previous steps to be skipped, but not fail
needs: calculate-test-jobs
runs-on: ubuntu-latest
strategy:
matrix:
job: ${{ fromJson(needs.calculate-test-jobs.outputs.trial_test_matrix) }}
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- run: sudo apt-get -qq install xmlsec1
- name: Set up PostgreSQL ${{ matrix.job.postgres-version }}
if: ${{ matrix.job.postgres-version }}
@@ -326,7 +268,7 @@ jobs:
postgres:${{ matrix.job.postgres-version }}
- name: Install Rust
uses: dtolnay/rust-toolchain@1.61.0
uses: dtolnay/rust-toolchain@1.58.1
- uses: Swatinem/rust-cache@v2
- uses: matrix-org/setup-python-poetry@v1
@@ -359,16 +301,14 @@ jobs:
trial-olddeps:
# Note: sqlite only; no postgres
if: ${{ !cancelled() && !failure() && needs.changes.outputs.trial == 'true' }} # Allow previous steps to be skipped, but not fail
needs:
- linting-done
- changes
if: ${{ !cancelled() && !failure() }} # Allow previous steps to be skipped, but not fail
needs: linting-done
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@1.61.0
uses: dtolnay/rust-toolchain@1.58.1
- uses: Swatinem/rust-cache@v2
# There aren't wheels for some of the older deps, so we need to install
@@ -380,7 +320,7 @@ jobs:
- uses: actions/setup-python@v4
with:
python-version: '3.8'
python-version: '3.7'
- name: Prepare old deps
if: steps.cache-poetry-old-deps.outputs.cache-hit != 'true'
@@ -417,18 +357,16 @@ jobs:
trial-pypy:
# Very slow; only run if the branch name includes 'pypy'
# Note: sqlite only; no postgres. Completely untested since poetry move.
if: ${{ contains(github.ref, 'pypy') && !failure() && !cancelled() && needs.changes.outputs.trial == 'true' }}
needs:
- linting-done
- changes
if: ${{ contains(github.ref, 'pypy') && !failure() && !cancelled() }}
needs: linting-done
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ["pypy-3.8"]
python-version: ["pypy-3.7"]
extras: ["all"]
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
# Install libs necessary for PyPy to build binary wheels for dependencies
- run: sudo apt-get -qq install xmlsec1 libxml2-dev libxslt-dev
- uses: matrix-org/setup-python-poetry@v1
@@ -451,10 +389,8 @@ jobs:
|| true
sytest:
if: ${{ !failure() && !cancelled() && needs.changes.outputs.integration == 'true' }}
needs:
- calculate-test-jobs
- changes
if: ${{ !failure() && !cancelled() }}
needs: calculate-test-jobs
runs-on: ubuntu-latest
container:
image: matrixdotorg/sytest-synapse:${{ matrix.job.sytest-tag }}
@@ -463,8 +399,8 @@ jobs:
env:
SYTEST_BRANCH: ${{ github.head_ref }}
POSTGRES: ${{ matrix.job.postgres && 1}}
MULTI_POSTGRES: ${{ (matrix.job.postgres == 'multi-postgres') || '' }}
ASYNCIO_REACTOR: ${{ (matrix.job.reactor == 'asyncio') || '' }}
MULTI_POSTGRES: ${{ (matrix.job.postgres == 'multi-postgres') && 1}}
ASYNCIO_REACTOR: ${{ (matrix.job.reactor == 'asyncio') && 1 }}
WORKERS: ${{ matrix.job.workers && 1 }}
BLACKLIST: ${{ matrix.job.workers && 'synapse-blacklist-with-workers' }}
TOP: ${{ github.workspace }}
@@ -475,12 +411,12 @@ jobs:
job: ${{ fromJson(needs.calculate-test-jobs.outputs.sytest_test_matrix) }}
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- name: Prepare test blacklist
run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers
- name: Install Rust
uses: dtolnay/rust-toolchain@1.61.0
uses: dtolnay/rust-toolchain@1.58.1
- uses: Swatinem/rust-cache@v2
- name: Run SyTest
@@ -499,8 +435,8 @@ jobs:
/logs/**/*.log*
export-data:
if: ${{ !failure() && !cancelled() && needs.changes.outputs.integration == 'true'}} # Allow previous steps to be skipped, but not fail
needs: [linting-done, portdb, changes]
if: ${{ !failure() && !cancelled() }} # Allow previous steps to be skipped, but not fail
needs: [linting-done, portdb]
runs-on: ubuntu-latest
env:
TOP: ${{ github.workspace }}
@@ -520,7 +456,7 @@ jobs:
--health-retries 5
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- run: sudo apt-get -qq install xmlsec1 postgresql-client
- uses: matrix-org/setup-python-poetry@v1
with:
@@ -535,15 +471,13 @@ jobs:
portdb:
if: ${{ !failure() && !cancelled() && needs.changes.outputs.integration == 'true'}} # Allow previous steps to be skipped, but not fail
needs:
- linting-done
- changes
if: ${{ !failure() && !cancelled() }} # Allow previous steps to be skipped, but not fail
needs: linting-done
runs-on: ubuntu-latest
strategy:
matrix:
include:
- python-version: "3.8"
- python-version: "3.7"
postgres-version: "11"
- python-version: "3.11"
@@ -564,7 +498,7 @@ jobs:
--health-retries 5
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- name: Add PostgreSQL apt repository
# We need a version of pg_dump that can handle the version of
# PostgreSQL being tested against. The Ubuntu package repository lags
@@ -598,10 +532,8 @@ jobs:
schema_diff
complement:
if: "${{ !failure() && !cancelled() && needs.changes.outputs.integration == 'true' }}"
needs:
- linting-done
- changes
if: "${{ !failure() && !cancelled() }}"
needs: linting-done
runs-on: ubuntu-latest
strategy:
@@ -618,13 +550,13 @@ jobs:
database: Postgres
steps:
- name: Run actions/checkout@v4 for synapse
uses: actions/checkout@v4
- name: Run actions/checkout@v3 for synapse
uses: actions/checkout@v3
with:
path: synapse
- name: Install Rust
uses: dtolnay/rust-toolchain@1.61.0
uses: dtolnay/rust-toolchain@1.58.1
- uses: Swatinem/rust-cache@v2
- uses: actions/setup-go@v4
@@ -649,10 +581,10 @@ jobs:
- changes
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@1.61.0
uses: dtolnay/rust-toolchain@1.58.1
- uses: Swatinem/rust-cache@v2
- run: cargo test
@@ -667,7 +599,7 @@ jobs:
- changes
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@master
@@ -695,16 +627,9 @@ jobs:
with:
needs: ${{ toJSON(needs) }}
# Various bits are skipped if there was no applicable changes.
# The newsfile and signoff lint may be skipped on non PR builds.
# The newsfile lint may be skipped on non PR builds
# Cargo test is skipped if there is no changes on Rust code
skippable: |
trial
trial-olddeps
sytest
portdb
export-data
complement
check-signoff
lint-newsfile
cargo-test
cargo-bench
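
The removed `changes` job above computed outputs of the form `${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.rust }}`. The intent, sketched in Python (function and argument names are illustrative):

```python
def should_run(github_ref: str, filter_matched: str) -> bool:
    # Pushes and tags always run the job; PRs run it only when
    # dorny/paths-filter matched the relevant paths.
    is_pr = github_ref.startswith("refs/pull/")
    return (not is_pr) or filter_matched == "true"

assert should_run("refs/heads/develop", "false")      # push: always runs
assert not should_run("refs/pull/1/merge", "false")   # PR with no matching changes
```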


@@ -5,9 +5,6 @@ on:
- cron: 0 8 * * *
workflow_dispatch:
# NB: inputs are only present when this workflow is dispatched manually.
# (The default below is the default field value in the form to trigger
# a manual dispatch). Otherwise the inputs will evaluate to null.
inputs:
twisted_ref:
description: Commit, branch or tag to checkout from upstream Twisted.
@@ -21,26 +18,11 @@ concurrency:
cancel-in-progress: true
jobs:
check_repo:
# Prevent this workflow from running on any fork of Synapse other than matrix-org/synapse, as it is
# only useful to the Synapse core team.
# All other workflow steps depend on this one, thus if 'should_run_workflow' is not 'true', the rest
# of the workflow will be skipped as well.
if: github.repository == 'matrix-org/synapse'
runs-on: ubuntu-latest
outputs:
should_run_workflow: ${{ steps.check_condition.outputs.should_run_workflow }}
steps:
- id: check_condition
run: echo "should_run_workflow=${{ github.repository == 'matrix-org/synapse' }}" >> "$GITHUB_OUTPUT"
mypy:
needs: check_repo
if: needs.check_repo.outputs.should_run_workflow == 'true'
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@stable
@@ -52,19 +34,17 @@ jobs:
extras: "all"
- run: |
poetry remove twisted
poetry add --extras tls git+https://github.com/twisted/twisted.git#${{ inputs.twisted_ref || 'trunk' }}
poetry add --extras tls git+https://github.com/twisted/twisted.git#${{ inputs.twisted_ref }}
poetry install --no-interaction --extras "all test"
- name: Remove unhelpful options from mypy config
run: sed -e '/warn_unused_ignores = True/d' -e '/warn_redundant_casts = True/d' -i mypy.ini
- name: Remove warn_unused_ignores from mypy config
run: sed '/warn_unused_ignores = True/d' -i mypy.ini
- run: poetry run mypy
trial:
needs: check_repo
if: needs.check_repo.outputs.should_run_workflow == 'true'
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- run: sudo apt-get -qq install xmlsec1
- name: Install Rust
@@ -95,20 +75,14 @@ jobs:
|| true
sytest:
needs: check_repo
if: needs.check_repo.outputs.should_run_workflow == 'true'
runs-on: ubuntu-latest
container:
# We're using ubuntu:focal because it uses Python 3.8 which is our minimum supported Python version.
# This job is a canary to warn us about unreleased twisted changes that would cause problems for us if
# they were to be released immediately. For simplicity's sake (and to save CI runners) we use the oldest
# version, assuming that any incompatibilities on newer versions would also be present on the oldest.
image: matrixdotorg/sytest-synapse:focal
image: matrixdotorg/sytest-synapse:buster
volumes:
- ${{ github.workspace }}:/src
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- name: Install Rust
uses: dtolnay/rust-toolchain@stable
@@ -145,8 +119,7 @@ jobs:
/logs/**/*.log*
complement:
needs: check_repo
if: "!failure() && !cancelled() && needs.check_repo.outputs.should_run_workflow == 'true'"
if: "${{ !failure() && !cancelled() }}"
runs-on: ubuntu-latest
strategy:
@@ -163,8 +136,8 @@ jobs:
database: Postgres
steps:
- name: Run actions/checkout@v4 for synapse
uses: actions/checkout@v4
- name: Run actions/checkout@v3 for synapse
uses: actions/checkout@v3
with:
path: synapse
@@ -193,7 +166,7 @@ jobs:
# open an issue if the build fails, so we know about it.
open-issue:
if: failure() && needs.check_repo.outputs.should_run_workflow == 'true'
if: failure()
needs:
- mypy
- trial
@@ -203,7 +176,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- uses: JasonEtco/create-an-issue@e27dddc79c92bc6e4562f268fffa5ed752639abd # v2.9.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
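
One behavioural note on the `poetry add` hunk above: workflow inputs only exist on manual dispatch, so on the nightly schedule `inputs.twisted_ref` is null and `${{ inputs.twisted_ref || 'trunk' }}` falls back to Twisted's `trunk` branch. In Python terms (illustrative):

```python
def ref_to_install(inputs_twisted_ref):
    # Scheduled runs carry no inputs, so fall back to 'trunk'
    # instead of appending an empty ref to the git URL.
    return inputs_twisted_ref or "trunk"

assert ref_to_install(None) == "trunk"
assert ref_to_install("twisted-23.8.0") == "twisted-23.8.0"
```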

.gitignore

@@ -34,7 +34,6 @@ __pycache__/
/logs
/media_store/
/uploads
/homeserver-config-overrides.d
# For direnv users
/.envrc

CHANGES.md

File diff suppressed because it is too large.

Cargo.lock

@@ -4,18 +4,18 @@ version = 3
[[package]]
name = "aho-corasick"
version = "1.0.2"
version = "0.7.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "43f6cb1bf222025340178f382c426f13757b2960e89779dfcb319c32542a5a41"
checksum = "b4f55bd91a0978cbfd91c457a164bab8b4001c833b7f323132c0a4e1922dd44e"
dependencies = [
"memchr",
]
[[package]]
name = "anyhow"
version = "1.0.75"
version = "1.0.71"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6"
checksum = "9c7d0618f0e0b7e8ff11427422b64564d5fb0be1940354bfe2e0529b18a9d9b8"
[[package]]
name = "arc-swap"
@@ -132,15 +132,15 @@ dependencies = [
[[package]]
name = "log"
version = "0.4.20"
version = "0.4.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f"
checksum = "518ef76f2f87365916b142844c16d8fefd85039bc5699050210a7778ee1cd1de"
[[package]]
name = "memchr"
version = "2.6.3"
version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8f232d6ef707e1956a43342693d2a31e72989554d58299d7a88738cc95b0d35c"
checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d"
[[package]]
name = "memoffset"
@@ -182,9 +182,9 @@ dependencies = [
[[package]]
name = "proc-macro2"
version = "1.0.64"
version = "1.0.52"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "78803b62cbf1f46fde80d7c0e803111524b9877184cfe7c3033659490ac7a7da"
checksum = "1d0e1ae9e836cc3beddd63db0df682593d7e2d3d891ae8c9083d2113e1744224"
dependencies = [
"unicode-ident",
]
@@ -229,9 +229,9 @@ dependencies = [
[[package]]
name = "pyo3-log"
version = "0.8.3"
version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f47b0777feb17f61eea78667d61103758b243a871edc09a7786500a50467b605"
checksum = "f9c8b57fe71fb5dcf38970ebedc2b1531cf1c14b1b9b4c560a182a57e115575c"
dependencies = [
"arc-swap",
"log",
@@ -273,9 +273,9 @@ dependencies = [
[[package]]
name = "quote"
version = "1.0.29"
version = "1.0.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "573015e8ab27661678357f27dc26460738fd2b6c86e46f386fde94cb5d913105"
checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc"
dependencies = [
"proc-macro2",
]
@@ -291,21 +291,9 @@ dependencies = [
[[package]]
name = "regex"
version = "1.9.6"
version = "1.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ebee201405406dbf528b8b672104ae6d6d63e6d118cb10e4d51abbc7b58044ff"
dependencies = [
"aho-corasick",
"memchr",
"regex-automata",
"regex-syntax",
]
[[package]]
name = "regex-automata"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "59b23e92ee4318893fa3fe3e6fb365258efbfe6ac6ab30f090cdcbb7aa37efa9"
checksum = "8b1f693b24f6ac912f4893ef08244d70b6067480d2f1a46e950c9691e6749d1d"
dependencies = [
"aho-corasick",
"memchr",
@@ -314,9 +302,9 @@ dependencies = [
[[package]]
name = "regex-syntax"
version = "0.7.5"
version = "0.6.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da"
checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1"
[[package]]
name = "ryu"
@@ -332,29 +320,29 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
[[package]]
name = "serde"
version = "1.0.188"
version = "1.0.163"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf9e0fcba69a370eed61bcf2b728575f726b50b55cba78064753d708ddc7549e"
checksum = "2113ab51b87a539ae008b5c6c02dc020ffa39afd2d83cffcb3f4eb2722cebec2"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.188"
version = "1.0.163"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2"
checksum = "8c805777e3930c8883389c602315a24224bcc738b63905ef87cd1420353ea93e"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.28",
"syn 2.0.10",
]
[[package]]
name = "serde_json"
version = "1.0.107"
version = "1.0.96"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65"
checksum = "057d394a50403bcac12672b2b18fb387ab6d289d957dab67dd201875391e52f1"
dependencies = [
"itoa",
"ryu",
@@ -386,9 +374,9 @@ dependencies = [
[[package]]
name = "syn"
version = "2.0.28"
version = "2.0.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "04361975b3f5e348b2189d8dc55bc942f278b2d482a6a0365de5bdd62d351567"
checksum = "5aad1363ed6d37b84299588d62d3a7d95b5a5c2d9aad5c85609fda12afaa1f40"
dependencies = [
"proc-macro2",
"quote",


@@ -3,4 +3,3 @@
[workspace]
members = ["rust"]
resolver = "2"


@@ -769,7 +769,7 @@ def main(server_url, identity_server_url, username, token, config_path):
global CONFIG_JSON
CONFIG_JSON = config_path # bit cheeky, but just overwrite the global
try:
with open(config_path) as config:
with open(config_path, "r") as config:
syn_cmd.config = json.load(config)
try:
http_client.verbose = "on" == syn_cmd.config["verbose"]
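
The change above is behaviour-neutral: `"r"` is `open()`'s default mode, so both spellings read the file identically. For example:

```python
# Create a file, then read it back both ways; the results are identical
# because "r" (text read) is open()'s default mode.
with open("config.json", "w") as config:
    config.write("{}")

with open("config.json") as config:        # new spelling
    a = config.read()
with open("config.json", "r") as config:   # old spelling
    b = config.read()
assert a == b
```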


@@ -37,6 +37,7 @@ class HttpClient:
Deferred: Succeeds when we get a 2xx HTTP response. The result
will be the decoded JSON body.
"""
pass
def get_json(self, url, args=None):
"""Gets some json from the given host homeserver and path
@@ -52,6 +53,7 @@ class HttpClient:
Deferred: Succeeds when we get a 2xx HTTP response. The result
will be the decoded JSON body.
"""
pass
class TwistedHttpClient(HttpClient):

File diff suppressed because it is too large.


@@ -29,7 +29,7 @@
"level": "error"
},
{
"line": "my-matrix-server-federation-sender-1 | 2023-01-25 20:56:20,995 - synapse.http.matrixfederationclient - 709 - WARNING - federation_transaction_transmission_loop-3 - {PUT-O-3} [example.com] Request failed: PUT matrix-federation://example.com/_matrix/federation/v1/send/1674680155797: HttpResponseException('403: Forbidden')",
"line": "my-matrix-server-federation-sender-1 | 2023-01-25 20:56:20,995 - synapse.http.matrixfederationclient - 709 - WARNING - federation_transaction_transmission_loop-3 - {PUT-O-3} [example.com] Request failed: PUT matrix://example.com/_matrix/federation/v1/send/1674680155797: HttpResponseException('403: Forbidden')",
"level": "warning"
},
{

debian/changelog

@@ -1,147 +1,3 @@
matrix-synapse-py3 (1.94.0) stable; urgency=medium
* New Synapse release 1.94.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 10 Oct 2023 10:57:41 +0100
matrix-synapse-py3 (1.94.0~rc1) stable; urgency=medium
* New Synapse release 1.94.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 03 Oct 2023 11:48:18 +0100
matrix-synapse-py3 (1.93.0) stable; urgency=medium
* New Synapse release 1.93.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 26 Sep 2023 15:54:40 +0100
matrix-synapse-py3 (1.93.0~rc1) stable; urgency=medium
* New synapse release 1.93.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 19 Sep 2023 11:55:00 +0000
matrix-synapse-py3 (1.92.3) stable; urgency=medium
* New Synapse release 1.92.3.
-- Synapse Packaging team <packages@matrix.org> Mon, 18 Sep 2023 15:05:04 +0200
matrix-synapse-py3 (1.92.2) stable; urgency=medium
* New Synapse release 1.92.2.
-- Synapse Packaging team <packages@matrix.org> Fri, 15 Sep 2023 13:17:41 +0100
matrix-synapse-py3 (1.92.1) stable; urgency=medium
* New Synapse release 1.92.1.
-- Synapse Packaging team <packages@matrix.org> Tue, 12 Sep 2023 13:19:42 +0200
matrix-synapse-py3 (1.92.0) stable; urgency=medium
* New Synapse release 1.92.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 12 Sep 2023 11:59:23 +0200
matrix-synapse-py3 (1.91.2) stable; urgency=medium
* New synapse release 1.91.2.
-- Synapse Packaging team <packages@matrix.org> Wed, 06 Sep 2023 14:59:30 +0000
matrix-synapse-py3 (1.92.0~rc1) stable; urgency=medium
* New Synapse release 1.92.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 05 Sep 2023 11:21:43 +0100
matrix-synapse-py3 (1.91.1) stable; urgency=medium
* New Synapse release 1.91.1.
-- Synapse Packaging team <packages@matrix.org> Mon, 04 Sep 2023 14:03:18 +0100
matrix-synapse-py3 (1.91.0) stable; urgency=medium
* New Synapse release 1.91.0.
-- Synapse Packaging team <packages@matrix.org> Wed, 30 Aug 2023 11:18:10 +0100
matrix-synapse-py3 (1.91.0~rc1) stable; urgency=medium
* New Synapse release 1.91.0rc1.
-- Synapse Packaging team <packages@matrix.org> Wed, 23 Aug 2023 09:47:18 -0700
matrix-synapse-py3 (1.90.0) stable; urgency=medium
* New Synapse release 1.90.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 15 Aug 2023 11:17:34 +0100
matrix-synapse-py3 (1.90.0~rc1) stable; urgency=medium
* New Synapse release 1.90.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 08 Aug 2023 15:29:34 +0100
matrix-synapse-py3 (1.89.0) stable; urgency=medium
* New Synapse release 1.89.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 01 Aug 2023 11:07:15 +0100
matrix-synapse-py3 (1.89.0~rc1) stable; urgency=medium
* New Synapse release 1.89.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 25 Jul 2023 14:31:07 +0200
matrix-synapse-py3 (1.88.0) stable; urgency=medium
* New Synapse release 1.88.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 18 Jul 2023 13:59:28 +0100
matrix-synapse-py3 (1.88.0~rc1) stable; urgency=medium
* New Synapse release 1.88.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 11 Jul 2023 10:20:19 +0100
matrix-synapse-py3 (1.87.0) stable; urgency=medium
* New Synapse release 1.87.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 04 Jul 2023 16:24:00 +0100
matrix-synapse-py3 (1.87.0~rc1) stable; urgency=medium
* New synapse release 1.87.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 27 Jun 2023 15:27:04 +0000
matrix-synapse-py3 (1.86.0) stable; urgency=medium
* New Synapse release 1.86.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 20 Jun 2023 17:22:46 +0200
matrix-synapse-py3 (1.86.0~rc2) stable; urgency=medium
* New Synapse release 1.86.0rc2.
-- Synapse Packaging team <packages@matrix.org> Wed, 14 Jun 2023 12:16:27 +0200
matrix-synapse-py3 (1.86.0~rc1) stable; urgency=medium
* New Synapse release 1.86.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 13 Jun 2023 14:30:45 +0200
matrix-synapse-py3 (1.85.2) stable; urgency=medium
* New Synapse release 1.85.2.


@@ -25,9 +25,9 @@ ARG PYTHON_VERSION=3.11
###
### Stage 0: generate requirements.txt
###
# We hardcode the use of Debian bookworm here because this could change upstream
# and other Dockerfiles used for testing are expecting bookworm.
FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm as requirements
# We hardcode the use of Debian bullseye here because this could change upstream
# and other Dockerfiles used for testing are expecting bullseye.
FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye as requirements
# RUN --mount is specific to buildkit and is documented at
# https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md#build-mounts-run---mount.
@@ -87,7 +87,7 @@ RUN if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
###
### Stage 1: builder
###
FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm as builder
FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye as builder
# install the OS build deps
RUN \
@@ -158,7 +158,7 @@ RUN --mount=type=cache,target=/synapse/target,sharing=locked \
### Stage 2: runtime
###
FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm
FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye
LABEL org.opencontainers.image.url='https://matrix.org/docs/projects/server/synapse'
LABEL org.opencontainers.image.documentation='https://github.com/matrix-org/synapse/blob/master/docker/README.md'
@@ -173,10 +173,10 @@ RUN \
gosu \
libjpeg62-turbo \
libpq5 \
libwebp7 \
libwebp6 \
xmlsec1 \
libjemalloc2 \
libicu72 \
libicu67 \
libssl-dev \
openssl \
&& rm -rf /var/lib/apt/lists/*


@@ -24,16 +24,16 @@ ARG distro=""
# https://launchpad.net/~jyrki-pulliainen/+archive/ubuntu/dh-virtualenv, but
# it's not obviously easier to use that than to build our own.)
FROM docker.io/library/${distro} as builder
FROM ${distro} as builder
RUN apt-get update -qq -o Acquire::Languages=none
RUN env DEBIAN_FRONTEND=noninteractive apt-get install \
-yqq --no-install-recommends \
build-essential \
ca-certificates \
devscripts \
equivs \
wget
-yqq --no-install-recommends \
build-essential \
ca-certificates \
devscripts \
equivs \
wget
# fetch and unpack the package
# We are temporarily using a fork of dh-virtualenv due to an incompatibility with Python 3.11, which ships with
@@ -55,36 +55,40 @@ RUN cd /dh-virtualenv && DEB_BUILD_OPTIONS=nodoc dpkg-buildpackage -us -uc -b
###
### Stage 1
###
FROM docker.io/library/${distro}
FROM ${distro}
# Get the distro we want to pull from as a dynamic build variable
# (We need to define it in each build stage)
ARG distro=""
ENV distro ${distro}
# Python < 3.7 assumes LANG="C" means ASCII-only and throws on printing unicode
# http://bugs.python.org/issue19846
ENV LANG C.UTF-8
# Install the build dependencies
#
# NB: keep this list in sync with the list of build-deps in debian/control
# TODO: it would be nice to do that automatically.
RUN apt-get update -qq -o Acquire::Languages=none \
&& env DEBIAN_FRONTEND=noninteractive apt-get install \
-yqq --no-install-recommends -o Dpkg::Options::=--force-unsafe-io \
build-essential \
curl \
debhelper \
devscripts \
libsystemd-dev \
lsb-release \
pkg-config \
python3-dev \
python3-pip \
python3-setuptools \
python3-venv \
sqlite3 \
libpq-dev \
libicu-dev \
pkg-config \
xmlsec1
-yqq --no-install-recommends -o Dpkg::Options::=--force-unsafe-io \
build-essential \
curl \
debhelper \
devscripts \
libsystemd-dev \
lsb-release \
pkg-config \
python3-dev \
python3-pip \
python3-setuptools \
python3-venv \
sqlite3 \
libpq-dev \
libicu-dev \
pkg-config \
xmlsec1
# Install rust and ensure it's in the PATH
ENV RUSTUP_HOME=/rust


@@ -7,7 +7,7 @@ ARG FROM=matrixdotorg/synapse:$SYNAPSE_VERSION
# target image. For repeated rebuilds, this is much faster than apt installing
# each time.
FROM docker.io/library/debian:bookworm-slim AS deps_base
FROM debian:bullseye-slim AS deps_base
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
@@ -21,7 +21,7 @@ FROM docker.io/library/debian:bookworm-slim AS deps_base
# which makes it much easier to copy (but we need to make sure we use an image
# based on the same debian version as the synapse image, to make sure we get
# the expected version of libc.
FROM docker.io/library/redis:7-bookworm AS redis_base
FROM redis:6-bullseye AS redis_base
# now build the final image, based on the the regular Synapse docker image
FROM $FROM


@@ -73,8 +73,7 @@ The following environment variables are supported in `generate` mode:
will log sensitive information such as access tokens.
This should not be needed unless you are a developer attempting to debug something
particularly tricky.
* `SYNAPSE_LOG_TESTING`: if set, Synapse will log additional information useful
for testing.
## Postgres


@@ -7,7 +7,6 @@
# https://github.com/matrix-org/synapse/blob/develop/docker/README-testing.md#testing-with-postgresql-and-single-or-multi-process-synapse
ARG SYNAPSE_VERSION=latest
# This is an intermediate image, to be built locally (not pulled from a registry).
ARG FROM=matrixdotorg/synapse-workers:$SYNAPSE_VERSION
FROM $FROM
@@ -20,8 +19,8 @@ FROM $FROM
# the same debian version as Synapse's docker image (so the versions of the
# shared libraries match).
RUN adduser --system --uid 999 postgres --home /var/lib/postgresql
COPY --from=docker.io/library/postgres:13-bookworm /usr/lib/postgresql /usr/lib/postgresql
COPY --from=docker.io/library/postgres:13-bookworm /usr/share/postgresql /usr/share/postgresql
COPY --from=postgres:13-bullseye /usr/lib/postgresql /usr/lib/postgresql
COPY --from=postgres:13-bullseye /usr/share/postgresql /usr/share/postgresql
RUN mkdir /var/run/postgresql && chown postgres /var/run/postgresql
ENV PATH="${PATH}:/usr/lib/postgresql/13/bin"
ENV PGDATA=/var/lib/postgresql/data


@@ -92,6 +92,8 @@ allow_device_name_lookup_over_federation: true
## Experimental Features ##
experimental_features:
# Enable history backfilling support
msc2716_enabled: true
# client-side support for partial state in /send_join responses
faster_joins: true
# Enable support for polls


@@ -35,11 +35,7 @@ server {
# Send all other traffic to the main process
location ~* ^(\\/_matrix|\\/_synapse) {
{% if using_unix_sockets %}
proxy_pass http://unix:/run/main_public.sock;
{% else %}
proxy_pass http://localhost:8080;
{% endif %}
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Host $host;


@@ -6,9 +6,6 @@
{% if enable_redis %}
redis:
enabled: true
{% if using_unix_sockets %}
path: /tmp/redis.sock
{% endif %}
{% endif %}
{% if appservice_registrations is not none %}


@@ -19,11 +19,7 @@ username=www-data
autorestart=true
[program:redis]
{% if using_unix_sockets %}
command=/usr/local/bin/prefix-log /usr/local/bin/redis-server --unixsocket /tmp/redis.sock
{% else %}
command=/usr/local/bin/prefix-log /usr/local/bin/redis-server
{% endif %}
priority=1
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0


@@ -8,11 +8,7 @@ worker_name: "{{ name }}"
worker_listeners:
- type: http
{% if using_unix_sockets %}
path: "/run/worker.{{ port }}"
{% else %}
port: {{ port }}
{% endif %}
{% if listener_resources %}
resources:
- names:


@@ -36,17 +36,12 @@ listeners:
# Allow configuring in case we want to reverse proxy 8008
# using another process in the same container
{% if SYNAPSE_USE_UNIX_SOCKET %}
# Unix sockets don't care about TLS or IP addresses or ports
- path: '/run/main_public.sock'
type: http
{% else %}
- port: {{ SYNAPSE_HTTP_PORT or 8008 }}
tls: false
bind_addresses: ['::']
type: http
x_forwarded: false
{% endif %}
resources:
- names: [client]
compress: true
@@ -62,11 +57,8 @@ database:
user: "{{ POSTGRES_USER or "synapse" }}"
password: "{{ POSTGRES_PASSWORD }}"
database: "{{ POSTGRES_DB or "synapse" }}"
{% if not SYNAPSE_USE_UNIX_SOCKET %}
{# Synapse will use a default unix socket for Postgres when host/port is not specified (behavior from `psycopg2`). #}
host: "{{ POSTGRES_HOST or "db" }}"
port: "{{ POSTGRES_PORT or "5432" }}"
{% endif %}
cp_min: 5
cp_max: 10
{% else %}
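
The template comment above relies on libpq behaviour: when `host`/`port` are omitted, `psycopg2` connects over the local default unix socket instead of TCP. A minimal sketch, assuming psycopg2 is installed and a local Postgres is listening on its default socket:

```python
import psycopg2

# No host/port supplied: libpq falls back to the default unix socket
# (typically /var/run/postgresql/.s.PGSQL.5432) rather than TCP.
conn = psycopg2.connect(dbname="synapse", user="synapse")
```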


@@ -49,35 +49,17 @@ handlers:
class: logging.StreamHandler
formatter: precise
{% if not SYNAPSE_LOG_SENSITIVE %}
{#
If SYNAPSE_LOG_SENSITIVE is unset, then override synapse.storage.SQL to INFO
so that DEBUG entries (containing sensitive information) are not emitted.
#}
loggers:
# This is just here so we can leave `loggers` in the config regardless of whether
# we configure other loggers below (avoid empty yaml dict error).
_placeholder:
level: "INFO"
{% if not SYNAPSE_LOG_SENSITIVE %}
{#
If SYNAPSE_LOG_SENSITIVE is unset, then override synapse.storage.SQL to INFO
so that DEBUG entries (containing sensitive information) are not emitted.
#}
synapse.storage.SQL:
# beware: increasing this to DEBUG will make synapse log sensitive
# information such as access tokens.
level: INFO
{% endif %}
{% if SYNAPSE_LOG_TESTING %}
{#
If Synapse is under test, log a few more useful things for a developer
attempting to debug something particularly tricky.
With `synapse.visibility.filtered_event_debug`, it logs when events are (maybe
unexpectedly) filtered out of responses in tests. It's just nice to be able to
look at the CI log and figure out why an event isn't being returned.
#}
synapse.visibility.filtered_event_debug:
level: DEBUG
{% endif %}
{% endif %}
root:
level: {{ SYNAPSE_LOG_LEVEL or "INFO" }}


@@ -40,8 +40,6 @@
# log level. INFO is the default.
# * SYNAPSE_LOG_SENSITIVE: If unset, SQL and SQL values won't be logged,
# regardless of the SYNAPSE_LOG_LEVEL setting.
# * SYNAPSE_LOG_TESTING: if set, Synapse will log additional information useful
# for testing.
#
# NOTE: According to Complement's ENTRYPOINT expectations for a homeserver image (as defined
# in the project's README), this script may be run multiple times, and functionality should
@@ -74,9 +72,6 @@ MAIN_PROCESS_HTTP_LISTENER_PORT = 8080
MAIN_PROCESS_INSTANCE_NAME = "main"
MAIN_PROCESS_LOCALHOST_ADDRESS = "127.0.0.1"
MAIN_PROCESS_REPLICATION_PORT = 9093
# Obviously, these would only be used with the UNIX socket option
MAIN_PROCESS_UNIX_SOCKET_PUBLIC_PATH = "/run/main_public.sock"
MAIN_PROCESS_UNIX_SOCKET_PRIVATE_PATH = "/run/main_private.sock"
# A simple name used as a placeholder in the WORKERS_CONFIG below. This will be replaced
# during processing with the name of the worker.
@@ -183,7 +178,6 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"^/_matrix/client/(r0|v3|unstable)/password_policy$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/directory/room/.*$",
"^/_matrix/client/(r0|v3|unstable)/capabilities$",
"^/_matrix/client/(r0|v3|unstable)/notifications$",
],
"shared_extra_conf": {},
"worker_extra_conf": "",
@@ -248,6 +242,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"^/_matrix/client/(api/v1|r0|v3|unstable)/join/",
"^/_matrix/client/(api/v1|r0|v3|unstable)/knock/",
"^/_matrix/client/(api/v1|r0|v3|unstable)/profile/",
"^/_matrix/client/(v1|unstable/org.matrix.msc2716)/rooms/.*/batch_send",
],
"shared_extra_conf": {},
"worker_extra_conf": "",
@@ -411,15 +406,11 @@ def add_worker_roles_to_shared_config(
)
# Map of stream writer instance names to host/ports combos
if os.environ.get("SYNAPSE_USE_UNIX_SOCKET", False):
instance_map[worker_name] = {
"path": f"/run/worker.{worker_port}",
}
else:
instance_map[worker_name] = {
"host": "localhost",
"port": worker_port,
}
instance_map[worker_name] = {
"host": "localhost",
"port": worker_port,
}
# Update the list of stream writers. It's convenient that the name of the worker
# type is the same as the stream to write. Iterate over the whole list in case there
# is more than one.
@@ -431,15 +422,10 @@ def add_worker_roles_to_shared_config(
# Map of stream writer instance names to host/ports combos
# For now, all stream writers need http replication ports
if os.environ.get("SYNAPSE_USE_UNIX_SOCKET", False):
instance_map[worker_name] = {
"path": f"/run/worker.{worker_port}",
}
else:
instance_map[worker_name] = {
"host": "localhost",
"port": worker_port,
}
instance_map[worker_name] = {
"host": "localhost",
"port": worker_port,
}
def merge_worker_template_configs(
@@ -731,29 +717,17 @@ def generate_worker_files(
# Note that yaml cares about indentation, so care should be taken to insert lines
# into files at the correct indentation below.
# Convenience helper for if using unix sockets instead of host:port
using_unix_sockets = environ.get("SYNAPSE_USE_UNIX_SOCKET", False)
# First read the original config file and extract the listeners block. Then we'll
# add another listener for replication. Later we'll write out the result to the
# shared config file.
listeners: List[Any]
if using_unix_sockets:
listeners = [
{
"path": MAIN_PROCESS_UNIX_SOCKET_PRIVATE_PATH,
"type": "http",
"resources": [{"names": ["replication"]}],
}
]
else:
listeners = [
{
"port": MAIN_PROCESS_REPLICATION_PORT,
"bind_address": MAIN_PROCESS_LOCALHOST_ADDRESS,
"type": "http",
"resources": [{"names": ["replication"]}],
}
]
listeners = [
{
"port": MAIN_PROCESS_REPLICATION_PORT,
"bind_address": MAIN_PROCESS_LOCALHOST_ADDRESS,
"type": "http",
"resources": [{"names": ["replication"]}],
}
]
with open(config_path) as file_stream:
original_config = yaml.safe_load(file_stream)
original_listeners = original_config.get("listeners")
@@ -794,17 +768,7 @@ def generate_worker_files(
# A list of internal endpoints to healthcheck, starting with the main process
# which exists even if no workers do.
# This list ends up being part of the command line to curl, (curl added support for
# Unix sockets in version 7.40).
if using_unix_sockets:
healthcheck_urls = [
f"--unix-socket {MAIN_PROCESS_UNIX_SOCKET_PUBLIC_PATH} "
# The scheme and hostname from the following URL are ignored.
# The only thing that matters is the path `/health`
"http://localhost/health"
]
else:
healthcheck_urls = ["http://localhost:8080/health"]
healthcheck_urls = ["http://localhost:8080/health"]
# Get the set of all worker types that we have configured
all_worker_types_in_use = set(chain(*requested_worker_types.values()))
@@ -841,12 +805,8 @@ def generate_worker_files(
# given worker_type needs to stay assigned and not be replaced.
worker_config["shared_extra_conf"].update(shared_config)
shared_config = worker_config["shared_extra_conf"]
if using_unix_sockets:
healthcheck_urls.append(
f"--unix-socket /run/worker.{worker_port} http://localhost/health"
)
else:
healthcheck_urls.append("http://localhost:%d/health" % (worker_port,))
healthcheck_urls.append("http://localhost:%d/health" % (worker_port,))
# Update the shared config with sharding-related options if necessary
add_worker_roles_to_shared_config(
@@ -862,10 +822,9 @@ def generate_worker_files(
# Then a worker config file
convert(
"/conf/worker.yaml.j2",
f"/conf/workers/{worker_name}.yaml",
"/conf/workers/{name}.yaml".format(name=worker_name),
**worker_config,
worker_log_config_filepath=log_config_filepath,
using_unix_sockets=using_unix_sockets,
)
# Save this worker's port number to the correct nginx upstreams
@@ -886,13 +845,8 @@ def generate_worker_files(
nginx_upstream_config = ""
for upstream_worker_base_name, upstream_worker_ports in nginx_upstreams.items():
body = ""
if using_unix_sockets:
for port in upstream_worker_ports:
body += f" server unix:/run/worker.{port};\n"
else:
for port in upstream_worker_ports:
body += f" server localhost:{port};\n"
for port in upstream_worker_ports:
body += f" server localhost:{port};\n"
# Add to the list of configured upstreams
nginx_upstream_config += NGINX_UPSTREAM_CONFIG_BLOCK.format(
@@ -922,15 +876,10 @@ def generate_worker_files(
# If there are workers, add the main process to the instance_map too.
if workers_in_use:
instance_map = shared_config.setdefault("instance_map", {})
if using_unix_sockets:
instance_map[MAIN_PROCESS_INSTANCE_NAME] = {
"path": MAIN_PROCESS_UNIX_SOCKET_PRIVATE_PATH,
}
else:
instance_map[MAIN_PROCESS_INSTANCE_NAME] = {
"host": MAIN_PROCESS_LOCALHOST_ADDRESS,
"port": MAIN_PROCESS_REPLICATION_PORT,
}
instance_map[MAIN_PROCESS_INSTANCE_NAME] = {
"host": MAIN_PROCESS_LOCALHOST_ADDRESS,
"port": MAIN_PROCESS_REPLICATION_PORT,
}
# Shared homeserver config
convert(
@@ -940,7 +889,6 @@ def generate_worker_files(
appservice_registrations=appservice_registrations,
enable_redis=workers_in_use,
workers_in_use=workers_in_use,
using_unix_sockets=using_unix_sockets,
)
# Nginx config
@@ -951,7 +899,6 @@ def generate_worker_files(
upstream_directives=nginx_upstream_config,
tls_cert_path=os.environ.get("SYNAPSE_TLS_CERT"),
tls_key_path=os.environ.get("SYNAPSE_TLS_KEY"),
using_unix_sockets=using_unix_sockets,
)
# Supervisord config
@@ -961,7 +908,6 @@ def generate_worker_files(
"/etc/supervisor/supervisord.conf",
main_config_path=config_path,
enable_redis=workers_in_use,
using_unix_sockets=using_unix_sockets,
)
convert(
@@ -1001,7 +947,6 @@ def generate_worker_log_config(
extra_log_template_args["SYNAPSE_LOG_SENSITIVE"] = environ.get(
"SYNAPSE_LOG_SENSITIVE"
)
extra_log_template_args["SYNAPSE_LOG_TESTING"] = environ.get("SYNAPSE_LOG_TESTING")
# Render and write the file
log_config_filepath = f"/conf/workers/{worker_name}.log.config"
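
Much of the configure_workers diff above is the same two-way branch repeated: choose a unix-socket path or a localhost TCP port for each endpoint depending on `SYNAPSE_USE_UNIX_SOCKET`. Distilled into a helper (the name is illustrative; the paths and environment variable come from the hunks):

```python
import os

def worker_endpoint(worker_port: int) -> dict:
    # Mirrors the instance_map branches in the diff above.
    if os.environ.get("SYNAPSE_USE_UNIX_SOCKET", False):
        return {"path": f"/run/worker.{worker_port}"}
    return {"host": "localhost", "port": worker_port}
```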


@@ -8,9 +8,9 @@ ARG PYTHON_VERSION=3.9
###
### Stage 0: generate requirements.txt
###
# We hardcode the use of Debian bookworm here because this could change upstream
# and other Dockerfiles used for testing are expecting bookworm.
FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm
# We hardcode the use of Debian bullseye here because this could change upstream
# and other Dockerfiles used for testing are expecting bullseye.
FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye
# Install Rust and other dependencies (stolen from normal Dockerfile)
# install the OS build deps
@@ -33,7 +33,7 @@ RUN \
gosu \
libjpeg62-turbo \
libpq5 \
libwebp7 \
libwebp6 \
xmlsec1 \
libjemalloc2 \
&& rm -rf /var/lib/apt/lists/*


@@ -82,7 +82,7 @@ def generate_config_from_template(
with open(filename) as handle:
value = handle.read()
else:
log(f"Generating a random secret for {secret}")
log("Generating a random secret for {}".format(secret))
value = codecs.encode(os.urandom(32), "hex").decode()
with open(filename, "w") as handle:
handle.write(value)
@@ -239,7 +239,7 @@ def main(args: List[str], environ: MutableMapping[str, str]) -> None:
log("Could not find %s, will not use" % (jemallocpath,))
# if there are no config files passed to synapse, try adding the default file
if not any(p.startswith(("--config-path", "-c")) for p in args):
if not any(p.startswith("--config-path") or p.startswith("-c") for p in args):
config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data")
config_path = environ.get(
"SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml"


@@ -97,7 +97,6 @@
- [Cancellation](development/synapse_architecture/cancellation.md)
- [Log Contexts](log_contexts.md)
- [Replication](replication.md)
- [Streams](development/synapse_architecture/streams.md)
- [TCP Replication](tcp_replication.md)
- [Faster remote joins](development/synapse_architecture/faster_joins.md)
- [Internal Documentation](development/internal_documentation/README.md)


@@ -1,7 +1,5 @@
# Account validity API
**Note:** This API is disabled when MSC3861 is enabled. [See #15582](https://github.com/matrix-org/synapse/pull/15582)
This API allows a server administrator to manage the validity of an account. To
use it, you must enable the account validity feature (under
`account_validity`) in Synapse's configuration.


@@ -1,7 +1,5 @@
# Shared-Secret Registration
**Note:** This API is disabled when MSC3861 is enabled. [See #15582](https://github.com/matrix-org/synapse/pull/15582)
This API allows for the creation of users in an administrative and
non-interactive way. This is generally used for bootstrapping a Synapse
instance with administrator accounts.


@@ -419,7 +419,7 @@ The following query parameters are available:
* `from` (required) - The token to start returning events from. This token can be obtained from a prev_batch
or next_batch token returned by the /sync endpoint, or from an end token returned by a previous request to this endpoint.
* `to` - The token to stop returning events at.
* `to` - The token to spot returning events at.
* `limit` - The maximum number of events to return. Defaults to `10`.
* `filter` - A JSON RoomEventFilter to filter returned events with.
* `dir` - The direction to return events from. Either `f` for forwards or `b` for backwards. Setting

View File

@@ -54,8 +54,7 @@ It returns a JSON body like the following:
"external_id": "<user_id_provider_2>"
}
],
"user_type": null,
"locked": false
"user_type": null
}
```
@@ -104,8 +103,7 @@ with a body of:
],
"admin": false,
"deactivated": false,
"user_type": null,
"locked": false
"user_type": null
}
```
@@ -157,7 +155,6 @@ Body parameters:
Note: a user cannot be erased with this API. For more details on
deactivating and erasing users see [Deactivate Account](#deactivate-account).
- `locked` - **bool**, optional. If unspecified, locked state will be left unchanged.
- `user_type` - **string** or null, optional. If not provided, the user type will
not be changed. If `null` is given, the user type will be cleared.
Other allowed options are: `bot` and `support`.
@@ -186,8 +183,7 @@ A response body like the following is returned:
"shadow_banned": 0,
"displayname": "<User One>",
"avatar_url": null,
"creation_ts": 1560432668000,
"locked": false
"creation_ts": 1560432668000
}, {
"name": "<user_id2>",
"is_guest": 0,
@@ -198,8 +194,7 @@ A response body like the following is returned:
"shadow_banned": 0,
"displayname": "<User Two>",
"avatar_url": "<avatar_url>",
"creation_ts": 1561550621000,
"locked": false
"creation_ts": 1561550621000
}
],
"next_token": "100",
@@ -222,9 +217,7 @@ The following parameters should be set in the URL:
- `name` - Is optional and filters to only return users with user ID localparts
**or** displaynames that contain this value.
- `guests` - string representing a bool - Is optional and if `false` will **exclude** guest users.
Defaults to `true` to include guest users. This parameter is not supported when MSC3861 is enabled. [See #15582](https://github.com/matrix-org/synapse/pull/15582)
- `admins` - Optional flag to filter admins. If `true`, only admins are queried. If `false`, admins are excluded from
the query. When the flag is absent (the default), **both** admins and non-admins are included in the search results.
- `deactivated` - string representing a bool - Is optional and if `true` will **include** deactivated users.
Defaults to `false` to exclude deactivated users.
- `limit` - string representing a positive integer - Is optional but is used for pagination,
@@ -246,15 +239,9 @@ The following parameters should be set in the URL:
- `displayname` - Users are ordered alphabetically by `displayname`.
- `avatar_url` - Users are ordered alphabetically by avatar URL.
- `creation_ts` - Users are ordered by when the user was created, in ms.
- `last_seen_ts` - Users are ordered by when the user was last seen, in ms.
- `dir` - Direction of media order. Either `f` for forwards or `b` for backwards.
Setting this value to `b` will reverse the above sort order. Defaults to `f`.
- `not_user_type` - Exclude certain user types, such as bot users, from the request.
Can be provided multiple times. Possible values are `bot`, `support` or "empty string".
"empty string" here means to exclude users without a type.
- `locked` - string representing a bool - Is optional and if `true` will **include** locked users.
Defaults to `false` to exclude locked users. Note: Introduced in v1.93.
Caution. The database only has indexes on the columns `name` and `creation_ts`.
This means that if a different sort order is used (`is_guest`, `admin`,
@@ -279,12 +266,10 @@ The following fields are returned in the JSON response body:
- `displayname` - string - The user's display name if they have set one.
- `avatar_url` - string - The user's avatar URL if they have set one.
- `creation_ts` - integer - The user's creation timestamp in ms.
- `last_seen_ts` - integer - The user's last activity timestamp in ms.
- `locked` - bool - Whether the user has been marked as locked. Note: Introduced in v1.93.
- `next_token`: string representing a positive integer - Indication for pagination. See above.
- `total` - integer - Total number of users.
*Added in Synapse 1.93:* the `locked` query parameter and response field.
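To make the pagination concrete, here is a minimal sketch of driving this endpoint with Python's `requests`; the homeserver URL and admin token are placeholders.

```python
import requests

BASE_URL = "https://matrix.example.com"  # placeholder homeserver
HEADERS = {"Authorization": "Bearer <admin_access_token>"}  # placeholder token

params = {"from": 0, "limit": 10, "guests": "false", "order_by": "creation_ts"}
while True:
    resp = requests.get(
        f"{BASE_URL}/_synapse/admin/v2/users",
        headers=HEADERS,
        params=params,
        timeout=10,
    )
    resp.raise_for_status()
    body = resp.json()
    for user in body["users"]:
        print(user["name"], user["creation_ts"])
    # `next_token` is only present while more results remain.
    if "next_token" not in body:
        break
    params["from"] = body["next_token"]
```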
## Query current sessions for a user
@@ -399,8 +384,6 @@ The following actions are **NOT** performed. The list may be incomplete.
## Reset password
**Note:** This API is disabled when MSC3861 is enabled. [See #15582](https://github.com/matrix-org/synapse/pull/15582)
Changes the password of another user. This will automatically log the user out of all their devices.
The api is:
@@ -424,8 +407,6 @@ The parameter `logout_devices` is optional and defaults to `true`.
## Get whether a user is a server administrator or not
**Note:** This API is disabled when MSC3861 is enabled. [See #15582](https://github.com/matrix-org/synapse/pull/15582)
The api is:
```
@@ -443,8 +424,6 @@ A response body like the following is returned:
## Change whether a user is a server administrator or not
**Note:** This API is disabled when MSC3861 is enabled. [See #15582](https://github.com/matrix-org/synapse/pull/15582)
Note that you cannot demote yourself.
The api is:
@@ -738,8 +717,6 @@ delete largest/smallest or newest/oldest files first.
## Login as a user
**Note:** This API is disabled when MSC3861 is enabled. [See #15582](https://github.com/matrix-org/synapse/pull/15582)
Get an access token that can be used to authenticate as that user. Useful for
when admins wish to do actions on behalf of a user.
@@ -752,8 +729,7 @@ POST /_synapse/admin/v1/users/<user_id>/login
An optional `valid_until_ms` field can be specified in the request body as an
integer timestamp that specifies when the token should expire. By default tokens
do not expire. Note that this API does not allow a user to log in as themselves
(to create more tokens).
A response body like the following is returned:
@@ -1204,7 +1180,7 @@ The following parameters should be set in the URL:
- `user_id` - The fully qualified MXID: for example, `@user:server.com`. The user must
be local.
## Check username availability
Checks to see if a username is available, and valid, for the server. See [the client-server
API](https://matrix.org/docs/spec/client_server/r0.6.0#get-matrix-client-r0-register-available)
@@ -1222,7 +1198,7 @@ GET /_synapse/admin/v1/username_available?username=$localpart
The request and response format is the same as the
[/_matrix/client/r0/register/available](https://matrix.org/docs/spec/client_server/r0.6.0#get-matrix-client-r0-register-available) API.
## Find a user based on their ID in an auth provider
The API is:
@@ -1261,7 +1237,7 @@ Returns a `404` HTTP status code if no user was found, with a response body like
_Added in Synapse 1.68.0._
## Find a user based on their Third Party ID (ThreePID or 3PID)
The API is:

View File

@@ -1,7 +1,7 @@
# Version API
This API returns the running Synapse version.
This is useful when a Synapse instance
This API returns the running Synapse version and the Python version
on which Synapse is being run. This is useful when a Synapse instance
is behind a proxy that does not forward the 'Server' header (which also
contains Synapse version information).
@@ -15,9 +15,7 @@ It returns a JSON body like the following:
```json
{
"server_version": "0.99.2rc1 (b=develop, abcdef123)"
"server_version": "0.99.2rc1 (b=develop, abcdef123)",
"python_version": "3.7.8"
}
```
*Changed in Synapse 1.94.0:* The `python_version` key was removed from the
response body.
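As a quick illustration, the endpoint can be queried without authentication; a minimal sketch with `requests` (placeholder homeserver URL):

```python
import requests

resp = requests.get(
    "https://matrix.example.com/_synapse/admin/v1/server_version",
    timeout=10,
)
print(resp.json()["server_version"])
```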

View File

@@ -24,7 +24,7 @@ Server with a domain specific API.
1. **Messaging Layer**
This is what the rest of the homeserver hits to send messages, join rooms,
etc. It also allows you to register callbacks for when it gets notified by
lower levels that e.g. a new message has been received.
It is responsible for serializing requests to send to the data

View File

@@ -164,7 +164,7 @@ Synapse 1.6.0rc2 (2019-11-25)
Bugfixes
--------
- Fix a bug which could cause the background database update handler for event labels to get stuck in a loop raising exceptions. ([\#6407](https://github.com/matrix-org/synapse/issues/6407))
Synapse 1.6.0rc1 (2019-11-20)
@@ -191,7 +191,7 @@ Bugfixes
- Appservice requests will no longer contain a double slash prefix when the appservice url provided ends in a slash. ([\#6306](https://github.com/matrix-org/synapse/issues/6306))
- Fix `/purge_room` admin API. ([\#6307](https://github.com/matrix-org/synapse/issues/6307))
- Fix the `hidden` field in the `devices` table for SQLite versions prior to 3.23.0. ([\#6313](https://github.com/matrix-org/synapse/issues/6313))
- Fix bug which caused rejected events to be persisted with the wrong room state. ([\#6320](https://github.com/matrix-org/synapse/issues/6320))
- Fix bug where `rc_login` ratelimiting would prematurely kick in. ([\#6335](https://github.com/matrix-org/synapse/issues/6335))
- Prevent the server taking a long time to start up when guest registration is enabled. ([\#6338](https://github.com/matrix-org/synapse/issues/6338))
- Fix bug where upgrading a guest account to a full user would fail when account validity is enabled. ([\#6359](https://github.com/matrix-org/synapse/issues/6359))
@@ -232,7 +232,7 @@ Internal Changes
- Add some documentation about worker replication. ([\#6305](https://github.com/matrix-org/synapse/issues/6305))
- Move admin endpoints into separate files. Contributed by Awesome Technologies Innovationslabor GmbH. ([\#6308](https://github.com/matrix-org/synapse/issues/6308))
- Document the use of `lint.sh` for code style enforcement & extend it to run on specified paths only. ([\#6312](https://github.com/matrix-org/synapse/issues/6312))
- Add optional python dependencies and dependent binary libraries to snapcraft packaging. ([\#6317](https://github.com/matrix-org/synapse/issues/6317))
- Remove the dependency on psutil and replace functionality with the stdlib `resource` module. ([\#6318](https://github.com/matrix-org/synapse/issues/6318), [\#6336](https://github.com/matrix-org/synapse/issues/6336))
- Improve documentation for EventContext fields. ([\#6319](https://github.com/matrix-org/synapse/issues/6319))
- Add some checks that we aren't using state from rejected events. ([\#6330](https://github.com/matrix-org/synapse/issues/6330))
@@ -653,7 +653,7 @@ Internal Changes
- Return 502 not 500 when failing to reach any remote server. ([\#5810](https://github.com/matrix-org/synapse/issues/5810))
- Reduce global pauses in the events stream caused by expensive state resolution during persistence. ([\#5826](https://github.com/matrix-org/synapse/issues/5826))
- Add a lower bound to well-known lookup cache time to avoid repeated lookups. ([\#5836](https://github.com/matrix-org/synapse/issues/5836))
- Whitelist history visibility sytests in worker mode tests. ([\#5843](https://github.com/matrix-org/synapse/issues/5843))
Synapse 1.2.1 (2019-07-26)
@@ -817,7 +817,7 @@ See the [upgrade notes](docs/upgrade.md#upgrading-to-v110) for more details.
Features
--------
- Added possibility to disable local password authentication. Contributed by Daniel Hoffend. ([\#5092](https://github.com/matrix-org/synapse/issues/5092))
- Add monthly active users to phonehome stats. ([\#5252](https://github.com/matrix-org/synapse/issues/5252))
- Allow expired user to trigger renewal email sending manually. ([\#5363](https://github.com/matrix-org/synapse/issues/5363))
- Statistics on forward extremities per room are now exposed via Prometheus. ([\#5384](https://github.com/matrix-org/synapse/issues/5384), [\#5458](https://github.com/matrix-org/synapse/issues/5458), [\#5461](https://github.com/matrix-org/synapse/issues/5461))
@@ -850,7 +850,7 @@ Bugfixes
- Fix bug where clients could tight loop calling `/sync` for a period. ([\#5507](https://github.com/matrix-org/synapse/issues/5507))
- Fix bug with `jinja2` preventing Synapse from starting. Users who had this problem should now simply need to run `pip install matrix-synapse`. ([\#5514](https://github.com/matrix-org/synapse/issues/5514))
- Fix a regression where homeservers on private IP addresses were incorrectly blacklisted. ([\#5523](https://github.com/matrix-org/synapse/issues/5523))
- Fixed m.login.jwt using unregistered user_id and added pyjwt>=1.6.4 as jwt conditional dependencies. Contributed by Pau Rodriguez-Estivill. ([\#5555](https://github.com/matrix-org/synapse/issues/5555), [\#5586](https://github.com/matrix-org/synapse/issues/5586))
- Fix a bug that would cause invited users to receive several emails for a single 3PID invite in case the inviter is rate limited. ([\#5576](https://github.com/matrix-org/synapse/issues/5576))

View File

@@ -251,7 +251,7 @@ Internal Changes
- Optimise `/createRoom` with multiple invited users. ([\#8559](https://github.com/matrix-org/synapse/issues/8559))
- Implement and use an `@lru_cache` decorator. ([\#8595](https://github.com/matrix-org/synapse/issues/8595))
- Don't instantiate Requester directly. ([\#8614](https://github.com/matrix-org/synapse/issues/8614))
- Type hints for `RegistrationStore`. ([\#8615](https://github.com/matrix-org/synapse/issues/8615))
- Change schema to support access tokens belonging to one user but granting access to another. ([\#8616](https://github.com/matrix-org/synapse/issues/8616))
- Remove unused OPTIONS handlers. ([\#8621](https://github.com/matrix-org/synapse/issues/8621))
@@ -518,7 +518,7 @@ Bugfixes
- Fix a bug which caused the logging system to report errors if `DEBUG` was enabled and no `context` filter was applied. ([\#8278](https://github.com/matrix-org/synapse/issues/8278))
- Fix edge case where push could get delayed for a user until a later event was pushed. ([\#8287](https://github.com/matrix-org/synapse/issues/8287))
- Fix fetching malformed events from remote servers. ([\#8324](https://github.com/matrix-org/synapse/issues/8324))
- Fix `UnboundLocalError` from occurring when appservices send a malformed register request. ([\#8329](https://github.com/matrix-org/synapse/issues/8329))
- Don't send push notifications to expired user accounts. ([\#8353](https://github.com/matrix-org/synapse/issues/8353))
- Fix a regression in v1.19.0 with reactivating users through the admin API. ([\#8362](https://github.com/matrix-org/synapse/issues/8362))
- Fix a bug where during device registration the length of the device name wasn't limited. ([\#8364](https://github.com/matrix-org/synapse/issues/8364))
@@ -815,7 +815,7 @@ Bugfixes
- Fix a bug introduced in Synapse v1.7.2 which caused inaccurate membership counts in the room directory. ([\#7977](https://github.com/matrix-org/synapse/issues/7977))
- Fix a long standing bug: 'Duplicate key value violates unique constraint "event_relations_id"' when message retention is configured. ([\#7978](https://github.com/matrix-org/synapse/issues/7978))
- Fix "no create event in auth events" when trying to reject invitation after inviter leaves. Bug introduced in Synapse v1.10.0. ([\#7980](https://github.com/matrix-org/synapse/issues/7980))
- Fix various comments and minor discrepancies in server notices code. ([\#7996](https://github.com/matrix-org/synapse/issues/7996))
- Fix a long standing bug where HTTP HEAD requests resulted in a 400 error. ([\#7999](https://github.com/matrix-org/synapse/issues/7999))
- Fix a long-standing bug which caused two copies of some log lines to be written when synctl was used along with a MemoryHandler logger. ([\#8011](https://github.com/matrix-org/synapse/issues/8011), [\#8012](https://github.com/matrix-org/synapse/issues/8012))
@@ -1460,7 +1460,7 @@ Bugfixes
- Transfer alias mappings on room upgrade. ([\#6946](https://github.com/matrix-org/synapse/issues/6946))
- Ensure that a user interactive authentication session is tied to a single request. ([\#7068](https://github.com/matrix-org/synapse/issues/7068), [\#7455](https://github.com/matrix-org/synapse/issues/7455))
- Fix a bug in the federation API which could cause occasional "Failed to get PDU" errors. ([\#7089](https://github.com/matrix-org/synapse/issues/7089))
- Return the proper error (`M_BAD_ALIAS`) when a non-existent canonical alias is provided. ([\#7109](https://github.com/matrix-org/synapse/issues/7109))
- Fix a bug which meant that groups updates were not correctly replicated between workers. ([\#7117](https://github.com/matrix-org/synapse/issues/7117))
- Fix starting workers when federation sending not split out. ([\#7133](https://github.com/matrix-org/synapse/issues/7133))
- Ensure `is_verified` is a boolean in responses to `GET /_matrix/client/r0/room_keys/keys`. Also warn the user if they forgot the `version` query param. ([\#7150](https://github.com/matrix-org/synapse/issues/7150))
@@ -1482,7 +1482,7 @@ Bugfixes
- Fix bad error handling that would cause Synapse to crash if it's provided with a YAML configuration file that's either empty or doesn't parse into a key-value map. ([\#7341](https://github.com/matrix-org/synapse/issues/7341))
- Fix incorrect metrics reporting for `renew_attestations` background task. ([\#7344](https://github.com/matrix-org/synapse/issues/7344))
- Prevent non-federating rooms from appearing in responses to federated `POST /publicRoom` requests when a filter was included. ([\#7367](https://github.com/matrix-org/synapse/issues/7367))
- Fix a bug which would cause the room directory to be incorrectly populated if Synapse was upgraded directly from v1.2.1 or earlier to v1.4.0 or later. Note that this fix does not apply retrospectively; see the [upgrade notes](docs/upgrade.md#upgrading-to-v1130) for more information. ([\#7387](https://github.com/matrix-org/synapse/issues/7387))
- Fix bug in `EventContext.deserialize`. ([\#7393](https://github.com/matrix-org/synapse/issues/7393))
@@ -1638,7 +1638,7 @@ Security advisory
-----------------
Synapse may be vulnerable to request-smuggling attacks when it is used with a
reverse-proxy. The vulnerabilities are fixed in Twisted 20.3.0, and are
described in
[CVE-2020-10108](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-10108)
and
@@ -1748,7 +1748,7 @@ Internal Changes
- Refactoring work in preparation for changing the event redaction algorithm. ([\#6874](https://github.com/matrix-org/synapse/issues/6874), [\#6875](https://github.com/matrix-org/synapse/issues/6875), [\#6983](https://github.com/matrix-org/synapse/issues/6983), [\#7003](https://github.com/matrix-org/synapse/issues/7003))
- Improve performance of v2 state resolution for large rooms. ([\#6952](https://github.com/matrix-org/synapse/issues/6952), [\#7095](https://github.com/matrix-org/synapse/issues/7095))
- Reduce time spent doing GC, by freezing objects on startup. ([\#6953](https://github.com/matrix-org/synapse/issues/6953))
- Minor performance fixes to `get_auth_chain_ids`. ([\#6954](https://github.com/matrix-org/synapse/issues/6954))
- Don't record remote cross-signing keys in the `devices` table. ([\#6956](https://github.com/matrix-org/synapse/issues/6956))
- Use flake8-comprehensions to enforce good hygiene of list/set/dict comprehensions. ([\#6957](https://github.com/matrix-org/synapse/issues/6957))
- Merge worker apps together. ([\#6964](https://github.com/matrix-org/synapse/issues/6964), [\#7002](https://github.com/matrix-org/synapse/issues/7002), [\#7055](https://github.com/matrix-org/synapse/issues/7055), [\#7104](https://github.com/matrix-org/synapse/issues/7104))
@@ -1809,7 +1809,7 @@ Bugfixes
- Allow URL-encoded User IDs on `/_synapse/admin/v2/users/<user_id>[/admin]` endpoints. Thanks to @NHAS for reporting. ([\#6825](https://github.com/matrix-org/synapse/issues/6825))
- Fix Synapse refusing to start if `federation_certificate_verification_whitelist` option is blank. ([\#6849](https://github.com/matrix-org/synapse/issues/6849))
- Fix errors from logging in the purge jobs related to the message retention policies support. ([\#6945](https://github.com/matrix-org/synapse/issues/6945))
- Return a 404 instead of 200 for querying information of a non-existent user through the admin API. ([\#6901](https://github.com/matrix-org/synapse/issues/6901))
Updates to the Docker image
@@ -1889,7 +1889,7 @@ Bugfixes
Synapse 1.10.0rc4 (2020-02-11)
==============================
This release candidate was built incorrectly and is superseded by 1.10.0rc5.
Synapse 1.10.0rc3 (2020-02-10)
==============================

View File

@@ -2270,7 +2270,7 @@ Features
Bugfixes
--------
- Fix spurious errors in logs when deleting a non-existent pusher. ([\#9121](https://github.com/matrix-org/synapse/issues/9121))
- Fix a long-standing bug where Synapse would return a 500 error when a thumbnail did not exist (and auto-generation of thumbnails was not enabled). ([\#9163](https://github.com/matrix-org/synapse/issues/9163))
- Fix a long-standing bug where an internal server error was raised when attempting to preview an HTML document in an unknown character encoding. ([\#9164](https://github.com/matrix-org/synapse/issues/9164))
- Fix a long-standing bug where invalid data could cause errors when calculating the presentable room name for push. ([\#9165](https://github.com/matrix-org/synapse/issues/9165))
@@ -2522,7 +2522,7 @@ Bugfixes
- Fix a long-standing bug where a `m.image` event without a `url` would cause errors on push. ([\#8965](https://github.com/matrix-org/synapse/issues/8965))
- Fix a small bug in v2 state resolution algorithm, which could also cause performance issues for rooms with large numbers of power levels. ([\#8971](https://github.com/matrix-org/synapse/issues/8971))
- Add validation to the `sendToDevice` API to raise a missing parameters error instead of a 500 error. ([\#8975](https://github.com/matrix-org/synapse/issues/8975))
- Add validation of group IDs to raise a 400 error instead of a 500 error. ([\#8977](https://github.com/matrix-org/synapse/issues/8977))
Improved Documentation

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -8,9 +8,9 @@ to the server until they have.
There are several parts to this functionality; each requires some specific
configuration in `homeserver.yaml` to be enabled.
Note that various parts of the configuration and this document refer to the
"privacy policy": agreement with a privacy policy is one particular use of this
feature, but of course administrators can specify other terms and conditions
unrelated to "privacy" per se.
Collecting policy agreement from a user

View File

@@ -23,7 +23,7 @@ people building from source should ensure they can fetch recent versions of Rust
(e.g. by using [rustup](https://rustup.rs/)).
The oldest supported version of SQLite is the version
[provided](https://packages.debian.org/bullseye/libsqlite3-0) by
[provided](https://packages.debian.org/buster/libsqlite3-0) by
[Debian oldstable](https://wiki.debian.org/DebianOldStable).
Context

View File

@@ -266,7 +266,7 @@ The easiest way to do so is to run Postgres via a docker container. In one
terminal:
```shell
docker run --rm -e POSTGRES_PASSWORD=mysecretpassword -e POSTGRES_USER=postgres -e POSTGRES_DB=postgres -p 5432:5432 postgres:14
```
If you see an error like
@@ -322,7 +322,7 @@ The following command will let you run the integration test with the most common
configuration:
```sh
$ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:focal
$ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:buster
```
(Note that the paths must be full paths! You could also write `$(realpath relative/path)` if needed.)
@@ -370,7 +370,6 @@ The above will run a monolithic (single-process) Synapse with SQLite as the data
See the [worker documentation](../workers.md) for additional information on workers.
- Passing `ASYNCIO_REACTOR=1` as an environment variable to use the Twisted asyncio reactor instead of the default one.
- Passing `PODMAN=1` will use the [podman](https://podman.io/) container runtime, instead of docker.
- Passing `UNIX_SOCKETS=1` will utilise Unix socket functionality for Synapse, Redis, and Postgres (when applicable).
To increase the log level for the tests, set `SYNAPSE_TEST_LOG_LEVEL`, e.g:
```sh

View File

@@ -184,160 +184,3 @@ version `3`, that can only happen with a hash collision, which we basically hope
will never happen (SHA256 has a massively large key space).
## Worked examples of gradual migrations
Some migrations need to be performed gradually. A prime example of this is anything
which would need to do a large table scan — including adding columns, indices or
`NOT NULL` constraints to non-empty tables — such a migration should be done as a
background update where possible, at least on Postgres.
We can afford to be more relaxed about SQLite databases since they are usually
used on smaller deployments and SQLite does not support the same concurrent
DDL operations as Postgres.
We also typically insist on having at least one Synapse version's worth of
backwards compatibility, so that administrators can roll back Synapse if an upgrade
did not go smoothly.
This sometimes results in having to plan a migration across multiple versions
of Synapse.
This section includes an example and may include more in the future.
### Transforming a column into another one, with `NOT NULL` constraints
This example illustrates how you would introduce a new column, write data into it
based on data from an old column and then drop the old column.
We are aiming for semantic equivalence to:
```sql
ALTER TABLE mytable ADD COLUMN new_column INTEGER;
UPDATE mytable SET new_column = old_column * 100;
ALTER TABLE mytable ALTER COLUMN new_column SET NOT NULL;
ALTER TABLE mytable DROP COLUMN old_column;
```
#### Synapse version `N`
```python
SCHEMA_VERSION = S
SCHEMA_COMPAT_VERSION = ... # unimportant at this stage
```
**Invariants:**
1. `old_column` is read by Synapse and written to by Synapse.
#### Synapse version `N + 1`
```python
SCHEMA_VERSION = S + 1
SCHEMA_COMPAT_VERSION = ... # unimportant at this stage
```
**Changes:**
1.
```sql
ALTER TABLE mytable ADD COLUMN new_column INTEGER;
```
**Invariants:**
1. `old_column` is read by Synapse and written to by Synapse.
2. `new_column` is written to by Synapse.
**Notes:**
1. `new_column` can't have a `NOT NULL NOT VALID` constraint yet, because the previous Synapse version did not write to the new column (since we haven't bumped the `SCHEMA_COMPAT_VERSION` yet, we still need to be compatible with the previous version).
#### Synapse version `N + 2`
```python
SCHEMA_VERSION = S + 2
SCHEMA_COMPAT_VERSION = S + 1 # this signals that we can't roll back to a time before new_column existed
```
**Changes:**
1. On Postgres, add a `NOT VALID` constraint to ensure new rows are compliant. *SQLite does not have such a construct, but it would be unnecessary anyway since there is no way to concurrently perform this migration on SQLite.*
```sql
ALTER TABLE mytable ADD CONSTRAINT new_column_not_null CHECK (new_column IS NOT NULL) NOT VALID;
```
2. Start a background update to perform migration: it should gradually run e.g.
```sql
UPDATE mytable SET new_column = old_column * 100 WHERE 0 < mytable_id AND mytable_id <= 5;
```
This background update is technically pointless on SQLite, but you must schedule it anyway so that the `portdb` script to migrate to Postgres still works.
3. Upon completion of the background update, you should run `VALIDATE CONSTRAINT` on Postgres to turn the `NOT VALID` constraint into a valid one.
```sql
ALTER TABLE mytable VALIDATE CONSTRAINT new_column_not_null;
```
This will take some time but does **NOT** hold an exclusive lock over the table.
**Invariants:**
1. `old_column` is read by Synapse and written to by Synapse.
2. `new_column` is written to by Synapse and new rows always have a non-`NULL` value in this field.
**Notes:**
1. If you wish, you can convert the `CHECK (new_column IS NOT NULL)` to a `NOT NULL` constraint free of charge in Postgres by adding the `NOT NULL` constraint and then dropping the `CHECK` constraint, because Postgres can statically verify that the `NOT NULL` constraint is implied by the `CHECK` constraint without performing a table scan.
2. It might be tempting to make version `N + 2` redundant by moving the background update to `N + 1` and delaying adding the `NOT NULL` constraint to `N + 3`, but that would mean the constraint would always be validated in the foreground in `N + 3`. Whereas if the `N + 2` step is kept, the migration in `N + 3` would be fast in the happy case.
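As a rough illustration of the batched background update in step 2 above, a standalone sketch using `psycopg2` against the hypothetical `mytable` might look as follows. Synapse itself would do this through its background-update framework rather than a loose script.

```python
import psycopg2

BATCH_SIZE = 1000

conn = psycopg2.connect("dbname=synapse")  # placeholder DSN
with conn.cursor() as cur:
    # Find the highest ID so we know when to stop.
    cur.execute("SELECT COALESCE(MAX(mytable_id), 0) FROM mytable")
    max_id = cur.fetchone()[0]
    for lower in range(0, max_id, BATCH_SIZE):
        # Backfill one small chunk at a time to avoid long-held locks.
        cur.execute(
            "UPDATE mytable SET new_column = old_column * 100 "
            "WHERE new_column IS NULL AND %s < mytable_id AND mytable_id <= %s",
            (lower, lower + BATCH_SIZE),
        )
        conn.commit()  # commit per batch so each transaction stays short
conn.close()
```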
#### Synapse version `N + 3`
```python
SCHEMA_VERSION = S + 3
SCHEMA_COMPAT_VERSION = S + 1 # we can't roll back to a time before new_column existed
```
**Changes:**
1. (Postgres) Update the table to populate values of `new_column` in case the background update had not completed. Additionally, `VALIDATE CONSTRAINT` to make the check fully valid.
```sql
-- you ideally want an index on `new_column` or e.g. `(new_column) WHERE new_column IS NULL` first, or perhaps you can find a way to skip this if the `NOT NULL` constraint has already been validated.
UPDATE mytable SET new_column = old_column * 100 WHERE new_column IS NULL;
-- this is a no-op if it already ran as part of the background update
ALTER TABLE mytable VALIDATE CONSTRAINT new_column_not_null;
```
2. (SQLite) Recreate the table by precisely following [the 12-step procedure for SQLite table schema changes](https://www.sqlite.org/lang_altertable.html#otheralter).
During this table rewrite, you should recreate `new_column` as `NOT NULL` and populate any outstanding `NULL` values at the same time.
Unfortunately, you can't drop `old_column` yet because it must be present for compatibility with the Postgres schema, as needed by `portdb`.
(Otherwise you could do this all in one go with SQLite!)
**Invariants:**
1. `old_column` is written to by Synapse (but no longer read by Synapse!).
2. `new_column` is read by Synapse and written to by Synapse. Moreover, all rows have a non-`NULL` value in this field, as guaranteed by a schema constraint.
**Notes:**
1. We can't drop `old_column` yet, or even stop writing to it, because that would break a rollback to the previous version of Synapse.
2. Application code can now rely on `new_column` being populated. The remaining steps are only motivated by the wish to clean up old columns.
#### Synapse version `N + 4`
```python
SCHEMA_VERSION = S + 4
SCHEMA_COMPAT_VERSION = S + 3 # we can't roll back to a time before new_column was entirely non-NULL
```
**Invariants:**
1. `old_column` exists but is not written to or read from by Synapse.
2. `new_column` is read by Synapse and written to by Synapse. Moreover, all rows have a non-`NULL` value in this field, as guaranteed by a schema constraint.
**Notes:**
1. We can't drop `old_column` yet because that would break a rollback to the previous version of Synapse. \
**TODO:** It may be possible to relax this and drop the column straight away as long as the previous version of Synapse detected a rollback occurred and stopped attempting to write to the column. This could possibly be done by checking whether the database's schema compatibility version was `S + 3`.
#### Synapse version `N + 5`
```python
SCHEMA_VERSION = S + 5
SCHEMA_COMPAT_VERSION = S + 4 # we can't roll back to a time before old_column was no longer being touched
```
**Changes:**
1.
```sql
ALTER TABLE mytable DROP COLUMN old_column;
```

View File

@@ -12,7 +12,7 @@ Note that this schedule might be modified depending on the availability of the
Synapse team, e.g. releases may be skipped to avoid holidays.
Release announcements can be found in the
[release category of the Matrix blog](https://matrix.org/category/releases).
[release category of the Matrix blog](https://matrix.org/blog/category/releases).
## Bugfix releases
@@ -34,4 +34,4 @@ be held to be released together.
In some cases, a pre-disclosure of a security release will be issued as a notice
to Synapse operators that there is an upcoming security release. These can be
found in the [security category of the Matrix blog](https://matrix.org/category/security).
found in the [security category of the Matrix blog](https://matrix.org/blog/category/security).

View File

@@ -6,7 +6,7 @@ This is a work-in-progress set of notes with two goals:
See also [MSC3902](https://github.com/matrix-org/matrix-spec-proposals/pull/3902).
The key idea is described by [MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706). This allows servers to
request a lightweight response to the federation `/send_join` endpoint.
This is called a **faster join**, also known as a **partial join**. In these
notes we'll usually use the word "partial" as it matches the database schema.
@@ -264,7 +264,7 @@ But don't want to send out sensitive data in other HS's events in this way.
Suppose we discover after resync that we shouldn't have sent out one of our events (not a prev_event) to a target HS. Not much we can do.
What about if we didn't send them an event but shouldn't've?
E.g. what if someone joined from a new HS shortly after you did? We wouldn't talk to them.
Could imagine sending out the "Missed" events after the resync but... painful to work out what they should have seen if they joined/left.
Could imagine sending out the "Missed" events after the resync but... painful to work out what they shuld have seen if they joined/left.
Instead, just send them the latest event (if they're still in the room after resync) and let them backfill.(?)
- Don't do this currently.
- If anyone who has received our messages sends a message to a HS we missed, they can backfill our messages

View File

@@ -1,157 +0,0 @@
## Streams
Synapse has a concept of "streams", which are roughly described in [`id_generators.py`](
https://github.com/matrix-org/synapse/blob/develop/synapse/storage/util/id_generators.py
).
Generally speaking, streams are a series of notifications that something in Synapse's database has changed that the application might need to respond to.
For example:
- The events stream reports new events (PDUs) that Synapse creates, or that Synapse accepts from another homeserver.
- The account data stream reports changes to users' [account data](https://spec.matrix.org/v1.7/client-server-api/#client-config).
- The to-device stream reports when a device has a new [to-device message](https://spec.matrix.org/v1.7/client-server-api/#send-to-device-messaging).
See [`synapse.replication.tcp.streams`](
https://github.com/matrix-org/synapse/blob/develop/synapse/replication/tcp/streams/__init__.py
) for the full list of streams.
It is very helpful to understand the streams mechanism when working on any part of Synapse that needs to respond to changes—especially if those changes are made by different workers.
To that end, let's describe streams formally, paraphrasing from the docstring of [`AbstractStreamIdGenerator`](
https://github.com/matrix-org/synapse/blob/a719b703d9bd0dade2565ddcad0e2f3a7a9d4c37/synapse/storage/util/id_generators.py#L96
).
### Definition
A stream is an append-only log `T1, T2, ..., Tn, ...` of facts[^1] which grows over time.
Only "writers" can add facts to a stream, and there may be multiple writers.
Each fact has an ID, called its "stream ID".
Readers should only process facts in ascending stream ID order.
Roughly speaking, each stream is backed by a database table.
It should have a `stream_id` (or similar) bigint column holding stream IDs, plus additional columns as necessary to describe the fact.
Typically, a fact is expressed with a single row in its backing table.[^2]
Within a stream, no two facts may have the same stream_id.
> _Aside_. Some additional notes on streams' backing tables.
>
> 1. Rich would like to [ditch the backing tables](https://github.com/matrix-org/synapse/issues/13456).
> 2. The backing tables may have other uses.
> For example, the events table backs the events stream, and is read when processing new events.
> But old rows are read from the table all the time, whenever Synapse needs to look up some facts about an event.
> 3. Rich suspects that sometimes the stream is backed by multiple tables, so the stream proper is the union of those tables.
Stream writers can "reserve" a stream ID, and then later mark it as having being completed.
Stream writers need to track the completion of each stream fact.
In the happy case, completion means a fact has been written to the stream table.
But unhappy cases (e.g. transaction rollback due to an error) also count as completion.
Once completed, the rows written with that stream ID are fixed, and no new rows
will be inserted with that ID.
### Current stream ID
For any given stream reader (including writers themselves), we may define a per-writer current stream ID:
> The current stream ID _for a writer W_ is the largest stream ID such that
> all transactions added by W with equal or smaller ID have completed.
Similarly, there is a "linear" notion of current stream ID:
> The "linear" current stream ID is the largest stream ID such that
> all facts (added by any writer) with equal or smaller ID have completed.
Because different stream readers A and B learn about new facts at different times, A and B may disagree about current stream IDs.
Put differently: we should think of stream readers as being independent of each other, proceeding through a stream of facts at different rates.
**NB.** For both senses of "current", that if a writer opens a transaction that never completes, the current stream ID will never advance beyond that writer's last written stream ID.
For single-writer streams, the per-writer current ID and the linear current ID are the same.
Both senses of current ID are monotonic, but they may "skip" or jump over IDs because facts complete out of order.
_Example_.
Consider a single-writer stream which is initially at ID 1.
| Action | Current stream ID | Notes |
|------------|-------------------|-------------------------------------------------|
| | 1 | |
| Reserve 2 | 1 | |
| Reserve 3 | 1 | |
| Complete 3 | 1 | current ID unchanged, waiting for 2 to complete |
| Complete 2 | 3 | current ID jumps from 1 -> 3 |
| Reserve 4 | 3 | |
| Reserve 5 | 3 | |
| Reserve 6 | 3 | |
| Complete 5 | 3 | |
| Complete 4 | 5 | current ID jumps 3->5, even though 6 is pending |
| Complete 6 | 6 | |
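The bookkeeping in the example above can be captured in a few lines. The class below is a toy model of a single-writer stream's current position, not Synapse's actual ID generator:

```python
class SingleWriterStream:
    """Toy model of one writer's reserved IDs and current stream ID."""

    def __init__(self) -> None:
        self._next_id = 2      # the example stream starts at ID 1
        self._current = 1
        self._pending = set()  # reserved but not yet completed

    def reserve(self) -> int:
        stream_id = self._next_id
        self._next_id += 1
        self._pending.add(stream_id)
        return stream_id

    def complete(self, stream_id: int) -> None:
        self._pending.discard(stream_id)
        # Advance while the next ID has been reserved and has completed.
        while self._current + 1 < self._next_id and self._current + 1 not in self._pending:
            self._current += 1

    @property
    def current(self) -> int:
        return self._current


s = SingleWriterStream()
two, three = s.reserve(), s.reserve()
s.complete(three)
print(s.current)  # 1: still waiting on 2
s.complete(two)
print(s.current)  # 3: jumps straight from 1 to 3
```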
### Multi-writer streams
There are two ways to view a multi-writer stream.
1. Treat it as a collection of distinct single-writer streams, one
for each writer.
2. Treat it as a single stream.
The single stream (option 2) is conceptually simpler, and easier to represent (a single stream id).
However, it requires each reader to know about the entire set of writers, to ensure that readers don't erroneously advance their current stream position too early and miss a fact from an unknown writer.
In contrast, multiple parallel streams (option 1) are more complex, requiring more state to represent (map from writer to stream id).
The payoff for doing so is that readers can "peek" ahead to facts that completed on one writer no matter the state of the others, reducing latency.
Note that a multi-writer stream can be viewed in both ways.
For example, the events stream is treated as multiple single-writer streams (option 1) by the sync handler, so that events are sent to clients as soon as possible.
But the background process that works through events treats them as a single linear stream.
Another useful example is the cache invalidation stream.
The facts this stream holds are instructions to "you should now invalidate these cache entries".
We only ever treat this as multiple single-writer streams as there is no important ordering between cache invalidations.
(Invalidations are self-contained facts; and the invalidations commute/are idempotent).
### Writing to streams
Writers need to track:
- their current position (i.e. their own per-writer stream ID).
- their facts currently awaiting completion.
At startup,
- the current position of that writer can be found by querying the database (which suggests that facts need to be written to the database atomically, in a transaction); and
- there are no facts awaiting completion.
To reserve a stream ID, call [`nextval`](https://www.postgresql.org/docs/current/functions-sequence.html) on the appropriate postgres sequence.
To write a fact to the stream: insert the appropriate rows to the appropriate backing table.
To complete a fact, first remove it from your map of facts currently awaiting completion.
Then, if no earlier fact is awaiting completion, the writer can advance its current position in that stream.
Upon doing so it should emit an `RDATA` message[^3], once for every fact between the old and the new stream ID.
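Putting those steps together, a sketch of the write path might look like this, with a hypothetical `myfacts` backing table and `myfacts_seq` sequence (the `RDATA` emission is elided):

```python
import psycopg2

conn = psycopg2.connect("dbname=synapse")  # placeholder DSN
with conn, conn.cursor() as cur:
    # Reserve a stream ID from the Postgres sequence...
    cur.execute("SELECT nextval('myfacts_seq')")
    (stream_id,) = cur.fetchone()
    # ...and write the fact in the same transaction. If the transaction
    # rolls back, the reserved ID is simply skipped, which still counts
    # as "completed" for the purposes of advancing the current position.
    cur.execute(
        "INSERT INTO myfacts (stream_id, payload) VALUES (%s, %s)",
        (stream_id, "example fact"),
    )
```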
### Subscribing to streams
Readers need to track the current position of every writer.
At startup, they can find this by contacting each writer with a `REPLICATE` message,
requesting that all writers reply describing their current position in their streams.
Writers reply with a `POSITION` message.
To learn about new facts, readers should listen for `RDATA` messages and process them to respond to the new fact.
The `RDATA` itself is not a self-contained representation of the fact;
readers will have to query the stream tables for the full details.
Readers must also advance their record of the writer's current position for that stream.
# Summary
In a nutshell: we have an append-only log with a "buffer/scratchpad" at the end where we have to wait for the sequence to be linear and contiguous.
---
[^1]: we use the word _fact_ here for two reasons.
Firstly, the word "event" is already heavily overloaded (PDUs, EDUs, account data, ...) and we don't need to make that worse.
Secondly, "fact" emphasises that the things we append to a stream cannot change after the fact.
[^2]: A fact might be expressed with 0 rows, e.g. if we opened a transaction to persist an event, but failed and rolled the transaction back before marking the fact as completed.
In principle a fact might be expressed with 2 or more rows; if so, each of those rows should share the fact's stream ID.
[^3]: This communication used to happen directly with the writers [over TCP](../../tcp_replication.md);
nowadays it's done via Redis's Pubsub.

View File

@@ -86,7 +86,7 @@ So we have stopped processing the request (and will probably go on to
start processing the next), without clearing the logcontext.
To circumvent this problem, synapse code assumes that, wherever you have
an awaitable, you will want to `await` it. To that end, wherever
functions return awaitables, we adopt the following conventions:
**Rules for functions returning awaitables:**

View File

@@ -8,7 +8,8 @@ and allow server and room admins to configure how long messages should
be kept in a homeserver's database before being purged from it.
**Please note that, as this feature isn't part of the Matrix
specification yet, this implementation is to be considered as
experimental.**
experimental. There are known bugs which may cause database corruption.
Proceed with caution.**
A message retention policy is mainly defined by its `max_lifetime`
parameter, which defines how long a message can be kept around after

View File

@@ -348,42 +348,6 @@ callback returns `False`, Synapse falls through to the next one. The value of th
callback that does not return `False` will be used. If this happens, Synapse will not call
any of the subsequent implementations of this callback.
### `check_login_for_spam`
_First introduced in Synapse v1.87.0_
```python
async def check_login_for_spam(
user_id: str,
device_id: Optional[str],
initial_display_name: Optional[str],
request_info: Collection[Tuple[Optional[str], str]],
auth_provider_id: Optional[str] = None,
) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes"]
```
Called when a user logs in.
The arguments passed to this callback are:
* `user_id`: The user ID the user is logging in with
* `device_id`: The device ID the user is re-logging into.
* `initial_display_name`: The device display name, if any.
* `request_info`: A collection of tuples, whose first item is a user agent and whose
second item is an IP address. These user agents and IP addresses are the ones that were
used during the login process.
* `auth_provider_id`: The identifier of the SSO authentication provider, if any.
If multiple modules implement this callback, they will be considered in order. If a
callback returns `synapse.module_api.NOT_SPAM`, Synapse falls through to the next one.
The value of the first callback that does not return `synapse.module_api.NOT_SPAM` will
be used. If this happens, Synapse will not call any of the subsequent implementations of
this callback.
*Note:* This will not be called when a user registers.
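For instance, a module might wire this callback up as in the sketch below; the class name and blocklist logic are invented for illustration, and the registration call is assumed to go through the module API's spam-checker callback registration.

```python
from typing import Collection, Optional, Tuple, Union

import synapse.module_api


class BlockedIPLoginChecker:
    def __init__(self, config: dict, api: "synapse.module_api.ModuleApi"):
        self._blocked_ips = set(config.get("blocked_ips", []))
        api.register_spam_checker_callbacks(
            check_login_for_spam=self.check_login_for_spam,
        )

    async def check_login_for_spam(
        self,
        user_id: str,
        device_id: Optional[str],
        initial_display_name: Optional[str],
        request_info: Collection[Tuple[Optional[str], str]],
        auth_provider_id: Optional[str] = None,
    ) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes"]:
        # Reject the login if any request during it came from a blocked IP.
        for _user_agent, ip in request_info:
            if ip in self._blocked_ips:
                return synapse.module_api.errors.Codes.FORBIDDEN
        return synapse.module_api.NOT_SPAM
```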
## Example
The example below is a module that implements the spam checker callback

View File

@@ -249,7 +249,7 @@ of `COLLATE` and `CTYPE` unless the config flag `allow_unsafe_locale`, found in
underneath the database, or if a different version of the locale is used on any
replicas.
If you have a database with an unsafe locale, the safest way to fix the issue is to dump the database and recreate it with
the correct locale parameter (as shown above). It is also possible to change the
parameters on a live database and run a `REINDEX` on the entire database,
however extreme care must be taken to avoid database corruption.

View File

@@ -95,7 +95,7 @@ matrix.example.com {
}
example.com:8448 {
reverse_proxy /_matrix/* localhost:8008
reverse_proxy localhost:8008
}
```

View File

@@ -37,7 +37,7 @@ Dockerfile to automate a synapse server in a single Docker image, at
<https://hub.docker.com/r/avhost/docker-matrix/tags/>
Slavi Pantaleev has created an Ansible playbook,
which installs the official Docker image of Matrix Synapse
along with many other Matrix-related services (Postgres database, Element, coturn,
ma1sd, SSL support, etc.).
For more details, see
@@ -93,7 +93,7 @@ For `bookworm` and `sid`, it can be installed simply with:
sudo apt install matrix-synapse
```
Synapse is also available in `bullseye-backports`. Please
see the [Debian documentation](https://backports.debian.org/Instructions/)
for information on how to use backports.
@@ -135,8 +135,8 @@ Unofficial packages are built for SLES 15 in the openSUSE:Backports:SLE-15 reposi
#### ArchLinux
The quickest way to get up and running with ArchLinux is probably with the package provided by ArchLinux
<https://archlinux.org/packages/extra/x86_64/matrix-synapse/>, which should pull in most of
The quickest way to get up and running with ArchLinux is probably with the community package
<https://archlinux.org/packages/community/x86_64/matrix-synapse/>, which should pull in most of
the necessary dependencies.
pip may be outdated (6.0.7-1 and needs to be upgraded to 6.0.8-1):
@@ -155,14 +155,6 @@ sudo pip uninstall py-bcrypt
sudo pip install py-bcrypt
```
#### Alpine Linux
6543 maintains [Synapse packages for Alpine Linux](https://pkgs.alpinelinux.org/packages?name=synapse&branch=edge) in the community repository. Install with:
```sh
sudo apk add synapse
```
#### Void Linux
Synapse can be found in the void repositories as
@@ -208,7 +200,7 @@ When following this route please make sure that the [Platform-specific prerequis
System requirements:
- POSIX-compliant system (tested on Linux & OS X)
- Python 3.8 or later, up to Python 3.11.
- Python 3.7 or later, up to Python 3.11.
- At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org
If building on an uncommon architecture for which pre-built wheels are

View File

@@ -3,7 +3,7 @@
A structured logging system can be useful when your logs are destined for a
machine to parse and process. By maintaining its machine-readable characteristics,
it enables more efficient searching and aggregations when consumed by software
such as the [ELK stack](https://opensource.com/article/18/9/open-source-log-aggregation-tools).
such as the "ELK stack".
Synapse's structured logging system is configured via the file that Synapse's
`log_config` config option points to. The file should include a formatter which

View File

@@ -1,4 +1,8 @@
worker_app: synapse.app.generic_worker
worker_name: background_worker
# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
worker_log_config: /etc/matrix-synapse/background-worker-log.yaml

View File

@@ -1,5 +1,9 @@
worker_app: synapse.app.generic_worker
worker_name: event_persister1
# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
worker_listeners:
- type: http

View File

@@ -1,4 +1,8 @@
worker_app: synapse.app.federation_sender
worker_name: federation_sender1
# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
worker_log_config: /etc/matrix-synapse/federation-sender-log.yaml

View File

@@ -1,6 +1,10 @@
worker_app: synapse.app.media_repository
worker_name: media_worker
# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
worker_listeners:
- type: http
port: 8085

View File

@@ -1,4 +1,8 @@
worker_app: synapse.app.pusher
worker_name: pusher_worker1
# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
worker_log_config: /etc/matrix-synapse/pusher-worker-log.yaml

View File

@@ -38,7 +38,7 @@ noted when manually using the protocol:
been disabled on the main process.
- The server will only time connections out that have sent a `PING`
command. If a ping is sent then the connection will be closed if no
further commands are received within 15s. Both the client and
server protocol implementations will send an initial PING on
connection and ensure at least one command every 5s is sent (not
necessarily `PING`).
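As a toy illustration of that rule (send something at least every 5s, give up after 15s of silence), a keepalive coroutine might look like the sketch below. This is not the actual implementation; the read loop is assumed to refresh `last_received["ts"]` whenever a command arrives.

```python
import asyncio
import time


async def keepalive(writer: asyncio.StreamWriter, last_received: dict) -> None:
    """Send a PING every 5s; close if the peer is silent for over 15s."""
    while True:
        await asyncio.sleep(5)
        if time.monotonic() - last_received["ts"] > 15:
            writer.close()  # peer sent nothing for 15s: drop the connection
            return
        # Send a PING with a millisecond timestamp (format is illustrative).
        writer.write(b"PING %d\n" % int(time.time() * 1000))
        await writer.drain()
```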
@@ -128,7 +128,7 @@ batching. See `RdataCommand` for more details.
### Example
An example interaction is shown below. Each line is prefixed with '>'
or '<' to indicate which side is sending, these are *not* included on
the wire:

View File

@@ -18,7 +18,7 @@ This documentation provides two TURN server configuration examples:
For TURN relaying to work, the TURN service must be hosted on a server/endpoint with a public IP.
Hosting TURN behind NAT requires port forwarding and for the NAT gateway to have a public IP.
However, even with appropriate configuration, NAT is known to cause issues and to often not work.
Afterwards, the homeserver needs some further configuration.

View File

@@ -88,73 +88,6 @@ process, for example:
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
```
# Upgrading to v1.93.0
## Minimum supported Rust version
The minimum supported Rust version has been increased from v1.60.0 to v1.61.0.
Users building from source will need to ensure their `rustc` version is up to
date.
# Upgrading to v1.90.0
## App service query parameter authorization is now a configuration option
Synapse v1.81.0 deprecated application service authorization via query parameters as this is
considered insecure - and from Synapse v1.71.0 forwards the application service token has also been sent via
[the `Authorization` header](https://spec.matrix.org/v1.6/application-service-api/#authorization), making the insecure
query parameter authorization redundant. Since removing the ability to continue to use query parameters could break
backwards compatibility it has now been put behind a configuration option, `use_appservice_legacy_authorization`.
This option defaults to false, but can be activated by adding
```yaml
use_appservice_legacy_authorization: true
```
to your configuration.
# Upgrading to v1.89.0
## Removal of unspecced `user` property for `/register`
Application services can no longer call `/register` with a `user` property to create new users.
The standard `username` property should be used instead. See the
[Application Service specification](https://spec.matrix.org/v1.7/application-service-api/#server-admin-style-permissions)
for more information.
# Upgrading to v1.88.0
## Minimum supported Python version
The minimum supported Python version has been increased from v3.7 to v3.8.
You will need Python 3.8 to run Synapse v1.88.0 (due out July 18th, 2023).
If you use current versions of the Matrix.org-distributed Debian
packages or Docker images, no action is required.
## Removal of `worker_replication_*` settings
As mentioned previously in [Upgrading to v1.84.0](#upgrading-to-v1840), the following deprecated settings
are being removed in this release of Synapse:
* [`worker_replication_host`](https://matrix-org.github.io/synapse/v1.86/usage/configuration/config_documentation.html#worker_replication_host)
* [`worker_replication_http_port`](https://matrix-org.github.io/synapse/v1.86/usage/configuration/config_documentation.html#worker_replication_http_port)
* [`worker_replication_http_tls`](https://matrix-org.github.io/synapse/v1.86/usage/configuration/config_documentation.html#worker_replication_http_tls)
Please ensure that you have migrated to using `main` on your shared configuration's `instance_map`
(or create one if necessary). This is required if you have ***any*** workers at all;
administrators of single-process (monolith) installations don't need to do anything.
For an illustrative example, please see [Upgrading to v1.84.0](#upgrading-to-v1840) below.
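As a minimal sketch (the host and port are placeholders and must match the `replication` listener declared on the main process):
```yaml
instance_map:
  main:
    host: localhost
    port: 8030
```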
# Upgrading to v1.86.0
## Minimum supported Rust version
The minimum supported Rust version has been increased from v1.58.1 to v1.60.0.
Users building from source will need to ensure their `rustc` version is up to
date.
# Upgrading to v1.85.0
## Application service registration with "user" property deprecation
@@ -1352,7 +1285,7 @@ In line with our [deprecation policy](deprecation_policy.md),
we've dropped support for Python 3.5 and PostgreSQL 9.5, as they are no
longer supported upstream.
This release of Synapse requires Python 3.6+ and PostgreSQL 9.6+ or
This release of Synapse requires Python 3.6+ and PostgreSQL 9.6+ or
SQLite 3.22+.
## Removal of old List Accounts Admin API
@@ -2312,7 +2245,7 @@ for details.
# Upgrading to v0.11.0
This release includes the option to send anonymous usage stats to
matrix.org, and requires that administrators explicitly opt in or out by
matrix.org, and requires that administrators explicitly opt in or out by
setting the `report_stats` option to either `true` or `false`.
We would really appreciate it if you could help our project out by
@@ -2416,7 +2349,7 @@ latest module, please run:
# Upgrading to v0.5.0
The webclient has been split out into a separate repository/package in
The webclient has been split out into a separate repository/package in
this release. Before you restart your homeserver you will need to pull
in the webclient package by running:

View File

@@ -77,7 +77,7 @@ The following fields are returned in the JSON response body:
remote server, in ms. This is `0` if the last attempt to communicate with the
remote server was successful.
- `retry_interval` - integer - How long since the last time Synapse tried to reach
the remote server before trying again, in ms. This is `0` if no further retrying occurring.
the remote server before trying again, in ms. This is `0` if no further retrying is occurring.
- `failure_ts` - nullable integer - The first time Synapse tried and failed to reach the
remote server, in ms. This is `null` if communication with the remote server has never failed.
- `last_successful_stream_ordering` - nullable integer - The stream ordering of the most

View File

@@ -1,7 +1,5 @@
# Registration Tokens
**Note:** This API is disabled when MSC3861 is enabled. [See #15582](https://github.com/matrix-org/synapse/pull/15582)
This API allows you to manage tokens which can be used to authenticate
registration requests, as proposed in
[MSC3231](https://github.com/matrix-org/matrix-doc/blob/main/proposals/3231-token-authenticated-registration.md)
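For example, a new token can be created via the `new` endpoint of this API (the `uses_allowed` value is illustrative):
```
POST /_synapse/admin/v1/registration_tokens/new

{
    "uses_allowed": 5
}
```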

View File

@@ -27,8 +27,9 @@ What servers are currently participating in this room?
Run this SQL query on your database:
```sql
SELECT DISTINCT split_part(state_key, ':', 2)
FROM current_state_events
WHERE room_id = '!cURbafjkfsMDVwdRDQ:matrix.org' AND membership = 'join';
FROM current_state_events AS c
INNER JOIN room_memberships AS m USING (room_id, event_id)
WHERE room_id = '!cURbafjkfsMDVwdRDQ:matrix.org' AND membership = 'join';
```
What users are registered on my server?
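A query along the following lines answers this (a sketch; the `users` table holds one row per registered user):
```sql
SELECT name FROM users;
```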

View File

@@ -25,10 +25,8 @@ messages from the database after 5 minutes, rather than 5 months.
In addition, configuration options referring to size use the following suffixes:
* `K` = KiB, or 1024 bytes
* `M` = MiB, or 1,048,576 bytes
* `G` = GiB, or 1,073,741,824 bytes
* `T` = TiB, or 1,099,511,627,776 bytes
* `K` = KiB, or 1024 bytes
For example, setting `max_avatar_size: 10M` means that Synapse will not accept files larger than 10,485,760 bytes
for a user avatar.
@@ -464,20 +462,6 @@ See the docs [request log format](../administration/request_log.md).
* `additional_resources`: Only valid for an 'http' listener. A map of
additional endpoints which should be loaded via dynamic modules.
Unix socket support (_Added in Synapse 1.89.0_):
* `path`: A path and filename for a Unix socket. Make sure it is located in a
directory with read and write permissions, and that it already exists (the directory
will not be created). Defaults to `None`.
* **Note**: The use of both `path` and `port` options for the same `listener` is not
compatible.
* The `x_forwarded` option defaults to true when using Unix sockets and can be omitted.
* Other options that would not make sense to use with a UNIX socket, such as
`bind_addresses` and `tls` will be ignored and can be removed.
* `mode`: The file permissions to set on the UNIX socket. Defaults to `666`
* **Note:** Must be set as `type: http` (does not support `metrics` and `manhole`).
Also make sure that `metrics` is not included in `resources` -> `names`
Valid resource names are:
* `client`: the client-server API (/_matrix/client), and the synapse admin API (/_synapse/admin). Also implies `media` and `static`.
@@ -490,7 +474,7 @@ Valid resource names are:
* `media`: the media API (/_matrix/media).
* `metrics`: the metrics interface. See [here](../../metrics-howto.md). (Not compatible with Unix sockets)
* `metrics`: the metrics interface. See [here](../../metrics-howto.md).
* `openid`: OpenID authentication. See [here](../../openid.md).
@@ -521,7 +505,7 @@ listeners:
Example configuration #2:
```yaml
listeners:
# Insecure HTTP listener: for when matrix traffic passes through a reverse proxy
# Insecure HTTP listener: for when matrix traffic passes through a reverse proxy
# that unwraps TLS.
#
# If you plan to use a reverse proxy, please see
@@ -549,22 +533,6 @@ listeners:
bind_addresses: ['::1', '127.0.0.1']
type: manhole
```
Example configuration #3:
```yaml
listeners:
# Unix socket listener: ideal for Synapse deployments behind a reverse proxy, offering
# lightweight interprocess communication without TCP/IP overhead, avoiding port
# conflicts, and providing enhanced security through system file permissions.
#
# Note that x_forwarded will default to true, when using a UNIX socket. Please see
# https://matrix-org.github.io/synapse/latest/reverse_proxy.html.
#
- path: /var/run/synapse/main_public.sock
type: http
resources:
- names: [client, federation]
```
---
### `manhole_settings`
@@ -936,17 +904,6 @@ Example configuration:
redaction_retention_period: 28d
```
---
### `forgotten_room_retention_period`
How long to keep locally forgotten rooms before purging them from the DB.
Defaults to `null`, meaning it's disabled.
Example configuration:
```yaml
forgotten_room_retention_period: 28d
```
---
### `user_ips_max_age`
How long to track users' last seen time and IPs in the database.
@@ -1026,8 +983,11 @@ which are older than the room's maximum retention period. Synapse will also
filter events received over federation so that events that should have been
purged are ignored and not stored again.
The message retention policies feature is disabled by default. You can read more
about this feature [here](../../message_retention_policies.md).
The message retention policies feature is disabled by default. Please be advised
that enabling this feature carries some risk. There are known bugs with the implementation
which can cause database corruption. Setting retention to delete older history
is less risky than deleting newer history, but in general caution is advised when enabling this
experimental feature. You can read more about this feature [here](../../message_retention_policies.md).
This setting has the following sub-options:
* `default_policy`: Default retention policy. If set, Synapse will apply it to rooms that lack the
@@ -1130,14 +1090,14 @@ federation_verify_certificates: false
The minimum TLS version that will be used for outbound federation requests.
Defaults to `"1"`. Configurable to `"1"`, `"1.1"`, `"1.2"`, or `"1.3"`. Note
that setting this value higher than `"1.2"` will prevent federation to most
of the public Matrix network: only configure it to `"1.3"` if you have an
Defaults to `1`. Configurable to `1`, `1.1`, `1.2`, or `1.3`. Note
that setting this value higher than `1.2` will prevent federation to most
of the public Matrix network: only configure it to `1.3` if you have an
entirely private federation setup and you can ensure TLS 1.3 support.
Example configuration:
```yaml
federation_client_minimum_tls_version: "1.2"
federation_client_minimum_tls_version: 1.2
```
---
### `federation_certificate_verification_whitelist`
@@ -1190,11 +1150,6 @@ inbound federation traffic as early as possible, rather than relying
purely on this application-layer restriction. If not specified, the
default is to whitelist everything.
Note: this does not stop a server from joining rooms that servers not on the
whitelist are in. As such, this option is really only useful to establish a
"private federation", where a group of servers all whitelist each other and have
the same whitelist.
Example configuration:
```yaml
federation_domain_whitelist:
@@ -1241,43 +1196,6 @@ Example configuration:
allow_device_name_lookup_over_federation: true
```
---
### `federation`
The federation section defines some sub-options related to federation.
The following options are related to configuring timeout and retry logic for one request,
independently of the others.
The short retry algorithm is used when something or someone is waiting for the request to
complete, while the long retry algorithm is used for requests that happen in the background,
like sending a federation transaction.
* `client_timeout`: timeout for the federation requests. Defaults to 60s.
* `max_short_retry_delay`: maximum delay to be used for the short retry algo. Defaults to 2s.
* `max_long_retry_delay`: maximum delay to be used for the long retry algo. Defaults to 60s.
* `max_short_retries`: maximum number of retries for the short retry algo. Defaults to 3 attempts.
* `max_long_retries`: maximum number of retries for the long retry algo. Defaults to 10 attempts.
The following options control the retry logic when communicating with a specific homeserver destination.
Unlike the previous configuration options, these values apply across all requests
for a given destination and the state of the backoff is stored in the database.
* `destination_min_retry_interval`: the initial backoff, after the first request fails. Defaults to 10m.
* `destination_retry_multiplier`: how much we multiply the backoff by after each subsequent fail. Defaults to 2.
* `destination_max_retry_interval`: a cap on the backoff. Defaults to a week.
Example configuration:
```yaml
federation:
client_timeout: 180s
max_short_retry_delay: 7s
max_long_retry_delay: 100s
max_short_retries: 5
max_long_retries: 20
destination_min_retry_interval: 30s
destination_retry_multiplier: 5
destination_max_retry_interval: 12h
```
---
## Caching
Options related to caching.
@@ -2652,50 +2570,7 @@ Example configuration:
```yaml
nonrefreshable_access_token_lifetime: 24h
```
---
### `ui_auth`
The amount of time to allow a user-interactive authentication session to be active.
This defaults to 0, meaning the user is queried for their credentials
before every action, but this can be overridden to allow a single
validation to be re-used. This weakens the protections afforded by
the user-interactive authentication process, by allowing for multiple
(and potentially different) operations to use the same validation session.
This is ignored for potentially "dangerous" operations (including
deactivating an account, modifying an account password, adding a 3PID,
and minting additional login tokens).
Use the `session_timeout` sub-option here to change the time allowed for credential validation.
Example configuration:
```yaml
ui_auth:
session_timeout: "15s"
```
---
### `login_via_existing_session`
Matrix supports the ability of an existing session to mint a login token for
another client.
Synapse disables this by default as it has security ramifications -- a malicious
client could use the mechanism to spawn more than one session.
The duration of time the generated token is valid for can be configured with the
`token_timeout` sub-option.
User-interactive authentication is required when this is enabled unless the
`require_ui_auth` sub-option is set to `False`.
Example configuration:
```yaml
login_via_existing_session:
enabled: true
require_ui_auth: false
token_timeout: "5m"
```
---
## Metrics
Config options related to metrics.
@@ -2863,20 +2738,6 @@ Example configuration:
```yaml
track_appservice_user_ips: true
```
---
### `use_appservice_legacy_authorization`
Whether to send the application service access tokens via the `access_token` query parameter
per older versions of the Matrix specification. Defaults to false. Set to true to enable sending
access tokens via a query parameter.
**Enabling this option is considered insecure and is not recommended.**
Example configuration:
```yaml
use_appservice_legacy_authorization: true
```
---
### `macaroon_secret_key`
@@ -2958,7 +2819,7 @@ Normally, the connection to the key server is validated via TLS certificates.
Additional security can be provided by configuring a `verify key`, which
will make synapse check that the response is signed by that key.
This setting supersedes an older setting named `perspectives`. The old format
This setting supersedes an older setting named `perspectives`. The old format
is still supported for backwards-compatibility, but it is deprecated.
`trusted_key_servers` defaults to matrix.org, but using it will generate a
@@ -3040,16 +2901,6 @@ enable SAML login. You can either put your entire pysaml config inline using the
option, or you can specify a path to a pysaml config file with the sub-option `config_path`.
This setting has the following sub-options:
* `idp_name`: A user-facing name for this identity provider, which is used to
offer the user a choice of login mechanisms.
* `idp_icon`: An optional icon for this identity provider, which is presented
by clients and Synapse's own IdP picker page. If given, must be an
MXC URI of the format `mxc://<server-name>/<media-id>`. (An easy way to
obtain such an MXC URI is to upload an image to an (unencrypted) room
and then copy the "url" from the source of the event.)
* `idp_brand`: An optional brand for this identity provider, allowing clients
to style the login flow according to the identity provider in question.
See the [spec](https://spec.matrix.org/latest/) for possible options here.
* `sp_config`: the configuration for the pysaml2 Service Provider. See pysaml2 docs for format of config.
Default values will be used for the `entityid` and `service` settings,
so it is not normally necessary to specify them unless you need to
@@ -3201,7 +3052,7 @@ Options for each entry include:
* `idp_icon`: An optional icon for this identity provider, which is presented
by clients and Synapse's own IdP picker page. If given, must be an
MXC URI of the format `mxc://<server-name>/<media-id>`. (An easy way to
MXC URI of the format mxc://<server-name>/<media-id>. (An easy way to
obtain such an MXC URI is to upload an image to an (unencrypted) room
and then copy the "url" from the source of the event.)
@@ -3219,14 +3070,6 @@ Options for each entry include:
* `client_secret`: oauth2 client secret to use. May be omitted if
`client_secret_jwt_key` is given, or if `client_auth_method` is 'none'.
Must be omitted if `client_secret_path` is specified.
* `client_secret_path`: path to the oauth2 client secret to use. With that
it's not necessary to leak secrets into the config file itself.
Mutually exclusive with `client_secret`. Can be omitted if
`client_secret_jwt_key` is specified.
*Added in Synapse 1.91.0.*
* `client_secret_jwt_key`: Alternative to client_secret: details of a key used
to create a JSON Web Token to be used as an OAuth2 client secret. If
@@ -3424,18 +3267,7 @@ Enable Central Authentication Service (CAS) for registration and login.
Has the following sub-options:
* `enabled`: Set this to true to enable authorization against a CAS server.
Defaults to false.
* `idp_name`: A user-facing name for this identity provider, which is used to
offer the user a choice of login mechanisms.
* `idp_icon`: An optional icon for this identity provider, which is presented
by clients and Synapse's own IdP picker page. If given, must be an
MXC URI of the format `mxc://<server-name>/<media-id>`. (An easy way to
obtain such an MXC URI is to upload an image to an (unencrypted) room
and then copy the "url" from the source of the event.)
* `idp_brand`: An optional brand for this identity provider, allowing clients
to style the login flow according to the identity provider in question.
See the [spec](https://spec.matrix.org/latest/) for possible options here.
* `server_url`: The URL of the CAS authorization endpoint.
* `protocol_version`: The CAS protocol version, defaults to none (version 3 is required if you want to use "required_attributes").
* `displayname_attribute`: The attribute of the CAS response to use as the display name.
If no name is given here, no displayname will be set.
* `required_attributes`: It is possible to configure Synapse to only allow logins if CAS attributes
@@ -3443,24 +3275,16 @@ Has the following sub-options:
and the values must match the given value. Alternately if the given value
is `None` then any value is allowed (the attribute just must exist).
All of the listed attributes must match for the login to be permitted.
* `enable_registration`: set to 'false' to disable automatic registration of new
users. This allows the CAS SSO flow to be limited to sign in only, rather than
automatically registering users that have a valid SSO login but do not have
a pre-registered account. Defaults to true.
*Added in Synapse 1.93.0.*
Example configuration:
```yaml
cas_config:
enabled: true
server_url: "https://cas-server.com"
protocol_version: 3
displayname_attribute: name
required_attributes:
userGroup: "staff"
department: None
enable_registration: true
```
---
### `sso`
@@ -3591,6 +3415,28 @@ password_config:
require_uppercase: true
```
---
### `ui_auth`
The amount of time to allow a user-interactive authentication session to be active.
This defaults to 0, meaning the user is queried for their credentials
before every action, but this can be overridden to allow a single
validation to be re-used. This weakens the protections afforded by
the user-interactive authentication process, by allowing for multiple
(and potentially different) operations to use the same validation session.
This is ignored for potentially "dangerous" operations (including
deactivating an account, modifying an account password, and
adding a 3PID).
Use the `session_timeout` sub-option here to change the time allowed for credential validation.
Example configuration:
```yaml
ui_auth:
session_timeout: "15s"
```
---
## Push
Configuration settings related to push notifications
@@ -3683,7 +3529,6 @@ This option has the following sub-options:
* `prefer_local_users`: Defines whether to prefer local users in search query results.
If set to true, local users are more likely to appear above remote users when searching the
user directory. Defaults to false.
* `show_locked_users`: Defines whether to show locked users in search query results. Defaults to false.
Example configuration:
```yaml
@@ -3691,7 +3536,6 @@ user_directory:
enabled: false
search_all_users: true
prefer_local_users: true
show_locked_users: true
```
---
### `user_consent`
@@ -3889,19 +3733,6 @@ Example configuration:
```yaml
forget_rooms_on_leave: false
```
---
### `exclude_rooms_from_sync`
A list of rooms to exclude from sync responses. This is useful for server
administrators wishing to group users into a room without these users being able
to see it from their client.
By default, no room is excluded.
Example configuration:
```yaml
exclude_rooms_from_sync:
- "!foo:example.com"
```
---
## Opentracing
@@ -4052,14 +3883,13 @@ federation_sender_instances:
---
### `instance_map`
When using workers this should be a map from [`worker_name`](#worker_name) to the HTTP
replication listener of the worker, if configured, and to the main process. Each worker
declared under [`stream_writers`](../../workers.md#stream-writers) and
[`outbound_federation_restricted_to`](#outbound_federation_restricted_to) needs an HTTP
replication listener, and that listener should be included in the `instance_map`. The
main process also needs an entry on the `instance_map`, and it should be listed under
`main` **if even one other worker exists**. Ensure the port matches with what is
declared inside the `listener` block for a `replication` listener.
When using workers this should be a map from [`worker_name`](#worker_name) to the
HTTP replication listener of the worker, if configured, and to the main process.
Each worker declared under [`stream_writers`](../../workers.md#stream-writers) needs
an HTTP replication listener, and that listener should be included in the `instance_map`.
The main process also needs an entry on the `instance_map`, and it should be listed under
`main` **if even one other worker exists**. Ensure the port matches with what is declared
inside the `listener` block for a `replication` listener.
Example configuration:
@@ -4072,14 +3902,6 @@ instance_map:
host: localhost
port: 8034
```
Example configuration (#2, for UNIX sockets):
```yaml
instance_map:
main:
path: /var/run/synapse/main_replication.sock
worker1:
path: /var/run/synapse/worker1_replication.sock
```
---
### `stream_writers`
@@ -4097,24 +3919,6 @@ stream_writers:
typing: worker1
```
---
### `outbound_federation_restricted_to`
When using workers, you can restrict outbound federation traffic to only go through a
specific subset of workers. Any worker specified here must also be in the
[`instance_map`](#instance_map).
[`worker_replication_secret`](#worker_replication_secret) must also be configured to
authorize inter-worker communication.
```yaml
outbound_federation_restricted_to:
- federation_sender1
- federation_sender2
```
Also see the [worker
documentation](../../workers.md#restrict-outbound-federation-traffic-to-a-specific-set-of-workers)
for more info.
---
### `run_background_tasks_on`
The [worker](../../workers.md#background-tasks) that is used to run
@@ -4239,6 +4043,51 @@ Example configuration:
worker_name: generic_worker1
```
---
### `worker_replication_host`
*Deprecated as of version 1.84.0. Place `host` under `main` entry on the [`instance_map`](#instance_map) in your shared yaml configuration instead.*
The HTTP replication endpoint that it should talk to on the main Synapse process.
The main Synapse process defines this with a `replication` resource in
[`listeners` option](#listeners).
Example configuration:
```yaml
worker_replication_host: 127.0.0.1
```
---
### `worker_replication_http_port`
*Deprecated as of version 1.84.0. Place `port` under `main` entry on the [`instance_map`](#instance_map) in your shared yaml configuration instead.*
The HTTP replication port that it should talk to on the main Synapse process.
The main Synapse process defines this with a `replication` resource in
[`listeners` option](#listeners).
Example configuration:
```yaml
worker_replication_http_port: 9093
```
---
### `worker_replication_http_tls`
*Deprecated as of version 1.84.0. Place `tls` under `main` entry on the [`instance_map`](#instance_map) in your shared yaml configuration instead.*
Whether TLS should be used for talking to the HTTP replication port on the main
Synapse process.
The main Synapse process defines this with the `tls` option on its [listener](#listeners) that
has the `replication` resource enabled.
**Please note:** by default, it is not safe to expose replication ports to the
public Internet, even with TLS enabled.
See [`worker_replication_secret`](#worker_replication_secret).
Defaults to `false`.
*Added in Synapse 1.72.0.*
Example configuration:
```yaml
worker_replication_http_tls: true
```
---
### `worker_listeners`
A worker can handle HTTP requests. To do so, a `worker_listeners` option
@@ -4257,18 +4106,6 @@ worker_listeners:
resources:
- names: [client, federation]
```
Example configuration (#2, using UNIX sockets with a `replication` listener):
```yaml
worker_listeners:
- type: http
path: /var/run/synapse/worker_public.sock
resources:
- names: [client, federation]
- type: http
path: /var/run/synapse/worker_replication.sock
resources:
- names: [replication]
```
---
### `worker_manhole`

View File

@@ -1,133 +1,49 @@
# User Directory API Implementation
User Directory API Implementation
=================================
The user directory is maintained based on users that are 'visible' to the homeserver -
i.e. ones which are local to the server and ones which any local user shares a
room with.
The user directory is currently maintained based on the 'visible' users
on this particular server - i.e. ones which your account shares a room with, or
who are present in a publicly viewable room present on the server.
The directory info is stored in various tables, which can sometimes get out of
sync (although this is considered a bug). If this happens, for now the
The directory info is stored in various tables, which can (typically after
DB corruption) get stale or out of sync. If this happens, for now the
solution to fix it is to use the [admin API](usage/administration/admin_api/background_updates.md#run)
and execute the job `regenerate_directory`. This should then start a background task to
flush the current tables and regenerate the directory. Depending on the size
of your homeserver (number of users and rooms) this can take a while.
flush the current tables and regenerate the directory.
## Data model
Data model
----------
There are five relevant tables that collectively form the "user directory".
Three of them track a list of all known users. The last two (collectively called
the "search tables") track which users are visible to each other.
Three of them track a master list of all the users we could search for.
The last two (collectively called the "search tables") track who can
see who.
From all of these tables we exclude three types of local user:
- support users
- appservice users
- deactivated users
- support users
- appservice users
- deactivated users
A description of each table follows:
* `user_directory`. This contains the user ID, display name and avatar of each user.
- Because there is only one directory entry per user, it is important that it
only contain publicly visible information. Otherwise, this will leak the
* `user_directory`. This contains the user_id, display name and avatar we'll
return when you search the directory.
- Because there's only one directory entry per user, it's important that we only
ever put publicly visible names here. Otherwise we might leak a private
nickname or avatar used in a private room.
- Indexed on rooms. Indexed on users.
* `user_directory_search`. To be joined to `user_directory`. It contains an extra
column that enables full text search based on user IDs and display names.
Different schemas for SQLite and Postgres are used.
column that enables full text search based on user ids and display names.
Different schemas for SQLite and Postgres with different code paths to match.
- Indexed on the full text search data. Indexed on users.
* `user_directory_stream_pos`. When the initial background update to populate
the directory is complete, we record a stream position here. This indicates
that synapse should now listen for room changes and incrementally update
the directory where necessary. (See [stream positions](development/synapse_architecture/streams.html).)
the directory where necessary.
* `users_in_public_rooms`. Contains associations between users and the public
rooms they're in. Used to determine which users are in public rooms and should
be publicly visible in the directory. Both local and remote users are tracked.
* `users_in_public_rooms`. Contains associations between users and the public rooms they're in.
Used to determine which users are in public rooms and should be publicly visible in the directory.
* `users_who_share_private_rooms`. Rows are triples `(L, M, room id)` where `L`
is a local user and `M` is a local or remote user. `L` and `M` should be
different, but this isn't enforced by a constraint.
Note that if two local users share a room then there will be two entries:
`(user1, user2, !room_id)` and `(user2, user1, !room_id)`.
## Configuration options
The exact way user search works can be tweaked via some server-level
[configuration options](usage/configuration/config_documentation.md#user_directory).
The information is not repeated here, but the options are mentioned below.
## Search algorithm
If `search_all_users` is `false`, then results are limited to users who:
1. Are found in the `users_in_public_rooms` table, or
2. Are found in the `users_who_share_private_rooms` where `L` is the requesting
user and `M` is the search result.
Otherwise, if `search_all_users` is `true`, no such limits are placed and all
users known to the server (matching the search query) will be returned.
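A hypothetical query illustrating that visibility rule for a requesting user `@requester:example.com` (not the code Synapse actually runs):
```sql
SELECT d.user_id, d.display_name, d.avatar_url
FROM user_directory_search AS s
INNER JOIN user_directory AS d USING (user_id)
WHERE d.user_id IN (
    -- users in public rooms are always searchable...
    SELECT user_id FROM users_in_public_rooms
    UNION
    -- ...as are users who share a private room with the requester.
    SELECT other_user_id FROM users_who_share_private_rooms
    WHERE user_id = '@requester:example.com'
);
```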
By default, locked users are not returned. If `show_locked_users` is `true` then
no filtering on the locked status of a user is done.
The user-provided search term is lowercased and normalized using [NFKC](https://en.wikipedia.org/wiki/Unicode_equivalence#Normalization);
this treats the string as case-insensitive, canonicalizes different forms of the
same text, and maps some "roughly equivalent" characters together.
The search term is then split into words:
* If [ICU](https://en.wikipedia.org/wiki/International_Components_for_Unicode) is
available, then the system's [default locale](https://unicode-org.github.io/icu/userguide/locale/#default-locales)
will be used to break the search term into words. (See the
[installation instructions](setup/installation.md) for how to install ICU.)
* If unavailable, then runs of ASCII characters, numbers, underscores, and hyphens
are considered words.
The queries for PostgreSQL and SQLite are detailed below, but their overall goal
is to find matching users, preferring users who are "real" (e.g. not bots,
not deactivated). It is assumed that real users will have a display name and
avatar set.
### PostgreSQL
The above words are then transformed into two queries:
1. "exact" which matches the parsed words exactly (using [`to_tsquery`](https://www.postgresql.org/docs/current/textsearch-controls.html#TEXTSEARCH-PARSING-QUERIES));
2. "prefix" which matches the parsed words as prefixes (using `to_tsquery`).
Results are composed of all rows in the `user_directory_search` table whose information
matches one (or both) of these queries. Results are ordered by calculating a weighted
score for each result, higher scores are returned first:
* 4x if a user ID exists.
* 1.2x if the user has a display name set.
* 1.2x if the user has an avatar set.
* 0x-3x by the full text search results using the [`ts_rank_cd` function](https://www.postgresql.org/docs/current/textsearch-controls.html#TEXTSEARCH-RANKING)
against the "exact" search query; this has four variables with the following weightings:
* `D`: 0.1 for the user ID's domain
* `C`: 0.1 for unused
* `B`: 0.9 for the user's display name (or an empty string if it is not set)
* `A`: 0.1 for the user ID's localpart
* 0x-1x by the full text search results using the `ts_rank_cd` function against the
"prefix" search query. (Using the same weightings as above.)
* If `prefer_local_users` is `true`, then 2x if the user is local to the homeserver.
Note that `ts_rank_cd` returns a weight between 0 and 1. The initial weighting of
all results is 1.
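Putting those weightings together, an illustrative (not verbatim) version of the resulting query when searching for `alice` might look like:
```sql
SELECT d.user_id, d.display_name, d.avatar_url
FROM user_directory_search AS s
INNER JOIN user_directory AS d USING (user_id)
WHERE s.vector @@ to_tsquery('simple', 'alice:*')
ORDER BY
    (CASE WHEN d.user_id IS NOT NULL THEN 4.0 ELSE 1.0 END)
    * (CASE WHEN d.display_name IS NOT NULL THEN 1.2 ELSE 1.0 END)
    * (CASE WHEN d.avatar_url IS NOT NULL THEN 1.2 ELSE 1.0 END)
    -- ts_rank_cd weights are given in {D, C, B, A} order
    * (3 * ts_rank_cd('{0.1, 0.1, 0.9, 0.1}', s.vector, to_tsquery('simple', 'alice'))
       + ts_rank_cd('{0.1, 0.1, 0.9, 0.1}', s.vector, to_tsquery('simple', 'alice:*')))
    DESC
LIMIT 10;
```
(The 2x `prefer_local_users` factor is omitted here for brevity.)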
### SQLite
Results are composed of all rows in the `user_directory_search` table whose information
matches the query. Results are ordered by the following information, with each
subsequent column used as a tiebreaker, for each result:
1. By the [`rank`](https://www.sqlite.org/windowfunctions.html#built_in_window_functions)
of the full text search results using the [`matchinfo` function](https://www.sqlite.org/fts3.html#matchinfo). Higher
ranks are returned first.
2. If `prefer_local_users` is `true`, then users local to the homeserver are
returned first.
3. Users with a display name set are returned first.
4. Users with an avatar set are returned first.

View File

@@ -1 +1 @@
window.SYNAPSE_VERSION = 'v1.94';
window.SYNAPSE_VERSION = 'v1.85';

View File

@@ -95,12 +95,9 @@ for the main process
* Secondly, you need to enable
[redis-based replication](usage/configuration/config_documentation.md#redis)
* You will need to add an [`instance_map`](usage/configuration/config_documentation.md#instance_map)
with the `main` process defined, as well as the relevant connection information from
its HTTP `replication` listener (defined in step 1 above).
* Note that the `host` defined is the address the worker needs to look for the `main`
process at, not necessarily the same address that is bound to.
* If you are using Unix sockets for the `replication` resource, make sure to
use a `path` to the socket file instead of a `port`.
with the `main` process defined, as well as the relevant connection information from
its HTTP `replication` listener (defined in step 1 above). Note that the `host` defined
is the address the worker needs to look for the `main` process at, not necessarily the same address that is bound to.
* Optionally, a [shared secret](usage/configuration/config_documentation.md#worker_replication_secret)
can be used to authenticate HTTP traffic between workers. For example:
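The following is a sketch; the secret value is a placeholder:
```yaml
# Use a long random string, shared by the main process and all workers.
worker_replication_secret: "REPLACE_WITH_A_LONG_RANDOM_SECRET"
```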
@@ -148,6 +145,9 @@ In the config file for each worker, you must specify:
with an `http` listener.
* **Synapse 1.72 and older:** if handling the `^/_matrix/client/v3/keys/upload` endpoint, the HTTP URI for
the main process (`worker_main_http_uri`). This config option is no longer required and is ignored when running Synapse 1.73 and newer.
* **Synapse 1.83 and older:** The HTTP replication endpoint that the worker should talk to on the main synapse process
([`worker_replication_host`](usage/configuration/config_documentation.md#worker_replication_host) and
[`worker_replication_http_port`](usage/configuration/config_documentation.md#worker_replication_http_port)). If using Synapse 1.84 and newer, these are not needed if `main` is defined on the [shared configuration](#shared-configuration) `instance_map`
For example:
@@ -177,11 +177,11 @@ The following applies to Synapse installations that have been installed from sou
You can start the main Synapse process with Poetry by running the following command:
```console
poetry run synapse_homeserver --config-file [your homeserver.yaml]
poetry run synapse_homeserver -c [your homeserver.yaml]
```
For worker setups, you can run the following command
```console
poetry run synapse_worker --config-file [your homeserver.yaml] --config-file [your worker.yaml]
poetry run synapse_worker -c [your worker.yaml]
```
## Available worker applications
@@ -232,6 +232,7 @@ information.
^/_matrix/client/v1/rooms/.*/hierarchy$
^/_matrix/client/(v1|unstable)/rooms/.*/relations/
^/_matrix/client/v1/rooms/.*/threads$
^/_matrix/client/unstable/org.matrix.msc2716/rooms/.*/batch_send$
^/_matrix/client/unstable/im.nheko.summary/rooms/.*/summary$
^/_matrix/client/(r0|v3|unstable)/account/3pid$
^/_matrix/client/(r0|v3|unstable)/account/whoami$
@@ -246,7 +247,6 @@ information.
^/_matrix/client/(r0|v3|unstable)/user/.*/filter(/|$)
^/_matrix/client/(api/v1|r0|v3|unstable)/directory/room/.*$
^/_matrix/client/(r0|v3|unstable)/capabilities$
^/_matrix/client/(r0|v3|unstable)/notifications$
# Encryption requests
^/_matrix/client/(r0|v3|unstable)/keys/query$
@@ -532,30 +532,6 @@ the stream writer for the `presence` stream:
^/_matrix/client/(api/v1|r0|v3|unstable)/presence/
#### Restrict outbound federation traffic to a specific set of workers
The
[`outbound_federation_restricted_to`](usage/configuration/config_documentation.md#outbound_federation_restricted_to)
configuration is useful to make sure outbound federation traffic only goes through a
specified subset of workers. This allows you to set more strict access controls (like a
firewall) for all workers and only allow the `federation_sender`s to contact the
outside world.
```yaml
instance_map:
main:
host: localhost
port: 8030
federation_sender1:
host: localhost
port: 8034
outbound_federation_restricted_to:
- federation_sender1
worker_replication_secret: "secret_secret"
```
#### Background tasks
There is also support for moving background tasks to a separate

148
flake.lock generated
View File

@@ -8,20 +8,41 @@
"pre-commit-hooks": "pre-commit-hooks"
},
"locked": {
"lastModified": 1688058187,
"narHash": "sha256-ipDcc7qrucpJ0+0eYNlwnE+ISTcq4m03qW+CWUshRXI=",
"lastModified": 1683102061,
"narHash": "sha256-kOphT6V0uQUlFNBP3GBjs7DAU7fyZGGqCs9ue1gNY6E=",
"owner": "cachix",
"repo": "devenv",
"rev": "c8778e3dc30eb9043e218aaa3861d42d4992de77",
"rev": "ff1f29e41756553174d596cafe3a9fa77595100b",
"type": "github"
},
"original": {
"owner": "cachix",
"ref": "v0.6.3",
"ref": "main",
"repo": "devenv",
"type": "github"
}
},
"fenix": {
"inputs": {
"nixpkgs": [
"nixpkgs"
],
"rust-analyzer-src": "rust-analyzer-src"
},
"locked": {
"lastModified": 1682490133,
"narHash": "sha256-tR2Qx0uuk97WySpSSk4rGS/oH7xb5LykbjATcw1vw1I=",
"owner": "nix-community",
"repo": "fenix",
"rev": "4e9412753ab75ef0e038a5fe54a062fb44c27c6a",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "fenix",
"type": "github"
}
},
"flake-compat": {
"flake": false,
"locked": {
@@ -39,33 +60,12 @@
}
},
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1685518550,
"narHash": "sha256-o2d0KcvaXzTrPRIo0kOLV0/QXHhDQ5DTi+OxcjO8xqY=",
"lastModified": 1667395993,
"narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "a1720a10a6cfe8234c0e93907ffe81be440f4cef",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"flake-utils_2": {
"inputs": {
"systems": "systems_2"
},
"locked": {
"lastModified": 1681202837,
"narHash": "sha256-H+Rh19JDwRtpVPAWp64F+rlEtxUWBAQW28eAi3SRSzg=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "cfacdce06f30d2b68473a46042957675eebb3401",
"rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f",
"type": "github"
},
"original": {
@@ -170,27 +170,27 @@
},
"nixpkgs-stable": {
"locked": {
"lastModified": 1685801374,
"narHash": "sha256-otaSUoFEMM+LjBI1XL/xGB5ao6IwnZOXc47qhIgJe8U=",
"lastModified": 1673800717,
"narHash": "sha256-SFHraUqLSu5cC6IxTprex/nTsI81ZQAtDvlBvGDWfnA=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "c37ca420157f4abc31e26f436c1145f8951ff373",
"rev": "2f9fd351ec37f5d479556cd48be4ca340da59b8f",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-23.05",
"ref": "nixos-22.11",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_2": {
"locked": {
"lastModified": 1690535733,
"narHash": "sha256-WgjUPscQOw3cB8yySDGlyzo6cZNihnRzUwE9kadv/5I=",
"lastModified": 1682519441,
"narHash": "sha256-Vsq/8NOtvW1AoC6shCBxRxZyMQ+LhvPuJT6ltbzuv+Y=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "8cacc05fbfffeaab910e8c2c9e2a7c6b32ce881a",
"rev": "7a32a141db568abde9bc389845949dc2a454dfd3",
"type": "github"
},
"original": {
@@ -200,22 +200,6 @@
"type": "github"
}
},
"nixpkgs_3": {
"locked": {
"lastModified": 1681358109,
"narHash": "sha256-eKyxW4OohHQx9Urxi7TQlFBTDWII+F+x2hklDOQPB50=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "96ba1c52e54e74c3197f4d43026b3f3d92e83ff9",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixpkgs-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"pre-commit-hooks": {
"inputs": {
"flake-compat": [
@@ -231,11 +215,11 @@
"nixpkgs-stable": "nixpkgs-stable"
},
"locked": {
"lastModified": 1688056373,
"narHash": "sha256-2+SDlNRTKsgo3LBRiMUcoEUb6sDViRNQhzJquZ4koOI=",
"lastModified": 1678376203,
"narHash": "sha256-3tyYGyC8h7fBwncLZy5nCUjTJPrHbmNwp47LlNLOHSM=",
"owner": "cachix",
"repo": "pre-commit-hooks.nix",
"rev": "5843cf069272d92b60c3ed9e55b7a8989c01d4c7",
"rev": "1a20b9708962096ec2481eeb2ddca29ed747770a",
"type": "github"
},
"original": {
@@ -247,27 +231,25 @@
"root": {
"inputs": {
"devenv": "devenv",
"fenix": "fenix",
"nixpkgs": "nixpkgs_2",
"rust-overlay": "rust-overlay",
"systems": "systems_3"
"systems": "systems"
}
},
"rust-overlay": {
"inputs": {
"flake-utils": "flake-utils_2",
"nixpkgs": "nixpkgs_3"
},
"rust-analyzer-src": {
"flake": false,
"locked": {
"lastModified": 1693966243,
"narHash": "sha256-a2CA1aMIPE67JWSVIGoGtD3EGlFdK9+OlJQs0FOWCKY=",
"owner": "oxalica",
"repo": "rust-overlay",
"rev": "a8b4bb4cbb744baaabc3e69099f352f99164e2c1",
"lastModified": 1682426789,
"narHash": "sha256-UqnLmJESRZE0tTEaGbRAw05Hm19TWIPA+R3meqi5I4w=",
"owner": "rust-lang",
"repo": "rust-analyzer",
"rev": "943d2a8a1ca15e8b28a1f51f5a5c135e3728da04",
"type": "github"
},
"original": {
"owner": "oxalica",
"repo": "rust-overlay",
"owner": "rust-lang",
"ref": "nightly",
"repo": "rust-analyzer",
"type": "github"
}
},
@@ -285,36 +267,6 @@
"repo": "default",
"type": "github"
}
},
"systems_2": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"systems_3": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",

View File

@@ -39,27 +39,27 @@
{
inputs = {
# Use the master/unstable branch of nixpkgs. Used to fetch the latest
# available versions of packages.
# Use the master/unstable branch of nixpkgs. The latest stable, 22.11,
# does not contain 'perl536Packages.NetAsyncHTTP', needed by Sytest.
nixpkgs.url = "github:NixOS/nixpkgs/master";
# Output a development shell for x86_64/aarch64 Linux/Darwin (MacOS).
systems.url = "github:nix-systems/default";
# A development environment manager built on Nix. See https://devenv.sh.
devenv.url = "github:cachix/devenv/v0.6.3";
# Rust toolchain.
rust-overlay.url = "github:oxalica/rust-overlay";
devenv.url = "github:cachix/devenv/main";
# Rust toolchains and rust-analyzer nightly.
fenix = {
url = "github:nix-community/fenix";
inputs.nixpkgs.follows = "nixpkgs";
};
};
outputs = { self, nixpkgs, devenv, systems, rust-overlay, ... } @ inputs:
outputs = { self, nixpkgs, devenv, systems, ... } @ inputs:
let
forEachSystem = nixpkgs.lib.genAttrs (import systems);
in {
devShells = forEachSystem (system:
let
overlays = [ (import rust-overlay) ];
pkgs = import nixpkgs {
inherit system overlays;
};
pkgs = nixpkgs.legacyPackages.${system};
in {
# Everything is configured via devenv - a Nix module for creating declarative
# developer environments. See https://devenv.sh/reference/options/ for a list
@@ -76,25 +76,6 @@
# Configure packages to install.
# Search for package names at https://search.nixos.org/packages?channel=unstable
packages = with pkgs; [
# The rust toolchain and related tools.
# This will install the "default" profile of rust components.
# https://rust-lang.github.io/rustup/concepts/profiles.html
#
# NOTE: We currently need to set the Rust version unnecessarily high
# in order to work around https://github.com/matrix-org/synapse/issues/15939
(rust-bin.stable."1.71.1".default.override {
# Additionally install the "rust-src" extension to allow diving into the
# Rust source code in an IDE (rust-analyzer will also make use of it).
extensions = [ "rust-src" ];
})
# The rust-analyzer language server implementation.
rust-analyzer
# GCC includes a linker; needed for building `ruff`
gcc
# Needed for building `ruff`
gnumake
# Native dependencies for running Synapse.
icu
libffi
@@ -143,11 +124,12 @@
# Install dependencies for the additional programming languages
# involved with Synapse development.
#
# * Rust is used for developing and running Synapse.
# * Golang is needed to run the Complement test suite.
# * Perl is needed to run the SyTest test suite.
# * Rust is used for developing and running Synapse.
# It is installed manually with `packages` above.
languages.go.enable = true;
languages.rust.enable = true;
languages.rust.version = "stable";
languages.perl.enable = true;
# Postgres is needed to run Synapse with postgres support and
@@ -196,7 +178,7 @@
EOF
'';
# Start synapse when `devenv up` is run.
processes.synapse.exec = "poetry run python -m synapse.app.homeserver -c homeserver.yaml -c homeserver-config-overrides.d";
processes.synapse.exec = "poetry run python -m synapse.app.homeserver -c homeserver.yaml --config-directory homeserver-config-overrides.d";
# Define the perl modules we require to run SyTest.
#
@@ -241,19 +223,6 @@
URI
YAMLLibYAML
]}";
# Clear the LD_LIBRARY_PATH environment variable on shell init.
#
# By default, devenv will set LD_LIBRARY_PATH to point to .devenv/profile/lib. This causes
# issues when we include `gcc` as a dependency to build C libraries, as the version of glibc
# that the development environment's cc compiler uses may differ from that of the system.
#
# When LD_LIBRARY_PATH is set, system tools will attempt to use the development environment's
# libraries, which, when built against a different glibc version, lead to "version 'GLIBC_X.YY'
# not found" errors.
enterShell = ''
unset LD_LIBRARY_PATH
'';
}
];
};

View File

@@ -2,28 +2,17 @@
namespace_packages = True
plugins = pydantic.mypy, mypy_zope:plugin, scripts-dev/mypy_synapse_plugin.py
follow_imports = normal
check_untyped_defs = True
show_error_codes = True
show_traceback = True
mypy_path = stubs
warn_unreachable = True
warn_unused_ignores = True
local_partial_types = True
no_implicit_optional = True
# Strict checks, see mypy --help
warn_unused_configs = True
# disallow_any_generics = True
disallow_subclassing_any = True
# disallow_untyped_calls = True
disallow_untyped_defs = True
disallow_incomplete_defs = True
# check_untyped_defs = True
# disallow_untyped_decorators = True
warn_redundant_casts = True
warn_unused_ignores = True
# warn_return_any = True
# no_implicit_reexport = True
strict_equality = True
warn_redundant_casts = True
# Run mypy type checking with the minimum supported Python version to catch new usage
# that isn't backwards-compatible (types, overloads, etc).
python_version = 3.8
@@ -42,14 +31,6 @@ warn_unused_ignores = False
[mypy-synapse.util.caches.treecache]
disallow_untyped_defs = False
disallow_incomplete_defs = False
[mypy-synapse.util.manhole]
# This module imports something from Twisted which has a bad annotation in Twisted trunk,
# but is unannotated in Twisted's latest release. We want to type-ignore the problem
# in the twisted trunk job, even though it has no effect on normal mypy runs.
warn_unused_ignores = False
;; Dependencies without annotations
;; Before ignoring a module, check to see if type stubs are available.
@@ -59,18 +40,18 @@ warn_unused_ignores = False
;; which we can pull in as a dev dependency by adding to `pyproject.toml`'s
;; `[tool.poetry.dev-dependencies]` list.
# https://github.com/lepture/authlib/issues/460
[mypy-authlib.*]
ignore_missing_imports = True
[mypy-ijson.*]
ignore_missing_imports = True
# https://github.com/msgpack/msgpack-python/issues/448
[mypy-lxml]
ignore_missing_imports = True
[mypy-msgpack]
ignore_missing_imports = True
# https://github.com/wolever/parameterized/issues/143
[mypy-parameterized.*]
ignore_missing_imports = True
@@ -86,9 +67,17 @@ ignore_missing_imports = True
[mypy-saml2.*]
ignore_missing_imports = True
[mypy-service_identity.*]
ignore_missing_imports = True
[mypy-srvlookup.*]
ignore_missing_imports = True
# https://github.com/twisted/treq/pull/366
[mypy-treq.*]
ignore_missing_imports = True
[mypy-incremental.*]
ignore_missing_imports = True
[mypy-setuptools_rust.*]
ignore_missing_imports = True

1827
poetry.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -35,7 +35,7 @@
showcontent = true
[tool.black]
target-version = ['py38', 'py39', 'py310', 'py311']
target-version = ['py37', 'py38', 'py39', 'py310']
# black ignores everything in .gitignore by default, see
# https://black.readthedocs.io/en/stable/usage_and_configuration/file_collection_and_discovery.html#gitignore
# Use `extend-exclude` if you want to exclude something in addition to this.
@@ -43,39 +43,33 @@ target-version = ['py38', 'py39', 'py310', 'py311']
[tool.ruff]
line-length = 88
# See https://beta.ruff.rs/docs/rules/#error-e
# See https://github.com/charliermarsh/ruff/#pycodestyle
# for error codes. The ones we ignore are:
# E501: Line too long (black enforces this for us)
# E731: do not assign a lambda expression, use a def
# E501: Line too long (black enforces this for us)
#
# flake8-bugbear compatible checks. Its error codes are described at
# https://beta.ruff.rs/docs/rules/#flake8-bugbear-b
# https://github.com/charliermarsh/ruff/#flake8-bugbear
# B019: Use of functools.lru_cache or functools.cache on methods can lead to memory leaks
# B023: Functions defined inside a loop must not use variables redefined in the loop
# B024: Abstract base class with no abstract method.
ignore = [
"B019",
"B023",
"B024",
"E501",
"E731",
]
select = [
# pycodestyle
# pycodestyle checks.
"E",
"W",
# pyflakes
# pyflakes checks.
"F",
# flake8-bugbear
# flake8-bugbear checks.
"B0",
# flake8-comprehensions
# flake8-comprehensions checks.
"C4",
# flake8-2020
"YTT",
# flake8-slots
"SLOT",
# flake8-debugger
"T10",
# flake8-pie
"PIE",
# flake8-executable
"EXE",
]
[tool.isort]
@@ -92,11 +86,10 @@ skip_gitignore = true
[tool.maturin]
manifest-path = "rust/Cargo.toml"
module-name = "synapse.synapse_rust"
[tool.poetry]
name = "matrix-synapse"
version = "1.94.0"
version = "1.85.2"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "Apache-2.0"
@@ -154,7 +147,7 @@ synapse_review_recent_signups = "synapse._scripts.review_recent_signups:main"
update_synapse_database = "synapse._scripts.update_synapse_database:main"
[tool.poetry.dependencies]
python = "^3.8.0"
python = "^3.7.1"
# Mandatory Dependencies
# ----------------------
@@ -181,9 +174,7 @@ PyYAML = ">=3.13"
pyasn1 = ">=0.1.9"
pyasn1-modules = ">=0.0.7"
bcrypt = ">=3.1.7"
# 10.0.1 minimum is mandatory here because of libwebp CVE-2023-4863.
# Packagers that already took care of libwebp can lower that down to 5.4.0.
Pillow = ">=10.0.1"
Pillow = ">=5.4.0"
# We use SortedDict.peekitem(), which was added in sortedcontainers 1.5.2.
sortedcontainers = ">=1.5.2"
pymacaroons = ">=0.13.0"
@@ -210,11 +201,13 @@ cryptography = ">=3.4.7"
# ijson 3.1.4 fixes a bug with "." in property names
ijson = ">=3.1.4"
matrix-common = "^1.3.0"
# We need packaging.version.Version(...).major added in 20.0.
packaging = ">=20.0"
# We support pydantic v1 and pydantic v2 via the pydantic.v1 compat module.
# See https://github.com/matrix-org/synapse/issues/15858
pydantic = ">=1.7.4, <3"
# We need packaging.requirements.Requirement, added in 16.1.
packaging = ">=16.1"
# At the time of writing, we only use functions from the version of `importlib.metadata`
# which shipped in Python 3.8. This corresponds to version 1.4 of the backport.
importlib_metadata = { version = ">=1.4", python = "<3.8" }
# This is the most recent version of Pydantic with available on common distros.
pydantic = ">=1.7.4"
# This is for building the rust components during "poetry install", which
# currently ignores the `build-system.requires` directive (c.f.
@@ -315,18 +308,12 @@ all = [
]
[tool.poetry.dev-dependencies]
# We pin development dependencies in poetry.lock so that our tests don't start
# failing on new releases. Keeping lower bounds loose here means that dependabot
# can bump versions without having to update the content-hash in the lockfile.
# This helps prevent merge conflicts when running a batch of dependabot updates.
# We pin black so that our tests don't start failing on new releases.
isort = ">=5.10.1"
black = ">=22.7.0"
ruff = "0.0.290"
# Type checking only works with the pydantic.v1 compat module from pydantic v2
pydantic = "^2"
black = ">=22.3.0"
ruff = "0.0.265"
# Typechecking
lxml-stubs = ">=0.4.0"
mypy = "*"
mypy-zope = "*"
types-bleach = ">=4.1.0"
@@ -381,21 +368,13 @@ furo = ">=2022.12.7,<2024.0.0"
# system changes.
# We are happy to raise these upper bounds upon request,
# provided we check that it's safe to do so (i.e. that CI passes).
requires = ["poetry-core>=1.1.0,<=1.7.0", "setuptools_rust>=1.3,<=1.7.0"]
requires = ["poetry-core>=1.1.0,<=1.6.0", "setuptools_rust>=1.3,<=1.6.0"]
build-backend = "poetry.core.masonry.api"
[tool.cibuildwheel]
# Skip unsupported platforms (by us or by Rust).
# See https://cibuildwheel.readthedocs.io/en/stable/options/#build-skip for the list of build targets.
# We skip:
# - CPython 3.6 and 3.7: EOLed
# - PyPy 3.7: we only support Python 3.8+
# - musllinux i686: excluded to reduce number of wheels we build.
# c.f. https://github.com/matrix-org/synapse/pull/12595#discussion_r963107677
# - PyPy on Aarch64 and musllinux on aarch64: too slow to build.
# c.f. https://github.com/matrix-org/synapse/pull/14259
skip = "cp36* cp37* pp37* *-musllinux_i686 pp*aarch64 *-musllinux_aarch64"
skip = "cp36* *-musllinux_i686 pp*aarch64 *-musllinux_aarch64"
# We need a rust compiler
before-all = "curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain stable -y --profile minimal"

View File

@@ -7,7 +7,7 @@ name = "synapse"
version = "0.1.0"
edition = "2021"
rust-version = "1.61.0"
rust-version = "1.58.1"
[lib]
name = "synapse"
@@ -15,8 +15,6 @@ name = "synapse"
# tests/benchmarks.
crate-type = ["lib", "cdylib"]
# This is deprecated, see tool.maturin in pyproject.toml.
# It is left here for compatibility with maturin < 0.15.
[package.metadata.maturin]
# This is where we tell maturin where to place the built library.
name = "synapse.synapse_rust"
@@ -25,12 +23,7 @@ name = "synapse.synapse_rust"
anyhow = "1.0.63"
lazy_static = "1.4.0"
log = "0.4.17"
pyo3 = { version = "0.17.1", features = [
"macros",
"anyhow",
"abi3",
"abi3-py37",
] }
pyo3 = { version = "0.17.1", features = ["macros", "anyhow", "abi3", "abi3-py37"] }
pyo3-log = "0.8.1"
pythonize = "0.17.0"
regex = "1.6.0"

View File

@@ -13,8 +13,7 @@
// limitations under the License.
#![feature(test)]
use std::borrow::Cow;
use std::collections::BTreeSet;
use synapse::push::{
evaluator::PushRuleEvaluator, Condition, EventMatchCondition, FilteredPushRules, JsonValue,
@@ -29,15 +28,15 @@ fn bench_match_exact(b: &mut Bencher) {
let flattened_keys = [
(
"type".to_string(),
JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("m.text"))),
JsonValue::Value(SimpleJsonValue::Str("m.text".to_string())),
),
(
"room_id".to_string(),
JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("!room:server"))),
JsonValue::Value(SimpleJsonValue::Str("!room:server".to_string())),
),
(
"content.body".to_string(),
JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("test message"))),
JsonValue::Value(SimpleJsonValue::Str("test message".to_string())),
),
]
.into_iter()
@@ -74,15 +73,15 @@ fn bench_match_word(b: &mut Bencher) {
let flattened_keys = [
(
"type".to_string(),
JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("m.text"))),
JsonValue::Value(SimpleJsonValue::Str("m.text".to_string())),
),
(
"room_id".to_string(),
JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("!room:server"))),
JsonValue::Value(SimpleJsonValue::Str("!room:server".to_string())),
),
(
"content.body".to_string(),
JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("test message"))),
JsonValue::Value(SimpleJsonValue::Str("test message".to_string())),
),
]
.into_iter()
@@ -119,15 +118,15 @@ fn bench_match_word_miss(b: &mut Bencher) {
let flattened_keys = [
(
"type".to_string(),
JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("m.text"))),
JsonValue::Value(SimpleJsonValue::Str("m.text".to_string())),
),
(
"room_id".to_string(),
JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("!room:server"))),
JsonValue::Value(SimpleJsonValue::Str("!room:server".to_string())),
),
(
"content.body".to_string(),
JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("test message"))),
JsonValue::Value(SimpleJsonValue::Str("test message".to_string())),
),
]
.into_iter()
@@ -164,15 +163,15 @@ fn bench_eval_message(b: &mut Bencher) {
let flattened_keys = [
(
"type".to_string(),
JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("m.text"))),
JsonValue::Value(SimpleJsonValue::Str("m.text".to_string())),
),
(
"room_id".to_string(),
JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("!room:server"))),
JsonValue::Value(SimpleJsonValue::Str("!room:server".to_string())),
),
(
"content.body".to_string(),
JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("test message"))),
JsonValue::Value(SimpleJsonValue::Str("test message".to_string())),
),
]
.into_iter()
@@ -198,6 +197,7 @@ fn bench_eval_message(b: &mut Bencher) {
false,
false,
false,
false,
);
b.iter(|| eval.run(&rules, Some("bob"), Some("person")));

View File

@@ -1,102 +0,0 @@
// Copyright 2023 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! An implementation of Matrix server ACL rules.
use std::net::Ipv4Addr;
use std::str::FromStr;
use anyhow::Error;
use pyo3::prelude::*;
use regex::Regex;
use crate::push::utils::{glob_to_regex, GlobMatchType};
/// Called when registering modules with python.
pub fn register_module(py: Python<'_>, m: &PyModule) -> PyResult<()> {
let child_module = PyModule::new(py, "acl")?;
child_module.add_class::<ServerAclEvaluator>()?;
m.add_submodule(child_module)?;
// We need to manually add the module to sys.modules to make `from
// synapse.synapse_rust import acl` work.
py.import("sys")?
.getattr("modules")?
.set_item("synapse.synapse_rust.acl", child_module)?;
Ok(())
}
#[derive(Debug, Clone)]
#[pyclass(frozen)]
pub struct ServerAclEvaluator {
allow_ip_literals: bool,
allow: Vec<Regex>,
deny: Vec<Regex>,
}
#[pymethods]
impl ServerAclEvaluator {
#[new]
pub fn py_new(
allow_ip_literals: bool,
allow: Vec<&str>,
deny: Vec<&str>,
) -> Result<Self, Error> {
let allow = allow
.iter()
.map(|s| glob_to_regex(s, GlobMatchType::Whole))
.collect::<Result<_, _>>()?;
let deny = deny
.iter()
.map(|s| glob_to_regex(s, GlobMatchType::Whole))
.collect::<Result<_, _>>()?;
Ok(ServerAclEvaluator {
allow_ip_literals,
allow,
deny,
})
}
pub fn server_matches_acl_event(&self, server_name: &str) -> bool {
// first of all, check if literal IPs are blocked, and if so, whether the
// server name is a literal IP
if !self.allow_ip_literals {
// check for ipv6 literals. These start with '['.
if server_name.starts_with('[') {
return false;
}
// check for ipv4 literals. We can just lift the routine from std::net.
if Ipv4Addr::from_str(server_name).is_ok() {
return false;
}
}
// next, check the deny list
if self.deny.iter().any(|e| e.is_match(server_name)) {
return false;
}
// then the allow list.
if self.allow.iter().any(|e| e.is_match(server_name)) {
return true;
}
// everything else should be rejected.
false
}
}

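Since this file is deleted wholesale in this diff, it is worth recording how it is driven from Python. Combining the constructor above with the type stub shown near the end of this diff, a usage sketch (assuming the compiled synapse_rust extension is importable; the server names are hypothetical) looks like:

# Usage sketch for ServerAclEvaluator; arguments are positional:
# (allow_ip_literals, allow, deny).
from synapse.synapse_rust.acl import ServerAclEvaluator

evaluator = ServerAclEvaluator(
    False,                                       # reject IP literals outright
    ["*"],                                       # allow any server name...
    ["evil.example.com", "*.evil.example.com"],  # ...except these globs
)

assert evaluator.server_matches_acl_event("matrix.org")
assert not evaluator.server_matches_acl_event("subdomain.evil.example.com")
# IP literal checks run before the deny/allow lists are consulted.
assert not evaluator.server_matches_acl_event("1.2.3.4")
assert not evaluator.server_matches_acl_event("[2001:db8::1]")

The matching order (IP literals, then deny, then allow, then reject) follows server_matches_acl_event above.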
View File

@@ -2,7 +2,6 @@ use lazy_static::lazy_static;
use pyo3::prelude::*;
use pyo3_log::ResetHandle;
pub mod acl;
pub mod push;
lazy_static! {
@@ -39,7 +38,6 @@ fn synapse_rust(py: Python<'_>, m: &PyModule) -> PyResult<()> {
m.add_function(wrap_pyfunction!(get_rust_file_digest, m)?)?;
m.add_function(wrap_pyfunction!(reset_logging_config, m)?)?;
acl::register_module(py, m)?;
push::register_module(py, m)?;
Ok(())

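The sys.modules bookkeeping in register_module is easy to miss. A pure-Python reproduction (with a hypothetical module name) shows why the set_item call matters: the extension is a plain module, not a package, so dotted imports of its children only resolve if they are registered explicitly.

# Pure-Python sketch of the sys.modules registration (hypothetical names).
import sys
import types

root = types.ModuleType("demo_rust")
child = types.ModuleType("demo_rust.acl")
root.acl = child                       # roughly what add_submodule does
sys.modules["demo_rust"] = root

# Without the next line, `import demo_rust.acl` raises ModuleNotFoundError:
# demo_rust has no __path__ for the import system to search.
sys.modules["demo_rust.acl"] = child   # roughly what set_item does

import demo_rust.acl  # now resolves straight from sys.modules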
View File

@@ -63,18 +63,21 @@ pub const BASE_PREPEND_OVERRIDE_RULES: &[PushRule] = &[PushRule {
}];
pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
// We don't want to notify on edits. Not only can this be confusing in real
// time (2 notifications, one message) but it's especially confusing
// if a bridge needs to edit a previously backfilled message.
PushRule {
rule_id: Cow::Borrowed("global/override/.org.matrix.msc4028.encrypted_event"),
rule_id: Cow::Borrowed("global/override/.com.beeper.suppress_edits"),
priority_class: 5,
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
EventMatchCondition {
key: Cow::Borrowed("type"),
pattern: Cow::Borrowed("m.room.encrypted"),
key: Cow::Borrowed("content.m\\.relates_to.rel_type"),
pattern: Cow::Borrowed("m.replace"),
},
))]),
actions: Cow::Borrowed(&[Action::Notify]),
actions: Cow::Borrowed(&[]),
default: true,
default_enabled: false,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/override/.m.rule.suppress_notices"),
@@ -139,11 +142,11 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/override/.m.rule.is_user_mention"),
rule_id: Cow::Borrowed(".org.matrix.msc3952.is_user_mention"),
priority_class: 5,
conditions: Cow::Borrowed(&[Condition::Known(
KnownCondition::ExactEventPropertyContainsType(EventPropertyIsTypeCondition {
key: Cow::Borrowed(r"content.m\.mentions.user_ids"),
key: Cow::Borrowed("content.org\\.matrix\\.msc3952\\.mentions.user_ids"),
value_type: Cow::Borrowed(&EventMatchPatternType::UserId),
}),
)]),
@@ -160,12 +163,12 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/override/.m.rule.is_room_mention"),
rule_id: Cow::Borrowed(".org.matrix.msc3952.is_room_mention"),
priority_class: 5,
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::EventPropertyIs(EventPropertyIsCondition {
key: Cow::Borrowed(r"content.m\.mentions.room"),
value: Cow::Owned(SimpleJsonValue::Bool(true)),
key: Cow::Borrowed("content.org\\.matrix\\.msc3952\\.mentions.room"),
value: Cow::Borrowed(&SimpleJsonValue::Bool(true)),
})),
Condition::Known(KnownCondition::SenderNotificationPermission {
key: Cow::Borrowed("room"),
@@ -238,21 +241,6 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
default: true,
default_enabled: true,
},
// We don't want to notify on edits *unless* the edit directly mentions a
// user, which is handled above.
PushRule {
rule_id: Cow::Borrowed("global/override/.m.rule.suppress_edits"),
priority_class: 5,
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventPropertyIs(
EventPropertyIsCondition {
key: Cow::Borrowed(r"content.m\.relates_to.rel_type"),
value: Cow::Owned(SimpleJsonValue::Str(Cow::Borrowed("m.replace"))),
},
))]),
actions: Cow::Borrowed(&[]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/override/.org.matrix.msc3930.rule.poll_response"),
priority_class: 5,

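For context on the .com.beeper.suppress_edits rule above: its condition keys into the flattened event via content.m\.relates_to.rel_type, where the backslash escapes the literal dot in the m.relates_to property name. A hedged sketch of an event it matches (payload values are illustrative):

# An illustrative message edit that the suppress_edits rule matches.
edit_event = {
    "type": "m.room.message",
    "content": {
        "m.relates_to": {
            "rel_type": "m.replace",        # matched by the rule's condition
            "event_id": "$original_event",  # hypothetical event ID
        },
        "m.new_content": {"msgtype": "m.text", "body": "fixed typo"},
        "msgtype": "m.text",
        "body": "* fixed typo",
    },
}
# Flattened, this yields the key content.m\.relates_to.rel_type with value
# "m.replace"; the rule's actions are empty, so no notification is generated.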
View File

@@ -70,9 +70,7 @@ pub struct PushRuleEvaluator {
/// The "content.body", if any.
body: String,
/// True if the event has an m.mentions property. (Note that this is a separate
/// flag instead of checking flattened_keys since the m.mentions property
/// might be an empty map and not appear in flattened_keys.)
/// True if the event has a mentions property and MSC3952 support is enabled.
has_mentions: bool,
/// The number of users in the room.
@@ -117,7 +115,7 @@ impl PushRuleEvaluator {
msc3931_enabled: bool,
) -> Result<Self, Error> {
let body = match flattened_keys.get("content.body") {
Some(JsonValue::Value(SimpleJsonValue::Str(s))) => s.clone().into_owned(),
Some(JsonValue::Value(SimpleJsonValue::Str(s))) => s.clone(),
_ => String::new(),
};
@@ -157,7 +155,9 @@ impl PushRuleEvaluator {
let rule_id = &push_rule.rule_id().to_string();
// For backwards-compatibility the legacy mention rules are disabled
// if the event contains the 'm.mentions' property.
// if the event contains the 'm.mentions' property (and if the
// experimental feature is enabled, both of these are represented
// by the has_mentions flag).
if self.has_mentions
&& (rule_id == "global/override/.m.rule.contains_display_name"
|| rule_id == "global/content/.m.rule.contains_user_name"
@@ -313,15 +313,13 @@ impl PushRuleEvaluator {
};
let pattern = match &*exact_event_match.value_type {
EventMatchPatternType::UserId => user_id.to_owned(),
EventMatchPatternType::UserLocalpart => {
get_localpart_from_id(user_id)?.to_owned()
}
EventMatchPatternType::UserId => user_id,
EventMatchPatternType::UserLocalpart => get_localpart_from_id(user_id)?,
};
self.match_event_property_contains(
exact_event_match.key.clone(),
Cow::Borrowed(&SimpleJsonValue::Str(Cow::Owned(pattern))),
Cow::Borrowed(&SimpleJsonValue::Str(pattern.to_string())),
)?
}
KnownCondition::ContainsDisplayName => {
@@ -496,7 +494,7 @@ fn push_rule_evaluator() {
let mut flattened_keys = BTreeMap::new();
flattened_keys.insert(
"content.body".to_string(),
JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("foo bar bob hello"))),
JsonValue::Value(SimpleJsonValue::Str("foo bar bob hello".to_string())),
);
let evaluator = PushRuleEvaluator::py_new(
flattened_keys,
@@ -524,7 +522,7 @@ fn test_requires_room_version_supports_condition() {
let mut flattened_keys = BTreeMap::new();
flattened_keys.insert(
"content.body".to_string(),
JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("foo bar bob hello"))),
JsonValue::Value(SimpleJsonValue::Str("foo bar bob hello".to_string())),
);
let flags = vec![RoomVersionFeatures::ExtensibleEvents.as_str().to_string()];
let evaluator = PushRuleEvaluator::py_new(
@@ -564,7 +562,7 @@ fn test_requires_room_version_supports_condition() {
};
let rules = PushRules::new(vec![custom_rule]);
result = evaluator.run(
&FilteredPushRules::py_new(rules, BTreeMap::new(), true, false, true, false),
&FilteredPushRules::py_new(rules, BTreeMap::new(), true, false, true, false, false),
None,
None,
);

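As noted at the benchmarks earlier, an empty m.mentions map flattens away entirely, so has_mentions cannot be derived from flattened_keys. A minimal sketch of how a caller might compute it from the raw event content (illustrative; the real computation lives in Synapse's Python layer):

# Sketch: deriving has_mentions from raw event content (illustrative).
from typing import Any, Mapping

def has_mentions(content: Mapping[str, Any]) -> bool:
    # Mere *presence* of the property counts, even if the map is empty --
    # exactly why flattened_keys alone is not enough.
    return "m.mentions" in content

assert has_mentions({"m.mentions": {}})  # empty map still counts
assert has_mentions({"m.mentions": {"user_ids": ["@bob:example.org"]}})
assert not has_mentions({"body": "hello"})

When the flag is set, the legacy contains_display_name / contains_user_name rules above are skipped for backwards compatibility.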
View File

@@ -256,7 +256,7 @@ impl<'de> Deserialize<'de> for Action {
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
#[serde(untagged)]
pub enum SimpleJsonValue {
Str(Cow<'static, str>),
Str(String),
Int(i64),
Bool(bool),
Null,
@@ -265,7 +265,7 @@ pub enum SimpleJsonValue {
impl<'source> FromPyObject<'source> for SimpleJsonValue {
fn extract(ob: &'source PyAny) -> PyResult<Self> {
if let Ok(s) = <PyString as pyo3::PyTryFrom>::try_from(ob) {
Ok(SimpleJsonValue::Str(Cow::Owned(s.to_string())))
Ok(SimpleJsonValue::Str(s.to_string()))
// A bool *is* an int, ensure we try bool first.
} else if let Ok(b) = <PyBool as pyo3::PyTryFrom>::try_from(ob) {
Ok(SimpleJsonValue::Bool(b.extract()?))
@@ -527,7 +527,8 @@ pub struct FilteredPushRules {
msc1767_enabled: bool,
msc3381_polls_enabled: bool,
msc3664_enabled: bool,
msc4028_push_encrypted_events: bool,
msc3952_intentional_mentions: bool,
msc3958_suppress_edits_enabled: bool,
}
#[pymethods]
@@ -539,7 +540,8 @@ impl FilteredPushRules {
msc1767_enabled: bool,
msc3381_polls_enabled: bool,
msc3664_enabled: bool,
msc4028_push_encrypted_events: bool,
msc3952_intentional_mentions: bool,
msc3958_suppress_edits_enabled: bool,
) -> Self {
Self {
push_rules,
@@ -547,7 +549,8 @@ impl FilteredPushRules {
msc1767_enabled,
msc3381_polls_enabled,
msc3664_enabled,
msc4028_push_encrypted_events,
msc3952_intentional_mentions,
msc3958_suppress_edits_enabled,
}
}
@@ -584,8 +587,12 @@ impl FilteredPushRules {
return false;
}
if !self.msc4028_push_encrypted_events
&& rule.rule_id == "global/override/.org.matrix.msc4028.encrypted_event"
if !self.msc3952_intentional_mentions && rule.rule_id.contains("org.matrix.msc3952")
{
return false;
}
if !self.msc3958_suppress_edits_enabled
&& rule.rule_id == "global/override/.com.beeper.suppress_edits"
{
return false;
}

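Both sides of this hunk implement the same pattern: experimental rules stay hidden unless their feature flag is enabled. A pure-Python sketch of the msc3952/msc3958 branch shown above, with an illustrative function name:

# Pure-Python sketch of the experimental-rule filtering shown above.
def rule_visible(
    rule_id: str,
    msc3952_intentional_mentions: bool,
    msc3958_suppress_edits_enabled: bool,
) -> bool:
    if not msc3952_intentional_mentions and "org.matrix.msc3952" in rule_id:
        return False
    if (
        not msc3958_suppress_edits_enabled
        and rule_id == "global/override/.com.beeper.suppress_edits"
    ):
        return False
    return True

assert not rule_visible(".org.matrix.msc3952.is_user_mention", False, False)
assert rule_visible(".org.matrix.msc3952.is_user_mention", True, False)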
View File

@@ -20,20 +20,15 @@ from concurrent.futures import ThreadPoolExecutor
from types import FrameType
from typing import Collection, Optional, Sequence, Set
# These are expanded inside the dockerfile to be fully qualified image names,
# e.g. docker.io/library/debian:bullseye
#
# If an EOL is forced by a Python version and we're dropping support for it, make sure
# to remove references to the distribution across Synapse (search for "bullseye" for
# example)
DISTS = (
"debian:bullseye", # (EOL ~2024-07) (our EOL forced by Python 3.9 is 2025-10-05)
"debian:bookworm", # (EOL not specified yet) (our EOL forced by Python 3.11 is 2027-10-24)
"debian:sid", # (EOL not specified yet) (our EOL forced by Python 3.11 is 2027-10-24)
"ubuntu:focal", # 20.04 LTS (EOL 2025-04) (our EOL forced by Python 3.8 is 2024-10-14)
"ubuntu:jammy", # 22.04 LTS (EOL 2027-04) (our EOL forced by Python 3.10 is 2026-10-04)
"ubuntu:lunar", # 23.04 (EOL 2024-01) (our EOL forced by Python 3.11 is 2027-10-24)
"debian:trixie", # (EOL not specified yet)
"debian:buster", # oldstable: EOL 2022-08
"debian:bullseye",
"debian:bookworm",
"debian:sid",
"ubuntu:focal", # 20.04 LTS (our EOL forced by Py38 on 2024-10-14)
"ubuntu:jammy", # 22.04 LTS (EOL 2027-04)
"ubuntu:kinetic", # 22.10 (EOL 2023-07-20)
"ubuntu:lunar", # 23.04 (EOL 2024-01)
)
DESC = """\
@@ -46,7 +41,7 @@ can be passed on the commandline for debugging.
projdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
class Builder:
class Builder(object):
def __init__(
self,
redirect_stdout: bool = False,

View File

@@ -36,41 +36,11 @@ import textwrap
import traceback
import unittest.mock
from contextlib import contextmanager
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Generator,
List,
Set,
Type,
TypeVar,
)
from typing import Any, Callable, Dict, Generator, List, Set, Type, TypeVar
from parameterized import parameterized
from synapse._pydantic_compat import HAS_PYDANTIC_V2
if TYPE_CHECKING or HAS_PYDANTIC_V2:
from pydantic.v1 import (
BaseModel as PydanticBaseModel,
conbytes,
confloat,
conint,
constr,
)
from pydantic.v1.typing import get_args
else:
from pydantic import (
BaseModel as PydanticBaseModel,
conbytes,
confloat,
conint,
constr,
)
from pydantic.typing import get_args
from pydantic import BaseModel as PydanticBaseModel, conbytes, confloat, conint, constr
from pydantic.typing import get_args
from typing_extensions import ParamSpec
logger = logging.getLogger(__name__)
@@ -281,10 +251,7 @@ class TestConstrainedTypesPatch(unittest.TestCase):
with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException):
run_test_snippet(
"""
try:
from pydantic.v1 import constr
except ImportError:
from pydantic import constr
from pydantic import constr
constr()
"""
)
@@ -302,10 +269,7 @@ class TestConstrainedTypesPatch(unittest.TestCase):
with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException):
run_test_snippet(
"""
try:
from pydantic.v1 import *
except ImportError:
from pydantic import *
from pydantic import *
constr()
"""
)
@@ -314,10 +278,7 @@ class TestConstrainedTypesPatch(unittest.TestCase):
with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException):
run_test_snippet(
"""
try:
from pydantic.v1.types import constr
except ImportError:
from pydantic.types import constr
from pydantic.types import constr
constr()
"""
)
@@ -326,11 +287,8 @@ class TestConstrainedTypesPatch(unittest.TestCase):
with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException):
run_test_snippet(
"""
try:
from pydantic.v1 import types as pydantic_types
except ImportError:
from pydantic import types as pydantic_types
pydantic_types.constr()
import pydantic.types
pydantic.types.constr()
"""
)
@@ -338,10 +296,7 @@ class TestConstrainedTypesPatch(unittest.TestCase):
with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException):
run_test_snippet(
"""
try:
from pydantic.v1 import constr
except ImportError:
from pydantic import constr
from pydantic import constr
constr(min_length=10)
"""
)
@@ -350,10 +305,7 @@ class TestConstrainedTypesPatch(unittest.TestCase):
with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException):
run_test_snippet(
"""
try:
from pydantic.v1 import constr
except ImportError:
from pydantic import constr
from pydantic import constr
constr(strict=False)
"""
)
@@ -362,10 +314,7 @@ class TestConstrainedTypesPatch(unittest.TestCase):
with monkeypatch_pydantic():
run_test_snippet(
"""
try:
from pydantic.v1 import constr
except ImportError:
from pydantic import constr
from pydantic import constr
constr(strict=True)
"""
)
@@ -374,10 +323,7 @@ class TestConstrainedTypesPatch(unittest.TestCase):
with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException):
run_test_snippet(
"""
try:
from pydantic.v1 import constr
except ImportError:
from pydantic import constr
from pydantic import constr
x: constr()
"""
)
@@ -386,10 +332,7 @@ class TestConstrainedTypesPatch(unittest.TestCase):
with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException):
run_test_snippet(
"""
try:
from pydantic.v1 import BaseModel, conint
except ImportError:
from pydantic import BaseModel, conint
from pydantic import BaseModel, conint
class C:
x: conint()
"""
@@ -418,10 +361,7 @@ class TestFieldTypeInspection(unittest.TestCase):
run_test_snippet(
f"""
from typing import *
try:
from pydantic.v1 import *
except ImportError:
from pydantic import *
from pydantic import *
class C(BaseModel):
f: {annotation}
"""
@@ -448,10 +388,7 @@ class TestFieldTypeInspection(unittest.TestCase):
run_test_snippet(
f"""
from typing import *
try:
from pydantic.v1 import *
except ImportError:
from pydantic import *
from pydantic import *
class C(BaseModel):
f: {annotation}
"""
@@ -461,10 +398,7 @@ class TestFieldTypeInspection(unittest.TestCase):
with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException):
run_test_snippet(
"""
try:
from pydantic.v1.main import BaseModel
except ImportError:
from pydantic.main import BaseModel
from pydantic.main import BaseModel
class C(BaseModel):
f: str
"""

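The snippets above exercise the model checker; the hazard it guards against is pydantic v1's silent type coercion. A minimal demonstration of why a bare constr() is flagged while constr(strict=True) passes (assuming pydantic v1 semantics):

# Why the checker demands strict=True: pydantic v1 coerces by default.
from pydantic import BaseModel, constr

class Lax(BaseModel):
    name: constr()             # the checker flags this

class Strict(BaseModel):
    name: constr(strict=True)  # the checker accepts this

print(Lax(name=123).name)      # "123" -- the int was silently coerced to str
try:
    Strict(name=123)
except Exception as exc:
    print("rejected:", type(exc).__name__)  # ValidationError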
View File

@@ -43,7 +43,7 @@ def main(force_colors: bool) -> None:
diffs: List[git.Diff] = repo.remote().refs.develop.commit.diff(None)
# Get the schema version of the local file to check against current schema on develop
with open("synapse/storage/schema/__init__.py") as file:
with open("synapse/storage/schema/__init__.py", "r") as file:
local_schema = file.read()
new_locals: Dict[str, Any] = {}
exec(local_schema, new_locals)

View File

@@ -214,7 +214,7 @@ fi
extra_test_args=()
test_tags="synapse_blacklist,msc3874,msc3890,msc3391,msc3930,faster_joins"
test_tags="synapse_blacklist,msc3787,msc3874,msc3890,msc3391,msc3930,faster_joins"
# All environment variables starting with PASS_ will be shared.
# (The prefix is stripped off before reaching the container.)
@@ -246,6 +246,10 @@ else
else
export PASS_SYNAPSE_COMPLEMENT_DATABASE=sqlite
fi
# The tests for importing historical messages (MSC2716)
# only pass with monoliths, currently.
test_tags="$test_tags,msc2716"
fi
if [[ -n "$ASYNCIO_REACTOR" ]]; then
@@ -253,10 +257,6 @@ if [[ -n "$ASYNCIO_REACTOR" ]]; then
export PASS_SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR=true
fi
if [[ -n "$UNIX_SOCKETS" ]]; then
# Enable full on Unix socket mode for Synapse, Redis and Postgresql
export PASS_SYNAPSE_USE_UNIX_SOCKET=1
fi
if [[ -n "$SYNAPSE_TEST_LOG_LEVEL" ]]; then
# Set the log level to what is desired
@@ -269,10 +269,6 @@ if [[ -n "$SYNAPSE_TEST_LOG_LEVEL" ]]; then
export PASS_SYNAPSE_LOG_SENSITIVE=1
fi
# Log a few more useful things for a developer attempting to debug something
# particularly tricky.
export PASS_SYNAPSE_LOG_TESTING=1
# Run the tests!
echo "Images built; running complement"
cd "$COMPLEMENT_DIR"

View File

@@ -136,11 +136,11 @@ def request(
authorization_headers.append(header)
print("Authorization: %s" % header, file=sys.stderr)
dest = "matrix-federation://%s%s" % (destination, path)
dest = "matrix://%s%s" % (destination, path)
print("Requesting %s" % dest, file=sys.stderr)
s = requests.Session()
s.mount("matrix-federation://", MatrixConnectionAdapter())
s.mount("matrix://", MatrixConnectionAdapter())
headers: Dict[str, str] = {
"Authorization": authorization_headers[0],
@@ -247,7 +247,7 @@ def main() -> None:
def read_args_from_config(args: argparse.Namespace) -> None:
with open(args.config) as fh:
with open(args.config, "r") as fh:
config = yaml.safe_load(fh)
if not args.server_name:
@@ -329,17 +329,6 @@ class MatrixConnectionAdapter(HTTPAdapter):
raise ValueError("Invalid host:port '%s'" % (server_name,))
return out[0], port, out[0]
# Look up SRV for the Matrix 1.8 `matrix-fed` service first
try:
srv = srvlookup.lookup("matrix-fed", "tcp", server_name)[0]
print(
f"SRV lookup on _matrix-fed._tcp.{server_name} gave {srv}",
file=sys.stderr,
)
return srv.host, srv.port, server_name
except Exception:
pass
# Fall back to deprecated `matrix` service
try:
srv = srvlookup.lookup("matrix", "tcp", server_name)[0]
print(
@@ -348,7 +337,6 @@ class MatrixConnectionAdapter(HTTPAdapter):
)
return srv.host, srv.port, server_name
except Exception:
# Fall even further back to just port 8448
return server_name, 8448, server_name
@staticmethod

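The resolution order changed by this hunk is worth spelling out: the newer code tries the Matrix 1.8 _matrix-fed._tcp SRV record first, falls back to the deprecated _matrix._tcp record, and finally to port 8448. Condensed into a standalone sketch using the same srvlookup call as the script:

# Sketch of the SRV fallback order implemented by the adapter above.
import sys
import srvlookup

def resolve(server_name: str):
    for service in ("matrix-fed", "matrix"):  # Matrix 1.8 service first
        try:
            srv = srvlookup.lookup(service, "tcp", server_name)[0]
            print(f"SRV lookup on _{service}._tcp.{server_name} gave {srv}",
                  file=sys.stderr)
            return srv.host, srv.port
        except Exception:
            continue
    return server_name, 8448  # final fallback: the default federation port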
View File

@@ -16,24 +16,13 @@
can crop up, e.g. the cache descriptors.
"""
from typing import Callable, Optional, Tuple, Type, Union
from typing import Callable, Optional, Type
import mypy.types
from mypy.erasetype import remove_instance_last_known_values
from mypy.errorcodes import ErrorCode
from mypy.nodes import ARG_NAMED_OPT, TempNode, Var
from mypy.plugin import FunctionSigContext, MethodSigContext, Plugin
from mypy.nodes import ARG_NAMED_OPT
from mypy.plugin import MethodSigContext, Plugin
from mypy.typeops import bind_self
from mypy.types import (
AnyType,
CallableType,
Instance,
NoneType,
TupleType,
TypeAliasType,
UninhabitedType,
UnionType,
)
from mypy.types import CallableType, Instance, NoneType, UnionType
class SynapsePlugin(Plugin):
@@ -41,43 +30,14 @@ class SynapsePlugin(Plugin):
self, fullname: str
) -> Optional[Callable[[MethodSigContext], CallableType]]:
if fullname.startswith(
(
"synapse.util.caches.descriptors.CachedFunction.__call__",
"synapse.util.caches.descriptors._LruCachedFunction.__call__",
)
"synapse.util.caches.descriptors.CachedFunction.__call__"
) or fullname.startswith(
"synapse.util.caches.descriptors._LruCachedFunction.__call__"
):
return cached_function_method_signature
if fullname in (
"synapse.util.caches.descriptors._CachedFunctionDescriptor.__call__",
"synapse.util.caches.descriptors._CachedListFunctionDescriptor.__call__",
):
return check_is_cacheable_wrapper
return None
def _get_true_return_type(signature: CallableType) -> mypy.types.Type:
"""
Get the "final" return type of a callable which might return an Awaitable/Deferred.
"""
if isinstance(signature.ret_type, Instance):
# If a coroutine, unwrap the coroutine's return type.
if signature.ret_type.type.fullname == "typing.Coroutine":
return signature.ret_type.args[2]
# If an awaitable, unwrap the awaitable's final value.
elif signature.ret_type.type.fullname == "typing.Awaitable":
return signature.ret_type.args[0]
# If a Deferred, unwrap the Deferred's final value.
elif signature.ret_type.type.fullname == "twisted.internet.defer.Deferred":
return signature.ret_type.args[0]
# Otherwise, return the raw value of the function.
return signature.ret_type
def cached_function_method_signature(ctx: MethodSigContext) -> CallableType:
"""Fixes the `CachedFunction.__call__` signature to be correct.
@@ -86,17 +46,16 @@ def cached_function_method_signature(ctx: MethodSigContext) -> CallableType:
1. the `self` argument needs to be marked as "bound";
2. any `cache_context` argument should be removed;
3. an optional keyword argument `on_invalidated` should be added.
4. Wrap the return type to always be a Deferred.
"""
# 1. Mark this as a bound function signature.
signature: CallableType = bind_self(ctx.default_signature)
# First we mark this as a bound function signature.
signature = bind_self(ctx.default_signature)
# 2. Remove any "cache_context" args.
# Secondly, we remove any "cache_context" args.
#
# Note: We should be only doing this if `cache_context=True` is set, but if
# it isn't then the code will raise an exception when it's called anyway, so
# it's not the end of the world.
# its not the end of the world.
context_arg_index = None
for idx, name in enumerate(signature.arg_names):
if name == "cache_context":
@@ -112,7 +71,7 @@ def cached_function_method_signature(ctx: MethodSigContext) -> CallableType:
arg_names.pop(context_arg_index)
arg_kinds.pop(context_arg_index)
# 3. Add an optional "on_invalidate" argument.
# Third, we add an optional "on_invalidate" argument.
#
# This is either
# - a callable which accepts no input and returns nothing, or
@@ -134,16 +93,35 @@ def cached_function_method_signature(ctx: MethodSigContext) -> CallableType:
arg_names.append("on_invalidate")
arg_kinds.append(ARG_NAMED_OPT) # Arg is an optional kwarg.
# 4. Ensure the return type is a Deferred.
ret_arg = _get_true_return_type(signature)
# Finally we ensure the return type is a Deferred.
if (
isinstance(signature.ret_type, Instance)
and signature.ret_type.type.fullname == "twisted.internet.defer.Deferred"
):
# If it is already a Deferred, nothing to do.
ret_type = signature.ret_type
else:
ret_arg = None
if isinstance(signature.ret_type, Instance):
# If a coroutine, wrap the coroutine's return type in a Deferred.
if signature.ret_type.type.fullname == "typing.Coroutine":
ret_arg = signature.ret_type.args[2]
# This should be able to use ctx.api.named_generic_type, but that doesn't seem
# to find the correct symbol for anything more than 1 module deep.
#
# modules is not part of CheckerPluginInterface. The following is a combination
# of TypeChecker.named_generic_type and TypeChecker.lookup_typeinfo.
sym = ctx.api.modules["twisted.internet.defer"].names.get("Deferred") # type: ignore[attr-defined]
ret_type = Instance(sym.node, [remove_instance_last_known_values(ret_arg)])
# If an awaitable, wrap the awaitable's final value in a Deferred.
elif signature.ret_type.type.fullname == "typing.Awaitable":
ret_arg = signature.ret_type.args[0]
# Otherwise, wrap the return value in a Deferred.
if ret_arg is None:
ret_arg = signature.ret_type
# This should be able to use ctx.api.named_generic_type, but that doesn't seem
# to find the correct symbol for anything more than 1 module deep.
#
# modules is not part of CheckerPluginInterface. The following is a combination
# of TypeChecker.named_generic_type and TypeChecker.lookup_typeinfo.
sym = ctx.api.modules["twisted.internet.defer"].names.get("Deferred") # type: ignore[attr-defined]
ret_type = Instance(sym.node, [remove_instance_last_known_values(ret_arg)])
signature = signature.copy_modified(
arg_types=arg_types,
@@ -155,198 +133,6 @@ def cached_function_method_signature(ctx: MethodSigContext) -> CallableType:
return signature
def check_is_cacheable_wrapper(ctx: MethodSigContext) -> CallableType:
"""Asserts that the signature of a method returns a value which can be cached.
Makes no changes to the provided method signature.
"""
# The true signature, this isn't being modified so this is what will be returned.
signature: CallableType = ctx.default_signature
if not isinstance(ctx.args[0][0], TempNode):
ctx.api.note("Cached function is not a TempNode?!", ctx.context) # type: ignore[attr-defined]
return signature
orig_sig = ctx.args[0][0].type
if not isinstance(orig_sig, CallableType):
ctx.api.fail("Cached 'function' is not a callable", ctx.context)
return signature
check_is_cacheable(orig_sig, ctx)
return signature
def check_is_cacheable(
signature: CallableType,
ctx: Union[MethodSigContext, FunctionSigContext],
) -> None:
"""
Check if a callable returns a type which can be cached.
Args:
signature: The callable to check.
ctx: The signature context, used for error reporting.
"""
# Unwrap the true return type from the cached function.
return_type = _get_true_return_type(signature)
verbose = ctx.api.options.verbosity >= 1
# TODO Technically a cachedList only needs immutable values, but forcing them
# to return Mapping instead of Dict is fine.
ok, note = is_cacheable(return_type, signature, verbose)
if ok:
message = f"function {signature.name} is @cached, returning {return_type}"
else:
message = f"function {signature.name} is @cached, but has mutable return value {return_type}"
if note:
message += f" ({note})"
message = message.replace("builtins.", "").replace("typing.", "")
if ok and note:
ctx.api.note(message, ctx.context) # type: ignore[attr-defined]
elif not ok:
ctx.api.fail(message, ctx.context, code=AT_CACHED_MUTABLE_RETURN)
# Immutable simple values.
IMMUTABLE_VALUE_TYPES = {
"builtins.bool",
"builtins.int",
"builtins.float",
"builtins.str",
"builtins.bytes",
}
# Types defined in Synapse which are known to be immutable.
IMMUTABLE_CUSTOM_TYPES = {
"synapse.synapse_rust.acl.ServerAclEvaluator",
"synapse.synapse_rust.push.FilteredPushRules",
# This is technically not immutable, but close enough.
"signedjson.types.VerifyKey",
}
# Immutable containers only if the values are also immutable.
IMMUTABLE_CONTAINER_TYPES_REQUIRING_IMMUTABLE_ELEMENTS = {
"builtins.frozenset",
"builtins.tuple",
"typing.AbstractSet",
"typing.Sequence",
"immutabledict.immutabledict",
}
MUTABLE_CONTAINER_TYPES = {
"builtins.set",
"builtins.list",
"builtins.dict",
}
AT_CACHED_MUTABLE_RETURN = ErrorCode(
"synapse-@cached-mutable",
"@cached() should have an immutable return type",
"General",
)
def is_cacheable(
rt: mypy.types.Type, signature: CallableType, verbose: bool
) -> Tuple[bool, Optional[str]]:
"""
Check if a particular type is cacheable.
A type is cacheable if it is immutable; for complex types this recurses to
check each type parameter.
Returns: a 2-tuple (cacheable, message).
- cacheable: False means the type is definitely not cacheable;
true means anything else.
- Optional message.
"""
# This should probably be done via a TypeVisitor. Apologies to the reader!
if isinstance(rt, AnyType):
return True, ("may be mutable" if verbose else None)
elif isinstance(rt, Instance):
if (
rt.type.fullname in IMMUTABLE_VALUE_TYPES
or rt.type.fullname in IMMUTABLE_CUSTOM_TYPES
):
# "Simple" types are generally immutable.
return True, None
elif rt.type.fullname == "typing.Mapping":
# Generally mapping keys are immutable, but they only *have* to be
# hashable, which doesn't imply immutability. E.g. Mapping[K, V]
# is cacheable iff K and V are cacheable.
return is_cacheable(rt.args[0], signature, verbose) and is_cacheable(
rt.args[1], signature, verbose
)
elif rt.type.fullname in IMMUTABLE_CONTAINER_TYPES_REQUIRING_IMMUTABLE_ELEMENTS:
# E.g. Collection[T] is cacheable iff T is cacheable.
return is_cacheable(rt.args[0], signature, verbose)
elif rt.type.fullname in MUTABLE_CONTAINER_TYPES:
# Mutable containers are mutable regardless of their underlying type.
return False, None
elif "attrs" in rt.type.metadata:
# attrs classes are only cacheable iff the class is frozen (immutable itself)
# and all attributes are cacheable.
frozen = rt.type.metadata["attrs"]["frozen"]
if frozen:
for attribute in rt.type.metadata["attrs"]["attributes"]:
attribute_name = attribute["name"]
symbol_node = rt.type.names[attribute_name].node
assert isinstance(symbol_node, Var)
assert symbol_node.type is not None
ok, note = is_cacheable(symbol_node.type, signature, verbose)
if not ok:
return False, f"non-frozen attrs property: {attribute_name}"
# All attributes were frozen.
return True, None
else:
return False, "non-frozen attrs class"
else:
# Ensure we fail for unknown types, as these generally mean that the
# above code is not complete.
return (
False,
f"Don't know how to handle {rt.type.fullname} return type instance",
)
elif isinstance(rt, NoneType):
# None is cachable.
return True, None
elif isinstance(rt, (TupleType, UnionType)):
# Tuples and unions are cacheable iff all their items are cacheable.
for item in rt.items:
ok, note = is_cacheable(item, signature, verbose)
if not ok:
return False, note
# This discards notes but that's probably fine
return True, None
elif isinstance(rt, TypeAliasType):
# For a type alias, check if the underlying real type is cacheable.
return is_cacheable(mypy.types.get_proper_type(rt), signature, verbose)
elif isinstance(rt, UninhabitedType) and rt.is_noreturn:
# There is no return value, just consider it cacheable. This is only used
# in tests.
return True, None
else:
# Ensure we fail for unknown types, as these generally mean that the
# above code is not complete.
return False, f"Don't know how to handle {type(rt).__qualname__} return type"
def plugin(version: str) -> Type[SynapsePlugin]:
# This is the entry point of the plugin, and lets us deal with the fact
# that the mypy plugin interface is *not* stable by looking at the version

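To make the deleted checker concrete: run under this plugin, mypy rejects @cached functions with mutable return types using the synapse-@cached-mutable error code, and accepts immutable ones. A sketch of user code (assuming a Synapse checkout where this plugin is active in the mypy config):

# What the (removed) checker enforces, as seen from user code (sketch).
from typing import Dict, Mapping

from synapse.util.caches.descriptors import cached

class ExampleStore:
    @cached()
    async def bad(self) -> Dict[str, int]:      # error: synapse-@cached-mutable
        return {}

    @cached()
    async def good(self) -> Mapping[str, int]:  # fine: Mapping of immutables
        return {}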
View File

@@ -1,4 +1,5 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -244,17 +245,11 @@ def _prepare() -> None:
else:
debian_version = new_version
if sys.platform == "darwin":
run_until_successful(
f"docker run --rm -v .:/synapse ubuntu:latest /synapse/scripts-dev/docker_update_debian_changelog.sh {new_version}",
shell=True,
)
else:
run_until_successful(
f'dch -M -v {debian_version} "New Synapse release {new_version}."',
shell=True,
)
run_until_successful('dch -M -r -D stable ""', shell=True)
run_until_successful(
f'dch -M -v {debian_version} "New Synapse release {new_version}."',
shell=True,
)
run_until_successful('dch -M -r -D stable ""', shell=True)
# Show the user the changes and ask if they want to edit the change log.
synapse_repo.git.add("-u")
@@ -572,27 +567,19 @@ def _notify(message: str) -> None:
# for this.
click.echo(f"\a{message}")
app_name = "Synapse Release Script"
# Try and run notify-send, but don't raise an Exception if this fails
# (This is best-effort)
if sys.platform == "darwin":
# See https://developer.apple.com/library/archive/documentation/AppleScript/Conceptual/AppleScriptLangGuide/reference/ASLR_cmds.html#//apple_ref/doc/uid/TP40000983-CH216-SW224
subprocess.run(
f"""osascript -e 'display notification "{message}" with title "{app_name}"'""",
shell=True,
)
else:
subprocess.run(
[
"notify-send",
"--app-name",
app_name,
"--expire-time",
"3600000",
message,
]
)
# TODO Support other platforms?
subprocess.run(
[
"notify-send",
"--app-name",
"Synapse Release Script",
"--expire-time",
"3600000",
message,
]
)
@cli.command()

View File

@@ -145,7 +145,7 @@ Example usage:
def read_args_from_config(args: argparse.Namespace) -> None:
with open(args.config) as fh:
with open(args.config, "r") as fh:
config = yaml.safe_load(fh)
if not args.server_name:
args.server_name = config["server_name"]

View File

@@ -1,21 +0,0 @@
# Copyright 2023 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
class ServerAclEvaluator:
def __init__(
self, allow_ip_literals: bool, allow: List[str], deny: List[str]
) -> None: ...
def server_matches_acl_event(self, server_name: str) -> bool: ...

View File

@@ -46,7 +46,8 @@ class FilteredPushRules:
msc1767_enabled: bool,
msc3381_polls_enabled: bool,
msc3664_enabled: bool,
msc4028_push_encrypted_events: bool,
msc3952_intentional_mentions: bool,
msc3958_suppress_edits_enabled: bool,
): ...
def rules(self) -> Collection[Tuple[PushRule, bool]]: ...

View File

@@ -21,21 +21,12 @@ import os
import sys
from typing import Any, Dict
from PIL import ImageFile
from synapse.util.rust import check_rust_lib_up_to_date
from synapse.util.stringutils import strtobool
# Allow truncated JPEG images to be thumbnailed.
ImageFile.LOAD_TRUNCATED_IMAGES = True
# Check that we're not running on an unsupported Python version.
#
# Note that we use an (unneeded) variable here so that pyupgrade doesn't nuke the
# if-statement completely.
py_version = sys.version_info
if py_version < (3, 8):
print("Synapse requires Python 3.8 or above.")
if sys.version_info < (3, 7):
print("Synapse requires Python 3.7 or above.")
sys.exit(1)
# Allow using the asyncio reactor via env var.
@@ -87,7 +78,7 @@ try:
except ImportError:
pass
import synapse.util # noqa: E402
import synapse.util
__version__ = synapse.util.SYNAPSE_VERSION

View File

@@ -1,26 +0,0 @@
# Copyright 2023 Maxwell G <maxwell@gtmx.me>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging.version import Version
try:
from pydantic import __version__ as pydantic_version
except ImportError:
import importlib.metadata
pydantic_version = importlib.metadata.version("pydantic")
HAS_PYDANTIC_V2: bool = Version(pydantic_version).major == 2
__all__ = ("HAS_PYDANTIC_V2",)

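The compat shim deleted here pairs with the try/except import dance shown earlier in the pydantic test-script hunk. Typical consumer code, as a sketch:

# Typical use of the shim (mirrors the imports seen earlier in this diff).
from typing import TYPE_CHECKING

from synapse._pydantic_compat import HAS_PYDANTIC_V2

if TYPE_CHECKING or HAS_PYDANTIC_V2:
    from pydantic.v1 import BaseModel  # pydantic 2 ships a v1 compat layer
else:
    from pydantic import BaseModel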
Some files were not shown because too many files have changed in this diff.