Compare commits
63 Commits
| SHA1 |
|---|
| cbdbcb0c37 |
| a743f7d33e |
| 0b014eb25e |
| 535a689cfc |
| 6b3e0ea6bd |
| 443b94f0d5 |
| badd5cfef4 |
| 988ff6167d |
| 8af8a9bce5 |
| 8e2759f2d8 |
| 0922462fc7 |
| 73d8ded0b0 |
| e3a49f4784 |
| d24cd17820 |
| 36d8b83888 |
| 32545d2e26 |
| 0cf4705393 |
| 5a275a2377 |
| 58c657322a |
| aa28110264 |
| 4bdbebccb9 |
| ba1588461b |
| a468768104 |
| 9535fd0f9c |
| 320186319a |
| 86cf6a3a17 |
| fb611f1899 |
| 379944c806 |
| 872dfb83f0 |
| 99892e7a19 |
| 9225764bc8 |
| 232a270d45 |
| a653c20c01 |
| 60a66716f8 |
| f0d72acebf |
| 18f91a7ac2 |
| 2ff7fbffcd |
| 0af56b807b |
| d2df831d4f |
| e96fd31ef1 |
| 45a23c0ab0 |
| 6fd43c4b93 |
| e60c8f301b |
| 5dc21d95b7 |
| 8469bd81d3 |
| 4c780abf2d |
| a3fb2b858d |
| 680dbc86b1 |
| 5f8db3ed0e |
| b03ffaa409 |
| d3386afe40 |
| 0d8a54abaf |
| 6ab6f61490 |
| 03e5c93639 |
| 3de49f46ab |
| e53e99edba |
| d820ff204b |
| 87ecb333e3 |
| c50da488a0 |
| 53e804f296 |
| 7d574149d3 |
| 211becb069 |
| 8dc02f9d91 |
@@ -1,8 +0,0 @@
#!/bin/sh

# replaces the dependency on Twisted in `python_dependencies` with trunk.

set -e
cd "$(dirname "$0")"/..

sed -i -e 's#"Twisted.*"#"Twisted @ git+https://github.com/twisted/twisted"#' synapse/python_dependencies.py
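The `sed` pattern `"Twisted.*"` is greedy, swallowing everything from the opening quote of the Twisted requirement to the last quote on the line. A quick Python re-enactment for illustration (the original pin shown here is hypothetical):

```python
import re

# Hypothetical pin in synapse/python_dependencies.py before the patch.
line = '    "Twisted[tls]>=18.9.0",'
# The whole quoted requirement is replaced with a PEP 508 direct reference,
# so pip installs Twisted's current trunk instead of the pinned release.
patched = re.sub(
    r'"Twisted.*"',
    '"Twisted @ git+https://github.com/twisted/twisted"',
    line,
)
assert patched == '    "Twisted @ git+https://github.com/twisted/twisted",'
```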
@@ -8,8 +8,4 @@
!pyproject.toml
!poetry.lock

# TODO: remove these once we have moved over to using poetry-core in pyproject.toml
!MANIFEST.in
!setup.py

**/__pycache__
+20 -26
@@ -20,19 +20,13 @@ jobs:
    - run: scripts-dev/config-lint.sh

  lint:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        toxenv:
          - "check_codestyle"
          - "check_isort"
          - "mypy"

    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-python@v2
      - run: pip install tox
      - run: tox -e ${{ matrix.toxenv }}
    # This does a vanilla `poetry install` - no extras. I'm slightly anxious
    # that we might skip some typechecks on code that uses extras. However,
    # I think the right way to fix this is to mark any extras needed for
    # typechecking as development dependencies. To detect this, we ought to
    # turn up mypy's strictness: disallow unknown imports and accept fewer
    # uses of `Any`.
    uses: "matrix-org/backend-meta/.github/workflows/python-poetry-ci.yml@v1"

  lint-crlf:
    runs-on: ubuntu-latest
@@ -71,23 +65,23 @@ jobs:
      matrix:
        python-version: ["3.7", "3.8", "3.9", "3.10"]
        database: ["sqlite"]
        toxenv: ["py"]
        extras: ["all"]
        include:
          # Newest Python without optional deps
          - python-version: "3.10"
            toxenv: "py-noextras"
            extras: ""

          # Oldest Python with PostgreSQL
          - python-version: "3.7"
            database: "postgres"
            postgres-version: "10"
            toxenv: "py"
            extras: "all"

          # Newest Python with newest PostgreSQL
          - python-version: "3.10"
            database: "postgres"
            postgres-version: "14"
            toxenv: "py"
            extras: "all"

    steps:
      - uses: actions/checkout@v2
@@ -99,17 +93,16 @@ jobs:
          -e POSTGRES_PASSWORD=postgres \
          -e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
          postgres:${{ matrix.postgres-version }}
      - uses: actions/setup-python@v2
      - uses: matrix-org/setup-python-poetry@v1
        with:
          python-version: ${{ matrix.python-version }}
      - run: pip install tox
          extras: ${{ matrix.extras }}
      - name: Await PostgreSQL
        if: ${{ matrix.postgres-version }}
        timeout-minutes: 2
        run: until pg_isready -h localhost; do sleep 1; done
      - run: tox -e ${{ matrix.toxenv }}
      - run: poetry run trial --jobs=2 tests
        env:
          TRIAL_FLAGS: "--jobs=2"
          SYNAPSE_POSTGRES: ${{ matrix.database == 'postgres' || '' }}
          SYNAPSE_POSTGRES_HOST: localhost
          SYNAPSE_POSTGRES_USER: postgres
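The `SYNAPSE_POSTGRES` value relies on a GitHub Actions expression trick: `matrix.database == 'postgres' || ''` evaluates to `true` on postgres runs and to the empty string otherwise, so the variable can be read as a plain truthy flag. A minimal sketch of how consuming test code might check it (the variable name comes from the workflow above; everything else is illustrative):

```python
import os

# "true" on postgres CI runs, "" (or unset) on SQLite runs.
use_postgres = bool(os.environ.get("SYNAPSE_POSTGRES"))

if use_postgres:
    print("running tests against PostgreSQL")
else:
    print("running tests against SQLite")
```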
@@ -156,23 +149,24 @@ jobs:

  trial-pypy:
    # Very slow; only run if the branch name includes 'pypy'
    # Note: sqlite only; no postgres. Completely untested since poetry move.
    if: ${{ contains(github.ref, 'pypy') && !failure() && !cancelled() }}
    needs: linting-done
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ["pypy-3.7"]
        extras: ["all"]

    steps:
      - uses: actions/checkout@v2
      # Install libs necessary for PyPy to build binary wheels for dependencies
      - run: sudo apt-get -qq install xmlsec1 libxml2-dev libxslt-dev
      - uses: actions/setup-python@v2
      - uses: matrix-org/setup-python-poetry@v1
        with:
          python-version: ${{ matrix.python-version }}
      - run: pip install tox
      - run: tox -e py
        env:
          TRIAL_FLAGS: "--jobs=2"
          extras: ${{ matrix.extras }}
      - run: poetry run trial --jobs=2 tests
      - name: Dump logs
        # Logs are most useful when the command fails, so always include them.
        if: ${{ always() }}
@@ -6,16 +6,25 @@ on:

  workflow_dispatch:

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  mypy:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-python@v2
      - run: .ci/patch_for_twisted_trunk.sh
      - run: pip install tox
      - run: tox -e mypy
      - uses: matrix-org/setup-python-poetry@v1
        with:
          python-version: "3.x"
          extras: "all"
      - run: |
          poetry remove twisted
          poetry add --extras tls git+https://github.com/twisted/twisted.git#trunk
          poetry install --no-interaction --extras "all test"
      - run: poetry run mypy

  trial:
    runs-on: ubuntu-latest
@@ -23,14 +32,15 @@ jobs:
    steps:
      - uses: actions/checkout@v2
      - run: sudo apt-get -qq install xmlsec1
      - uses: actions/setup-python@v2
      - uses: matrix-org/setup-python-poetry@v1
        with:
          python-version: 3.7
      - run: .ci/patch_for_twisted_trunk.sh
      - run: pip install tox
      - run: tox -e py
        env:
          TRIAL_FLAGS: "--jobs=2"
          python-version: "3.x"
          extras: "all test"
      - run: |
          poetry remove twisted
          poetry add --extras tls git+https://github.com/twisted/twisted.git#trunk
          poetry install --no-interaction --extras "all test"
      - run: poetry run trial --jobs 2 tests

      - name: Dump logs
        # Logs are most useful when the command fails, so always include them.
@@ -55,11 +65,23 @@ jobs:
    steps:
      - uses: actions/checkout@v2
      - name: Patch dependencies
        run: .ci/patch_for_twisted_trunk.sh
        # Note: The poetry commands want to create a virtualenv in /src/.venv/,
        # but the sytest-synapse container expects it to be in /venv/.
        # We symlink it before running poetry so that poetry actually
        # ends up installing to `/venv`.
        run: |
          ln -s -T /venv /src/.venv
          poetry remove twisted
          poetry add --extras tls git+https://github.com/twisted/twisted.git#trunk
          poetry install --no-interaction --extras "all test"
        working-directory: /src
      - name: Run SyTest
        run: /bootstrap.sh synapse
        working-directory: /src
        env:
          # Use offline mode to avoid reinstalling the pinned version of
          # twisted.
          OFFLINE: 1
      - name: Summarise results.tap
        if: ${{ always() }}
        run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
+2 -2
@@ -15,8 +15,8 @@ _trial_temp*/
.DS_Store
__pycache__/

# We do want the poetry lockfile. TODO: is there a good reason for ignoring
# '*.lock' above? If not, let's nuke it.
# We do want the poetry lockfile.
# TODO: is there a good reason for ignoring '*.lock' above? If not, nuke it.
!poetry.lock

# stuff that is likely to exist when you run a server locally
-54
@@ -1,54 +0,0 @@
include LICENSE
include VERSION
include *.rst
include *.md
include demo/README
include demo/demo.tls.dh
include demo/*.py
include demo/*.sh

include synapse/py.typed
recursive-include synapse/storage *.sql
recursive-include synapse/storage *.sql.postgres
recursive-include synapse/storage *.sql.sqlite
recursive-include synapse/storage *.py
recursive-include synapse/storage *.txt
recursive-include synapse/storage *.md

recursive-include docs *
recursive-include scripts-dev *
recursive-include synapse *.pyi
recursive-include tests *.py
recursive-include tests *.pem
recursive-include tests *.p8
recursive-include tests *.crt
recursive-include tests *.key

recursive-include synapse/res *
recursive-include synapse/static *.css
recursive-include synapse/static *.gif
recursive-include synapse/static *.html
recursive-include synapse/static *.js

exclude .codecov.yml
exclude .coveragerc
exclude .dockerignore
exclude .editorconfig
exclude Dockerfile
exclude mypy.ini
exclude sytest-blacklist
exclude test_postgresql.sh

include book.toml
include pyproject.toml
recursive-include changelog.d *

include .flake8
prune .circleci
prune .github
prune .ci
prune contrib
prune debian
prune demo/etc
prune docker
prune stubs
+16 -14
@@ -293,24 +293,26 @@ directory of your choice::

    git clone https://github.com/matrix-org/synapse.git
    cd synapse

Synapse has a number of external dependencies that are easiest
to install using pip and a virtualenv::
Synapse has a number of external dependencies. We maintain a fixed development
environment using `poetry <https://python-poetry.org/>`_. First, install poetry. We recommend

    python3 -m venv ./env
    source ./env/bin/activate
    pip install -e ".[all,dev]"
    pip install --user pipx
    pipx install poetry

but see the `poetry installation docs <https://python-poetry.org/docs/#installation>`_
for more details. Then ask poetry to create a virtual environment from the project
and install Synapse's dependencies::

    poetry install --extras "all test"

This will run a process of downloading and installing all the needed
dependencies into a virtual env. If any dependencies fail to install,
try installing the failing modules individually::

    pip install -e "module-name"
dependencies into a virtual env.

We recommend using the demo which starts 3 federated instances running on ports `8080` - `8082`::

    ./demo/start.sh
    poetry run ./demo/start.sh

(to stop, you can use `./demo/stop.sh`)
(to stop, you can use `poetry run ./demo/stop.sh`)

See the `demo documentation <https://matrix-org.github.io/synapse/develop/development/demo.html>`_
for more information.

@@ -318,14 +320,14 @@ for more information.
If you just want to start a single instance of the app and run it directly::

    # Create the homeserver.yaml config once
    python -m synapse.app.homeserver \
    poetry run synapse_homeserver \
        --server-name my.domain.name \
        --config-path homeserver.yaml \
        --generate-config \
        --report-stats=[yes|no]

    # Start the app
    python -m synapse.app.homeserver --config-path homeserver.yaml
    poetry run synapse_homeserver --config-path homeserver.yaml


Running the unit tests
@@ -334,7 +336,7 @@ Running the unit tests
After getting up and running, you may wish to run Synapse's unit tests to
check that everything is installed correctly::

    trial tests
    poetry run trial tests

This should end with a 'PASSED' result (note that exact numbers will
differ)::
@@ -0,0 +1 @@
Prevent a sync request from removing a user's busy presence status.
@@ -0,0 +1 @@
Fix bug with incremental sync missing events when rejoining/backfilling. Contributed by Nick @ Beeper.
@@ -0,0 +1 @@
Use poetry to manage Synapse's dependencies.
@@ -0,0 +1 @@
Fix rendering of the documentation site when using the 'print' feature.
@@ -0,0 +1 @@
The groups/communities feature in Synapse has been disabled by default.
@@ -0,0 +1 @@
Enable processing of device list updates asynchronously.
@@ -0,0 +1 @@
Remove unstable identifiers from [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440).
@@ -0,0 +1 @@
Preparation for faster-room-join work: start a background process to resynchronise the room state after a room join.
@@ -0,0 +1 @@
Remove an unstable identifier from [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083).
@@ -0,0 +1 @@
Run twisted trunk CI job in the locked poetry environment.
@@ -0,0 +1 @@
Run lints under poetry in CI, and remove corresponding tox lint jobs.
@@ -0,0 +1 @@
Run "main" trial tests under `poetry`.
@@ -0,0 +1 @@
Bump twisted version in `poetry.lock` to work around [pip bug #9644](https://github.com/pypa/pip/issues/9644).
@@ -0,0 +1 @@
Change Mutual Rooms' `unstable_features` flag to `uk.half-shot.msc2666.mutual_rooms` which matches the current MSC iteration.
@@ -0,0 +1 @@
Use `poetry` to manage the virtualenv in debian packages.
@@ -0,0 +1 @@
Fix typo in the release script help string.
@@ -0,0 +1 @@
Limit length of device_id to less than 512 characters.
@@ -0,0 +1 @@
Reintroduce the list of targets to the linter script, to avoid linting unwanted local-only directories during development.
@@ -0,0 +1 @@
Update worker documentation and replace old `federation_reader` with `generic_worker`.
@@ -0,0 +1 @@
Enable processing of device list updates asynchronously.
@@ -0,0 +1 @@
Dockerfile-workers: give the master its own log config.
Vendored
+14 -3
@@ -30,9 +30,18 @@ case $(dpkg-architecture -q DEB_HOST_ARCH) in
        ;;
esac

# Use --builtin-venv to use the better `venv` module from CPython 3.4+ rather
# than the 2/3 compatible `virtualenv`.
# Manually install Poetry and export a pip-compatible `requirements.txt`
# We need a Poetry pre-release as the export command is buggy in < 1.2
TEMP_VENV="$(mktemp -d)"
python3 -m venv "$TEMP_VENV"
source "$TEMP_VENV/bin/activate"
pip install -U pip
pip install poetry==1.2.0b1
deactivate
rm -rf "$TEMP_VENV"

# Use --no-deps to only install pinned versions in exported_requirements.txt,
# and to avoid https://github.com/pypa/pip/issues/9644
dh_virtualenv \
    --install-suffix "matrix-synapse" \
    --builtin-venv \
@@ -41,9 +50,11 @@ dh_virtualenv \
    --preinstall="lxml" \
    --preinstall="mock" \
    --preinstall="wheel" \
    --extra-pip-arg="--no-deps" \
    --extra-pip-arg="--no-cache-dir" \
    --extra-pip-arg="--compile" \
    --extras="all,systemd,test"
    --extras="all,systemd,test" \
    --requirements="exported_requirements.txt"

PACKAGE_BUILD_DIR="debian/matrix-synapse-py3"
VIRTUALENV_DIR="${PACKAGE_BUILD_DIR}${DH_VIRTUALENV_INSTALL_ROOT}/matrix-synapse"
Vendored
+7
@@ -1,3 +1,10 @@
matrix-synapse-py3 (1.58.0+nmu1) UNRELEASED; urgency=medium

  * Non-maintainer upload.
  * Use poetry to manage the bundled virtualenv included with this package.

 -- Synapse Packaging Team <packages@matrix.org>  Wed, 30 Mar 2022 12:21:43 +0100

matrix-synapse-py3 (1.57.0~rc1) stable; urgency=medium

  * New synapse release 1.57.0~rc1.
Vendored
+1
@@ -0,0 +1 @@
exported_requirements.txt
+2 -4
@@ -59,7 +59,7 @@ RUN --mount=type=cache,target=/root/.cache/pip \
WORKDIR /synapse

# Copy just what we need to run `poetry export`...
COPY pyproject.toml poetry.lock README.rst /synapse/
COPY pyproject.toml poetry.lock /synapse/

RUN /root/.local/bin/poetry export --extras all -o /synapse/requirements.txt

@@ -98,9 +98,7 @@ RUN --mount=type=cache,target=/root/.cache/pip \
# Copy over the rest of the synapse source code.
COPY synapse /synapse/synapse/
# ... and what we need to `pip install`.
# TODO: once pyproject.toml declares poetry-core as its build system, we'll need to copy
# pyproject.toml here, ditching setup.py and MANIFEST.in.
COPY setup.py MANIFEST.in README.rst /synapse/
COPY pyproject.toml poetry.lock README.rst /synapse/

# Install the synapse package itself.
RUN pip install --prefix="/install" --no-deps --no-warn-script-location /synapse
@@ -29,7 +29,7 @@
import os
import subprocess
import sys
from typing import Any, Dict, Set
from typing import Any, Dict, Mapping, Set

import jinja2
import yaml

@@ -341,7 +341,7 @@ def generate_worker_files(environ, config_path: str, data_dir: str):
    # base shared worker jinja2 template.
    #
    # This config file will be passed to all workers, including Synapse's main process.
    shared_config = {"listeners": listeners}
    shared_config: Dict[str, Any] = {"listeners": listeners}

    # The supervisord config. The contents of which will be inserted into the
    # base supervisord jinja2 template.

@@ -446,21 +446,7 @@ def generate_worker_files(environ, config_path: str, data_dir: str):

        # Write out the worker's logging config file

        # Check whether we should write worker logs to disk, in addition to the console
        extra_log_template_args = {}
        if environ.get("SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK"):
            extra_log_template_args["LOG_FILE_PATH"] = "{dir}/logs/{name}.log".format(
                dir=data_dir, name=worker_name
            )

        # Render and write the file
        log_config_filepath = "/conf/workers/{name}.log.config".format(name=worker_name)
        convert(
            "/conf/log.config",
            log_config_filepath,
            worker_name=worker_name,
            **extra_log_template_args,
        )
        log_config_filepath = generate_worker_log_config(environ, worker_name, data_dir)

        # Then a worker config file
        convert(

@@ -496,6 +482,10 @@ def generate_worker_files(environ, config_path: str, data_dir: str):

    # Finally, we'll write out the config files.

    # log config for the master process
    master_log_config = generate_worker_log_config(environ, "master", data_dir)
    shared_config["log_config"] = master_log_config

    # Shared homeserver config
    convert(
        "/conf/shared.yaml.j2",

@@ -532,6 +522,30 @@ def generate_worker_files(environ, config_path: str, data_dir: str):
        os.mkdir(log_dir)


def generate_worker_log_config(
    environ: Mapping[str, str], worker_name: str, data_dir: str
) -> str:
    """Generate a log.config file for the given worker.

    Returns: the path to the generated file
    """
    # Check whether we should write worker logs to disk, in addition to the console
    extra_log_template_args = {}
    if environ.get("SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK"):
        extra_log_template_args["LOG_FILE_PATH"] = "{dir}/logs/{name}.log".format(
            dir=data_dir, name=worker_name
        )
    # Render and write the file
    log_config_filepath = "/conf/workers/{name}.log.config".format(name=worker_name)
    convert(
        "/conf/log.config",
        log_config_filepath,
        worker_name=worker_name,
        **extra_log_template_args,
    )
    return log_config_filepath


def start_supervisord():
    """Starts up supervisord which then starts and monitors all other necessary processes
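With the render-and-write logic extracted into `generate_worker_log_config`, both call sites collapse to one line each. A sketch of the resulting pattern, mirroring the two calls visible in the diff above (`environ`, `data_dir` and the worker name are the values already threaded through `generate_worker_files`):

```python
# One log config per worker...
log_config_filepath = generate_worker_log_config(environ, worker_name, data_dir)

# ...and one for the main process, recorded in the shared config so the
# "master" picks it up.
shared_config["log_config"] = generate_worker_log_config(environ, "master", data_dir)
```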
+15 -39
@@ -6,60 +6,36 @@ The Synapse codebase uses a number of code formatting tools in order to
quickly and automatically check for formatting (and sometimes logical)
errors in code.

The necessary tools are detailed below.
The necessary tools are:

First install them with:
- [black](https://black.readthedocs.io/en/stable/), a source code formatter;
- [isort](https://pycqa.github.io/isort/), which organises each file's imports;
- [flake8](https://flake8.pycqa.org/en/latest/), which can spot common errors; and
- [mypy](https://mypy.readthedocs.io/en/stable/), a type checker.

Install them with:

```sh
pip install -e ".[lint,mypy]"
```

- **black**
The easiest way to run the lints is to invoke the linter script as follows.

  The Synapse codebase uses [black](https://pypi.org/project/black/)
  as an opinionated code formatter, ensuring all committed code is
  properly formatted.

  Have `black` auto-format your code (it shouldn't change any
  functionality) with:

  ```sh
  black .
  ```

- **flake8**

  `flake8` is a code checking tool. We require code to pass `flake8`
  before being merged into the codebase.

  Check all application and test code with:

  ```sh
  flake8 .
  ```

- **isort**

  `isort` ensures imports are nicely formatted, and can suggest and
  auto-fix issues such as double-importing.

  Auto-fix imports with:

  ```sh
  isort .
  ```
```sh
scripts-dev/lint.sh
```

It's worth noting that modern IDEs and text editors can run these tools
automatically on save. It may be worth looking into whether this
functionality is supported in your editor for a more convenient
development workflow. It is not, however, recommended to run `flake8` on
save as it takes a while and is very resource intensive.
development workflow. It is not, however, recommended to run `flake8` or `mypy`
on save as they take a while and can be very resource intensive.

## General rules

- **Naming**:
  - Use camel case for class and type names
  - Use underscores for functions and variables.
  - Use `CamelCase` for class and type names
  - Use underscores for `function_names` and `variable_names`.
- **Docstrings**: should follow the [google code
  style](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings).
  See the
@@ -48,16 +48,16 @@ can find many good git tutorials on the web.

# 4. Install the dependencies

Once you have installed Python 3 and added the source, please open a terminal and
set up a *virtualenv*, as follows:
Synapse uses the [poetry](https://python-poetry.org/) project to manage its dependencies
and development environment. Once you have installed Python 3 and added the
source, you should [install poetry](https://python-poetry.org/docs/#installation).
We recommend installing `poetry` using `pipx`; see their instructions for details.

Next, open a terminal and install dependencies as follows:

```sh
cd path/where/you/have/cloned/the/repository
python3 -m venv ./env
source ./env/bin/activate
pip install wheel
pip install -e ".[all,dev]"
pip install tox
poetry install --extras "all test"
```

This will install the developer dependencies for the project.

@@ -117,11 +117,10 @@ The linters look at your code and do two things:

- ensure that your code follows the coding style adopted by the project;
- catch a number of errors in your code.

The linter takes no time at all to run as soon as you've [downloaded the dependencies into your python virtual environment](#4-install-the-dependencies).
The linter takes no time at all to run as soon as you've [downloaded the dependencies](#4-install-the-dependencies).

```sh
source ./env/bin/activate
./scripts-dev/lint.sh
poetry run ./scripts-dev/lint.sh
```

Note that this script *will modify your files* to fix styling errors.

@@ -131,15 +130,13 @@ If you wish to restrict the linters to only the files changed since the last com
(much faster!), you can instead run:

```sh
source ./env/bin/activate
./scripts-dev/lint.sh -d
poetry run ./scripts-dev/lint.sh -d
```

Or if you know exactly which files you wish to lint, you can instead run:

```sh
source ./env/bin/activate
./scripts-dev/lint.sh path/to/file1.py path/to/file2.py path/to/folder
poetry run ./scripts-dev/lint.sh path/to/file1.py path/to/file2.py path/to/folder
```

## Run the unit tests (Twisted trial).

@@ -148,16 +145,14 @@ The unit tests run parts of Synapse, including your changes, to see if anything
was broken. They are slower than the linters but will typically catch more errors.

```sh
source ./env/bin/activate
trial tests
poetry run trial tests
```

If you wish to only run *some* unit tests, you may specify
another module instead of `tests` - or a test class or a method:

```sh
source ./env/bin/activate
trial tests.rest.admin.test_room tests.handlers.test_admin.ExfiltrateData.test_invite
poetry run trial tests.rest.admin.test_room tests.handlers.test_admin.ExfiltrateData.test_invite
```

If your tests fail, you may wish to look at the logs (the default log level is `ERROR`):

@@ -169,7 +164,7 @@ less _trial_temp/test.log

To increase the log level for the tests, set `SYNAPSE_TEST_LOG_LEVEL`:

```sh
SYNAPSE_TEST_LOG_LEVEL=DEBUG trial tests
SYNAPSE_TEST_LOG_LEVEL=DEBUG poetry run trial tests
```

By default, tests will use an in-memory SQLite database for test data. For additional

@@ -180,7 +175,7 @@ database state to be stored in a file named `test.db` under the trial process'
working directory. Typically, this ends up being `_trial_temp/test.db`. For example:

```sh
SYNAPSE_TEST_PERSIST_SQLITE_DB=1 trial tests
SYNAPSE_TEST_PERSIST_SQLITE_DB=1 poetry run trial tests
```

The database file can then be inspected with:
@@ -10,15 +10,15 @@ See the folder [system](https://github.com/matrix-org/synapse/tree/develop/docs/
for the systemd unit files.

The folder [workers](https://github.com/matrix-org/synapse/tree/develop/docs/systemd-with-workers/workers/)
contains an example configuration for the `federation_reader` worker.
contains an example configuration for the `generic_worker` worker.

## Synapse configuration files

See [the worker documentation](../workers.md) for information on how to set up the
configuration files and reverse-proxy correctly.
Below is a sample `federation_reader` worker configuration file.
Below is a sample `generic_worker` worker configuration file.
```yaml
{{#include workers/federation_reader.yaml}}
{{#include workers/generic_worker.yaml}}
```

Systemd manages daemonization itself, so ensure that none of the configuration

@@ -61,9 +61,9 @@ systemctl stop matrix-synapse.target

# Restart the master alone
systemctl start matrix-synapse.service

# Restart a specific worker (eg. federation_reader); the master is
# Restart a specific worker (eg. generic_worker); the master is
# unaffected by this.
systemctl restart matrix-synapse-worker@federation_reader.service
systemctl restart matrix-synapse-worker@generic_worker.service

# Add a new worker (assuming all configs are set up already)
systemctl enable matrix-synapse-worker@federation_writer.service
@@ -1,13 +0,0 @@
worker_app: synapse.app.federation_reader
worker_name: federation_reader1

worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093

worker_listeners:
  - type: http
    port: 8011
    resources:
      - names: [federation]

worker_log_config: /etc/matrix-synapse/federation-reader-log.yaml
@@ -0,0 +1,13 @@
worker_app: synapse.app.generic_worker
worker_name: generic_worker1

worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093

worker_listeners:
  - type: http
    port: 8011
    resources:
      - names: [client, federation]

worker_log_config: /etc/matrix-synapse/generic-worker-log.yaml
+27 -17
@@ -19,32 +19,35 @@ this document.
packages](setup/installation.md#prebuilt-packages), you will need to follow the
normal process for upgrading those packages.

- If Synapse was installed using pip then upgrade to the latest
  version by running:

  ```bash
  pip install --upgrade matrix-synapse
  ```

- If Synapse was installed from source, then:

  1. Activate the virtualenv before upgrading. For example, if
     Synapse is installed in a virtualenv in `~/synapse/env` then
  1. Obtain the latest version of the source code. Git users can run
     `git pull` to do this.

  2. If you're running Synapse in a virtualenv, make sure to activate it before
     upgrading. For example, if Synapse is installed in a virtualenv in `~/synapse/env` then
     run:

     ```bash
     source ~/synapse/env/bin/activate
     ```

  2. If Synapse was installed using pip then upgrade to the latest
     version by running:

     ```bash
     pip install --upgrade matrix-synapse
     ```

     If Synapse was installed using git then upgrade to the latest
     version by running:

     ```bash
     git pull
     pip install --upgrade .
     ```
     Include any relevant extras between square brackets, e.g. `pip install --upgrade .[postgres,oidc]`.

  3. Restart Synapse:
  3. If you're using `poetry` to manage a Synapse installation, run:
     ```bash
     poetry install
     ```
     Include any relevant extras with `--extras`, e.g. `poetry install --extras "postgres oidc"`.

  4. Restart Synapse:

     ```bash
     synctl restart

@@ -85,6 +88,13 @@ process, for example:
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
```

# Upgrading to v1.58.0

## Groups/communities feature has been disabled by default

The non-standard groups/communities feature in Synapse has been disabled by default
and will be removed in Synapse v1.61.0.

# Upgrading to v1.57.0

## Changes to database schema for application services
@@ -75,6 +75,20 @@ function setTocEntry() {
 * Populate sidebar on load
 */
window.addEventListener('load', () => {
    // Prevent rendering the table of contents of the "print book" page, as it
    // will end up being rendered into the output (in a broken-looking way)

    // Get the name of the current page (i.e. 'print.html')
    const pageNameExtension = window.location.pathname.split('/').pop();

    // Split off the extension (as '.../print' is also a valid page name), which
    // should result in 'print'
    const pageName = pageNameExtension.split('.')[0];
    if (pageName === "print") {
        // Don't render the table of contents on this page
        return;
    }

    // Only create table of contents if there is more than one header on the page
    if (headers.length <= 1) {
        return;
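The page-name check above reduces to "last path segment, minus its extension". The same logic sketched in Python for clarity (the example path is illustrative):

```python
pathname = "/synapse/develop/print.html"  # stands in for window.location.pathname
page_name_with_ext = pathname.split("/")[-1]  # "print.html"
page_name = page_name_with_ext.split(".")[0]  # "print" ('.../print' also maps here)
assert page_name == "print"
```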
+4 -6
@@ -146,12 +146,10 @@ worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093

worker_listeners:
  - type: http
    port: 8083
    resources:
      - names:
          - client
          - federation
  - type: http
    port: 8083
    resources:
      - names: [client, federation]

worker_log_config: /home/matrix/synapse/config/worker1_log_config.yaml
```
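The block-style and flow-style spellings above are equivalent YAML, so the change is purely cosmetic. A quick self-contained check (PyYAML is already a Synapse dependency):

```python
import yaml

block = """
worker_listeners:
  - type: http
    port: 8083
    resources:
      - names:
          - client
          - federation
"""
flow = """
worker_listeners:
  - type: http
    port: 8083
    resources:
      - names: [client, federation]
"""
# Both spellings parse to the same structure.
assert yaml.safe_load(block) == yaml.safe_load(flow)
```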
@@ -13,7 +13,6 @@ no_implicit_optional = True
files =
  docker/,
  scripts-dev/,
  setup.py,
  synapse/,
  tests/
Generated
+13 -12
@@ -717,7 +717,7 @@ test = ["appdirs (==1.4.4)", "pytest (>=6)", "pytest-cov (>=2.7)", "pytest-mock

[[package]]
name = "prometheus-client"
version = "0.13.1"
version = "0.14.0"
description = "Python client for the Prometheus monitoring system."
category = "main"
optional = false

@@ -1285,7 +1285,7 @@ urllib3 = ">=1.26.0"

[[package]]
name = "twisted"
version = "22.2.0"
version = "22.4.0"
description = "An asynchronous networking framework written in Python"
category = "main"
optional = false

@@ -1305,19 +1305,20 @@ typing-extensions = ">=3.6.5"
"zope.interface" = ">=4.4.2"

[package.extras]
all_non_platform = ["cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)", "pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pyserial (>=3.0)", "h2 (>=3.0,<4.0)", "priority (>=1.1.0,<2.0)", "pywin32 (!=226)", "contextvars (>=2.4,<3)"]
all_non_platform = ["cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)", "pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pyserial (>=3.0)", "h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)", "pywin32 (!=226)", "contextvars (>=2.4,<3)"]
conch = ["pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)"]
conch_nacl = ["pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pynacl"]
contextvars = ["contextvars (>=2.4,<3)"]
dev = ["towncrier (>=19.2,<20.0)", "sphinx-rtd-theme (>=0.5,<1.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=4.1.2,<6)", "pyflakes (>=2.2,<3.0)", "twistedchecker (>=0.7,<1.0)", "coverage (>=6b1,<7)", "python-subunit (>=1.4,<2.0)", "pydoctor (>=21.9.0,<21.10.0)"]
dev_release = ["towncrier (>=19.2,<20.0)", "sphinx-rtd-theme (>=0.5,<1.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=4.1.2,<6)", "pydoctor (>=21.9.0,<21.10.0)"]
http2 = ["h2 (>=3.0,<4.0)", "priority (>=1.1.0,<2.0)"]
macos_platform = ["pyobjc-core", "pyobjc-framework-cfnetwork", "pyobjc-framework-cocoa", "cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)", "pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pyserial (>=3.0)", "h2 (>=3.0,<4.0)", "priority (>=1.1.0,<2.0)", "pywin32 (!=226)", "contextvars (>=2.4,<3)"]
mypy = ["mypy (==0.930)", "mypy-zope (==0.3.4)", "types-setuptools", "types-pyopenssl", "towncrier (>=19.2,<20.0)", "sphinx-rtd-theme (>=0.5,<1.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=4.1.2,<6)", "pyflakes (>=2.2,<3.0)", "twistedchecker (>=0.7,<1.0)", "coverage (>=6b1,<7)", "cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)", "pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pyserial (>=3.0)", "h2 (>=3.0,<4.0)", "priority (>=1.1.0,<2.0)", "pywin32 (!=226)", "python-subunit (>=1.4,<2.0)", "contextvars (>=2.4,<3)", "pydoctor (>=21.9.0,<21.10.0)"]
osx_platform = ["pyobjc-core", "pyobjc-framework-cfnetwork", "pyobjc-framework-cocoa", "cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)", "pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pyserial (>=3.0)", "h2 (>=3.0,<4.0)", "priority (>=1.1.0,<2.0)", "pywin32 (!=226)", "contextvars (>=2.4,<3)"]
http2 = ["h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)"]
macos_platform = ["pyobjc-core", "pyobjc-framework-cfnetwork", "pyobjc-framework-cocoa", "cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)", "pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pyserial (>=3.0)", "h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)", "pywin32 (!=226)", "contextvars (>=2.4,<3)"]
mypy = ["mypy (==0.930)", "mypy-zope (==0.3.4)", "types-setuptools", "types-pyopenssl", "towncrier (>=19.2,<20.0)", "sphinx-rtd-theme (>=0.5,<1.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=4.1.2,<6)", "pyflakes (>=2.2,<3.0)", "twistedchecker (>=0.7,<1.0)", "coverage (>=6b1,<7)", "cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)", "pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pyserial (>=3.0)", "h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)", "pynacl", "pywin32 (!=226)", "python-subunit (>=1.4,<2.0)", "contextvars (>=2.4,<3)", "pydoctor (>=21.9.0,<21.10.0)"]
osx_platform = ["pyobjc-core", "pyobjc-framework-cfnetwork", "pyobjc-framework-cocoa", "cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)", "pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pyserial (>=3.0)", "h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)", "pywin32 (!=226)", "contextvars (>=2.4,<3)"]
serial = ["pyserial (>=3.0)", "pywin32 (!=226)"]
test = ["cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)"]
tls = ["pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)"]
windows_platform = ["pywin32 (!=226)", "cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)", "pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pyserial (>=3.0)", "h2 (>=3.0,<4.0)", "priority (>=1.1.0,<2.0)", "pywin32 (!=226)", "contextvars (>=2.4,<3)"]
windows_platform = ["pywin32 (!=226)", "cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)", "pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pyserial (>=3.0)", "h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)", "pywin32 (!=226)", "contextvars (>=2.4,<3)"]

[[package]]
name = "twisted-iocpsupport"

@@ -2225,8 +2226,8 @@ platformdirs = [
    {file = "platformdirs-2.5.1.tar.gz", hash = "sha256:7535e70dfa32e84d4b34996ea99c5e432fa29a708d0f4e394bbcb2a8faa4f16d"},
]
prometheus-client = [
    {file = "prometheus_client-0.13.1-py3-none-any.whl", hash = "sha256:357a447fd2359b0a1d2e9b311a0c5778c330cfbe186d880ad5a6b39884652316"},
    {file = "prometheus_client-0.13.1.tar.gz", hash = "sha256:ada41b891b79fca5638bd5cfe149efa86512eaa55987893becd2c6d8d0a5dfc5"},
    {file = "prometheus_client-0.14.0-py3-none-any.whl", hash = "sha256:f4aba3fdd1735852049f537c1f0ab177159b7ab76f271ecc4d2f45aa2a1d01f2"},
    {file = "prometheus_client-0.14.0.tar.gz", hash = "sha256:8f7a922dd5455ad524b6ba212ce8eb2b4b05e073f4ec7218287f88b1cac34750"},
]
psycopg2 = [
    {file = "psycopg2-2.9.3-cp310-cp310-win32.whl", hash = "sha256:083707a696e5e1c330af2508d8fab36f9700b26621ccbcb538abe22e15485362"},

@@ -2592,8 +2593,8 @@ twine = [
    {file = "twine-3.8.0.tar.gz", hash = "sha256:8efa52658e0ae770686a13b675569328f1fba9837e5de1867bfe5f46a9aefe19"},
]
twisted = [
    {file = "Twisted-22.2.0-py3-none-any.whl", hash = "sha256:5c63c149eb6b8fe1e32a0215b1cef96fabdba04f705d8efb9174b1ccf5b49d49"},
    {file = "Twisted-22.2.0.tar.gz", hash = "sha256:57f32b1f6838facb8c004c89467840367ad38e9e535f8252091345dba500b4f2"},
    {file = "Twisted-22.4.0-py3-none-any.whl", hash = "sha256:f9f7a91f94932477a9fc3b169d57f54f96c6e74a23d78d9ce54039a7f48928a2"},
    {file = "Twisted-22.4.0.tar.gz", hash = "sha256:a047990f57dfae1e0bd2b7df2526d4f16dcdc843774dc108b78c52f2a5f13680"},
]
twisted-iocpsupport = [
    {file = "twisted-iocpsupport-1.0.2.tar.gz", hash = "sha256:72068b206ee809c9c596b57b5287259ea41ddb4774d86725b19f35bf56aa32a9"},
+14 -2
@@ -79,8 +79,20 @@ else
    # If we were not asked to lint changed files, and no paths were found as a result,
    # then lint everything!
    if [[ -z ${files+x} ]]; then
      # Lint all source code files and directories
      files=( "." )
      # CI runs each linter on the entire checkout, e.g. `black .`. So don't
      # rely on this list to *find* lint targets if that misses a file; instead,
      # use it to exclude files from linters when this can't be done by config.
      #
      # To check which files the linters examine, use:
      #     black --verbose . 2>&1 | \grep -v ignored
      #     isort --show-files .
      #     flake8 --verbose .  # This isn't a great option
      #     mypy has explicit config in mypy.ini; there is also mypy --verbose
      files=(
          "synapse" "docker" "tests"
          "scripts-dev"
          "contrib" "setup.py" "synmark" "stubs" ".ci"
      )
    fi
fi
@@ -69,11 +69,12 @@ def cli():
        # ... wait for assets to build ...

        ./scripts-dev/release.py publish

        ./scripts-dev/release.py upload

        # Optional: generate some nice links for the announcement

        ./scripts-dev/release.py upload
        ./scripts-dev/release.py announce

    If the env var GH_TOKEN (or GITHUB_TOKEN) is set, or passed into the
    `tag`/`publish` command, then a new draft release will be created/published.
@@ -1,9 +0,0 @@
[check-manifest]
ignore =
    .git-blame-ignore-revs
    contrib
    contrib/*
    docs/*
    pylint.cfg
    tox.ini
@@ -1,183 +0,0 @@
#!/usr/bin/env python

# Copyright 2014-2017 OpenMarket Ltd
# Copyright 2017 Vector Creations Ltd
# Copyright 2017-2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Any, Dict

from setuptools import Command, find_packages, setup

here = os.path.abspath(os.path.dirname(__file__))


# Some notes on `setup.py test`:
#
# Once upon a time we used to try to make `setup.py test` run `tox` to run the
# tests. That's a bad idea for three reasons:
#
# 1: `setup.py test` is supposed to find out whether the tests work in the
#    *current* environment, not whatever tox sets up.
# 2: Empirically, trying to install tox during the test run wasn't working ("No
#    module named virtualenv").
# 3: The tox documentation advises against it[1].
#
# Even further back in time, we used to use setuptools_trial [2]. That has its
# own set of issues: for instance, it requires installation of Twisted to build
# an sdist (because the recommended mode of usage is to add it to
# `setup_requires`). That in turn means that in order to successfully run tox
# you have to have the python header files installed for whichever version of
# python tox uses (which is python3 on recent ubuntus, for example).
#
# So, for now at least, we stick with what appears to be the convention among
# Twisted projects, and don't attempt to do anything when someone runs
# `setup.py test`; instead we direct people to run `trial` directly if they
# care.
#
# [1]: http://tox.readthedocs.io/en/2.5.0/example/basic.html#integration-with-setup-py-test-command
# [2]: https://pypi.python.org/pypi/setuptools_trial
class TestCommand(Command):
    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        print(
            """Synapse's tests cannot be run via setup.py. To run them, try:
    PYTHONPATH="." trial tests
"""
        )


def read_file(path_segments):
    """Read a file from the package. Takes a list of strings to join to
    make the path"""
    file_path = os.path.join(here, *path_segments)
    with open(file_path) as f:
        return f.read()


def exec_file(path_segments):
    """Execute a single python file to get the variables defined in it"""
    result: Dict[str, Any] = {}
    code = read_file(path_segments)
    exec(code, result)
    return result


version = exec_file(("synapse", "__init__.py"))["__version__"]
dependencies = exec_file(("synapse", "python_dependencies.py"))
long_description = read_file(("README.rst",))

REQUIREMENTS = dependencies["REQUIREMENTS"]
CONDITIONAL_REQUIREMENTS = dependencies["CONDITIONAL_REQUIREMENTS"]
ALL_OPTIONAL_REQUIREMENTS = dependencies["ALL_OPTIONAL_REQUIREMENTS"]

# Make `pip install matrix-synapse[all]` install all the optional dependencies.
CONDITIONAL_REQUIREMENTS["all"] = list(ALL_OPTIONAL_REQUIREMENTS)

# Developer dependencies should not get included in "all".
#
# We pin black so that our tests don't start failing on new releases.
CONDITIONAL_REQUIREMENTS["lint"] = [
    "isort==5.7.0",
    "black==22.3.0",
    "flake8-comprehensions",
    "flake8-bugbear==21.3.2",
    "flake8",
]

CONDITIONAL_REQUIREMENTS["mypy"] = [
    "mypy==0.931",
    "mypy-zope==0.3.5",
    "types-bleach>=4.1.0",
    "types-jsonschema>=3.2.0",
    "types-opentracing>=2.4.2",
    "types-Pillow>=8.3.4",
    "types-psycopg2>=2.9.9",
    "types-pyOpenSSL>=20.0.7",
    "types-PyYAML>=5.4.10",
    "types-requests>=2.26.0",
    "types-setuptools>=57.4.0",
]

# Dependencies which are exclusively required by unit test code. This is
# NOT a list of all modules that are necessary to run the unit tests.
# Tests assume that all optional dependencies are installed.
#
# parameterized_class decorator was introduced in parameterized 0.7.0
CONDITIONAL_REQUIREMENTS["test"] = ["parameterized>=0.7.0", "idna>=2.5"]

CONDITIONAL_REQUIREMENTS["dev"] = (
    CONDITIONAL_REQUIREMENTS["lint"]
    + CONDITIONAL_REQUIREMENTS["mypy"]
    + CONDITIONAL_REQUIREMENTS["test"]
    + [
        # The following are used by the release script
        "click==8.1.0",
        "redbaron==0.9.2",
        "GitPython==3.1.14",
        "commonmark==0.9.1",
        "pygithub==1.55",
        # The following are executed as commands by the release script.
        "twine",
        "towncrier",
    ]
)

setup(
    name="matrix-synapse",
    version=version,
    packages=find_packages(exclude=["tests", "tests.*"]),
    description="Reference homeserver for the Matrix decentralised comms protocol",
    install_requires=REQUIREMENTS,
    extras_require=CONDITIONAL_REQUIREMENTS,
    include_package_data=True,
    zip_safe=False,
    long_description=long_description,
    long_description_content_type="text/x-rst",
    python_requires="~=3.7",
    entry_points={
        "console_scripts": [
            # Application
            "synapse_homeserver = synapse.app.homeserver:main",
            "synapse_worker = synapse.app.generic_worker:main",
            "synctl = synapse._scripts.synctl:main",
            # Scripts
            "export_signing_key = synapse._scripts.export_signing_key:main",
            "generate_config = synapse._scripts.generate_config:main",
            "generate_log_config = synapse._scripts.generate_log_config:main",
            "generate_signing_key = synapse._scripts.generate_signing_key:main",
            "hash_password = synapse._scripts.hash_password:main",
            "register_new_matrix_user = synapse._scripts.register_new_matrix_user:main",
            "synapse_port_db = synapse._scripts.synapse_port_db:main",
            "synapse_review_recent_signups = synapse._scripts.review_recent_signups:main",
            "update_synapse_database = synapse._scripts.update_synapse_database:main",
        ]
    },
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Topic :: Communications :: Chat",
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python :: 3 :: Only",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
    ],
    cmdclass={"test": TestCommand},
)
+3 -1
@@ -20,6 +20,8 @@ import json
import os
import sys

from matrix_common.versionstring import get_distribution_version_string

# Check that we're not running on an unsupported Python version.
if sys.version_info < (3, 7):
    print("Synapse requires Python 3.7 or above.")

@@ -68,7 +70,7 @@ try:
except ImportError:
    pass

__version__ = "1.57.0rc1"
__version__ = get_distribution_version_string("matrix-synapse")

if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
    # We import here so that we don't have to install a bunch of deps when
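`get_distribution_version_string` reads the installed package's metadata instead of a hard-coded constant, so the version only needs to be bumped in `pyproject.toml`. A rough standard-library equivalent for illustration only (Python 3.8+; matrix-common's helper is what Synapse actually uses):

```python
from importlib.metadata import PackageNotFoundError, version

try:
    synapse_version = version("matrix-synapse")
except PackageNotFoundError:
    # Not installed, e.g. running from a bare source checkout.
    synapse_version = "unknown"
```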
@@ -179,8 +179,6 @@ class RelationTypes:
    REPLACE: Final = "m.replace"
    REFERENCE: Final = "m.reference"
    THREAD: Final = "m.thread"
    # TODO Remove this in Synapse >= v1.57.0.
    UNSTABLE_THREAD: Final = "io.element.thread"


class LimitBlockingTypes:
@@ -89,9 +89,7 @@ ROOM_EVENT_FILTER_SCHEMA = {
        "org.matrix.not_labels": {"type": "array", "items": {"type": "string"}},
        # MSC3440, filtering by event relations.
        "related_by_senders": {"type": "array", "items": {"type": "string"}},
        "io.element.relation_senders": {"type": "array", "items": {"type": "string"}},
        "related_by_rel_types": {"type": "array", "items": {"type": "string"}},
        "io.element.relation_types": {"type": "array", "items": {"type": "string"}},
    },
}

@@ -323,16 +321,6 @@ class Filter:
        self.related_by_senders = self.filter_json.get("related_by_senders", None)
        self.related_by_rel_types = self.filter_json.get("related_by_rel_types", None)

        # Fallback to the unstable prefix if the stable version is not given.
        if hs.config.experimental.msc3440_enabled:
            self.related_by_senders = self.related_by_senders or self.filter_json.get(
                "io.element.relation_senders", None
            )
            self.related_by_rel_types = (
                self.related_by_rel_types
                or self.filter_json.get("io.element.relation_types", None)
            )

    def filters_all_types(self) -> bool:
        return "*" in self.not_types
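With the fallback removed, only the stable MSC3440 field names are honoured when a filter is parsed. For example, a room event filter now has to use the stable keys (the values below are illustrative):

```python
# The old "io.element.relation_senders" / "io.element.relation_types"
# spellings are no longer read; only the stable names apply.
room_event_filter = {
    "related_by_senders": ["@alice:example.com"],
    "related_by_rel_types": ["m.thread"],
}
```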
@@ -26,9 +26,6 @@ class ExperimentalConfig(Config):
    def read_config(self, config: JsonDict, **kwargs: Any) -> None:
        experimental = config.get("experimental_features") or {}

        # MSC3440 (thread relation)
        self.msc3440_enabled: bool = experimental.get("msc3440_enabled", False)

        # MSC3026 (busy presence state)
        self.msc3026_enabled: bool = experimental.get("msc3026_enabled", False)

@@ -77,7 +74,7 @@
        self.msc3720_enabled: bool = experimental.get("msc3720_enabled", False)

        # The deprecated groups feature.
        self.groups_enabled: bool = experimental.get("groups_enabled", True)
        self.groups_enabled: bool = experimental.get("groups_enabled", False)

        # MSC2654: Unread counts
        self.msc2654_enabled: bool = experimental.get("msc2654_enabled", False)
@@ -680,14 +680,6 @@
            config.get("use_account_validity_in_account_status") or False
        )

        # This is a temporary option that enables fully using the new
        # `device_lists_changes_in_room` without the backwards compat code. This
        # is primarily for testing. If enabled the server should *not* be
        # downgraded, as it may lead to missing device list updates.
        self.use_new_device_lists_changes_in_room = (
            config.get("use_new_device_lists_changes_in_room") or False
        )

        self.rooms_to_exclude_from_sync: List[str] = (
            config.get("exclude_rooms_from_sync") or []
        )
@@ -39,7 +39,6 @@ from . import EventBase

if TYPE_CHECKING:
    from synapse.handlers.relations import BundledAggregations
    from synapse.server import HomeServer


# Split strings on "." but not "\." This uses a negative lookbehind assertion for '\'

@@ -396,9 +395,6 @@ class EventClientSerializer:
    clients.
    """

    def __init__(self, hs: "HomeServer"):
        self._msc3440_enabled = hs.config.experimental.msc3440_enabled

    def serialize_event(
        self,
        event: Union[JsonDict, EventBase],

@@ -525,8 +521,6 @@
                "current_user_participated": thread.current_user_participated,
            }
            serialized_aggregations[RelationTypes.THREAD] = thread_summary
            if self._msc3440_enabled:
                serialized_aggregations[RelationTypes.UNSTABLE_THREAD] = thread_summary

        # Include the bundled aggregations in the event.
        if serialized_aggregations:
@@ -687,8 +687,6 @@ class FederationServer(FederationBase):
        time_now = self._clock.time_msec()
        event_json = event.get_pdu_json(time_now)
        resp = {
            # TODO Remove the unstable prefix when servers have updated.
            "org.matrix.msc3083.v2.event": event_json,
            "event": event_json,
            "state": [p.get_pdu_json(time_now) for p in state_events],
            "auth_chain": [p.get_pdu_json(time_now) for p in auth_chain_events],
@@ -1380,16 +1380,6 @@ class SendJoinParser(ByteParser[SendJoinResponse]):
                prefix + "auth_chain.item",
                use_float=True,
            ),
            # TODO Remove the unstable prefix when servers have updated.
            #
            # By re-using the same event dictionary this will cause the parsing of
            # org.matrix.msc3083.v2.event and event to stomp over each other.
            # Generally this should be fine.
            ijson.kvitems_coro(
                _event_parser(self._response.event_dict),
                prefix + "org.matrix.msc3083.v2.event",
                use_float=True,
            ),
            ijson.kvitems_coro(
                _event_parser(self._response.event_dict),
                prefix + "event",
@@ -291,12 +291,6 @@ class DeviceHandler(DeviceWorkerHandler):
        # On start up check if there are any updates pending.
        hs.get_reactor().callWhenRunning(self._handle_new_device_update_async)

        # Used to decide if we calculate outbound pokes up front or not. By
        # default we do to allow safely downgrading Synapse.
        self.use_new_device_lists_changes_in_room = (
            hs.config.server.use_new_device_lists_changes_in_room
        )

    def _check_device_name_length(self, name: Optional[str]) -> None:
        """
        Checks whether a device name is longer than the maximum allowed length.

@@ -490,23 +484,9 @@ class DeviceHandler(DeviceWorkerHandler):

        room_ids = await self.store.get_rooms_for_user(user_id)

        hosts: Optional[Set[str]] = None
        if not self.use_new_device_lists_changes_in_room:
            hosts = set()
            if self.hs.is_mine_id(user_id):
                for room_id in room_ids:
                    joined_users = await self.store.get_users_in_room(room_id)
                    hosts.update(get_domain_from_id(u) for u in joined_users)

            set_tag("target_hosts", hosts)

            hosts.discard(self.server_name)

        position = await self.store.add_device_change_to_streams(
            user_id,
            device_ids,
            hosts=hosts,
            room_ids=room_ids,
        )

@@ -528,14 +508,6 @@ class DeviceHandler(DeviceWorkerHandler):
        # We may need to do some processing asynchronously.
        self._handle_new_device_update_async()

        if hosts:
            logger.info(
                "Sending device list update notif for %r to: %r", user_id, hosts
            )
            for host in hosts:
                self.federation_sender.send_device_messages(host, immediate=False)
                log_kv({"message": "sent device update to host", "host": host})

    async def notify_user_signature_update(
        self, from_user_id: str, user_ids: List[str]
    ) -> None:

@@ -677,9 +649,13 @@ class DeviceHandler(DeviceWorkerHandler):
            return

        for user_id, device_id, room_id, stream_id, opentracing_context in rows:
            joined_user_ids = await self.store.get_users_in_room(room_id)
            hosts = {get_domain_from_id(u) for u in joined_user_ids}
            hosts.discard(self.server_name)
            hosts = set()

            # Ignore any users that aren't ours
            if self.hs.is_mine_id(user_id):
                joined_user_ids = await self.store.get_users_in_room(room_id)
                hosts = {get_domain_from_id(u) for u in joined_user_ids}
                hosts.discard(self.server_name)

            # Check if we've already sent this update to some hosts
            if current_stream_id == stream_id:
@@ -16,7 +16,7 @@ import logging
import random
from typing import TYPE_CHECKING, Iterable, List, Optional

from synapse.api.constants import EduTypes, EventTypes, Membership
from synapse.api.constants import EduTypes, EventTypes, Membership, PresenceState
from synapse.api.errors import AuthError, SynapseError
from synapse.events import EventBase
from synapse.events.utils import SerializeEventConfig

@@ -67,7 +67,9 @@ class EventStreamHandler:
            presence_handler = self.hs.get_presence_handler()

            context = await presence_handler.user_syncing(
                auth_user_id, affect_presence=affect_presence
                auth_user_id,
                affect_presence=affect_presence,
                presence_state=PresenceState.ONLINE,
            )
            with context:
                if timeout:
@@ -466,6 +466,8 @@ class FederationHandler:
            )

            if ret.partial_state:
                # TODO(faster_joins): roll this back if we don't manage to start the
                # background resync (eg process_remote_join fails)
                await self.store.store_partial_state_room(room_id, ret.servers_in_room)

            max_stream_id = await self._federation_event_handler.process_remote_join(

@@ -478,6 +480,18 @@ class FederationHandler:
                partial_state=ret.partial_state,
            )

            if ret.partial_state:
                # Kick off the process of asynchronously fetching the state for this
                # room.
                #
                # TODO(faster_joins): pick this up again on restart
                run_as_background_process(
                    desc="sync_partial_state_room",
                    func=self._sync_partial_state_room,
                    destination=origin,
                    room_id=room_id,
                )

            # We wait here until this instance has seen the events come down
            # replication (if we're using replication) as the below uses caches.
            await self._replication.wait_for_stream_position(
@@ -1370,3 +1384,64 @@ class FederationHandler:
        # We fell off the bottom, couldn't get the complexity from anyone. Oh
        # well.
        return None

    async def _sync_partial_state_room(
        self,
        destination: str,
        room_id: str,
    ) -> None:
        """Background process to resync the state of a partial-state room

        Args:
            destination: homeserver to pull the state from
            room_id: room to be resynced
        """

        # TODO(faster_joins): do we need to lock to avoid races? What happens if other
        #   worker processes kick off a resync in parallel? Perhaps we should just elect
        #   a single worker to do the resync.
        #
        # TODO(faster_joins): what happens if we leave the room during a resync? if we
        #   really leave, that might mean we have difficulty getting the room state over
        #   federation.
        #
        # TODO(faster_joins): try other destinations if the one we have fails

        logger.info("Syncing state for room %s via %s", room_id, destination)

        # we work through the queue in order of increasing stream ordering.
        while True:
            batch = await self.store.get_partial_state_events_batch(room_id)
            if not batch:
                # all the events are updated, so we can update current state and
                # clear the lazy-loading flag.
                logger.info("Updating current state for %s", room_id)
                assert (
                    self.storage.persistence is not None
                ), "TODO(faster_joins): support for workers"
                await self.storage.persistence.update_current_state(room_id)

                logger.info("Clearing partial-state flag for %s", room_id)
                success = await self.store.clear_partial_state_room(room_id)
                if success:
                    logger.info("State resync complete for %s", room_id)

                    # TODO(faster_joins) update room stats and user directory?
                    return

                # we raced against more events arriving with partial state. Go round
                # the loop again. We've already logged a warning, so no need for more.
                # TODO(faster_joins): there is still a race here, whereby incoming events which raced
                #   with us will fail to be persisted after the call to `clear_partial_state_room` due to
                #   having partial state.
                continue

            events = await self.store.get_events_as_list(
                batch,
                redact_behaviour=EventRedactBehaviour.AS_IS,
                allow_rejected=True,
            )
            for event in events:
                await self._federation_event_handler.update_state_for_partial_state_event(
                    destination, event
                )
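The resync above follows a drain-then-finalize shape. A reduced standalone sketch of that control flow (the store object and per-event step are stand-ins, not Synapse APIs):

async def process_one(store, event_id: str) -> None:
    ...  # de-partial-state a single event (stubbed)

async def drain_and_finalize(store, room_id: str) -> None:
    # Pull fixed-size batches until none remain, then try to finalize;
    # if new work raced in, the clear fails and we go round again.
    while True:
        batch = await store.get_partial_state_events_batch(room_id)
        if not batch:
            if await store.clear_partial_state_room(room_id):
                return
            continue
        for event_id in batch:
            await process_one(store, event_id)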
@@ -477,6 +477,45 @@ class FederationEventHandler:

        return await self.persist_events_and_notify(room_id, [(event, context)])

    async def update_state_for_partial_state_event(
        self, destination: str, event: EventBase
    ) -> None:
        """Recalculate the state at an event as part of a de-partial-stating process

        Args:
            destination: server to request full state from
            event: partial-state event to be de-partial-stated
        """
        logger.info("Updating state for %s", event.event_id)
        with nested_logging_context(suffix=event.event_id):
            # if we have all the event's prev_events, then we can work out the
            # state based on their states. Otherwise, we request it from the destination
            # server.
            #
            # This is the same operation as we do when we receive a regular event
            # over federation.
            state = await self._resolve_state_at_missing_prevs(destination, event)

            # build a new state group for it if need be
            context = await self._state_handler.compute_event_context(
                event,
                old_state=state,
            )
            if context.partial_state:
                # this can happen if some or all of the event's prev_events still have
                # partial state - ie, an event has an earlier stream_ordering than one
                # or more of its prev_events, so we de-partial-state it before its
                # prev_events.
                #
                # TODO(faster_joins): we probably need to be more intelligent, and
                #   exclude partial-state prev_events from consideration
                logger.warning(
                    "%s still has partial state: can't de-partial-state it yet",
                    event.event_id,
                )
                return
            await self._store.update_state_for_partial_state_event(event, context)

    async def backfill(
        self, dest: str, room_id: str, limit: int, extremities: Collection[str]
    ) -> None:
@@ -175,17 +175,13 @@ class MessageHandler:
        state_filter = state_filter or StateFilter.all()

        if at_token:
            # FIXME this claims to get the state at a stream position, but
            # get_recent_events_for_room operates by topo ordering. This therefore
            # does not reliably give you the state at the given stream position.
            # (https://github.com/matrix-org/synapse/issues/3305)
            last_events, _ = await self.store.get_recent_events_for_room(
                room_id, end_token=at_token.room_key, limit=1
            last_event = await self.store.get_last_event_in_room_before_stream_ordering(
                room_id,
                end_token=at_token.room_key,
            )

            if not last_events:
            if not last_event:
                raise NotFoundError("Can't find event for token %s" % (at_token,))
            last_event = last_events[0]

            # check whether the user is in the room at that time to determine
            # whether they should be treated as peeking.

@@ -204,7 +200,7 @@ class MessageHandler:
            visible_events = await filter_events_for_client(
                self.storage,
                user_id,
                last_events,
                [last_event],
                filter_send_to_client=False,
                is_peeking=is_peeking,
            )
@@ -1102,10 +1098,7 @@ class EventCreationHandler:
                raise SynapseError(400, "Can't send same reaction twice")

            # Don't attempt to start a thread if the parent event is a relation.
            elif (
                relation_type == RelationTypes.THREAD
                or relation_type == RelationTypes.UNSTABLE_THREAD
            ):
            elif relation_type == RelationTypes.THREAD:
                if await self.store.event_includes_relation(relates_to):
                    raise SynapseError(
                        400, "Cannot start threads from an event with a relation"
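For context, a sketch of the event content being validated here; the event ID is made up, but m.relates_to and m.thread are the stable identifiers this change switches to:

# A threaded reply: rel_type is the stable "m.thread"; the target event
# ($root here) must not itself carry a relation, or the check above 400s.
threaded_reply_content = {
    "msgtype": "m.text",
    "body": "reply in thread",
    "m.relates_to": {
        "rel_type": "m.thread",
        "event_id": "$root",  # hypothetical thread root
    },
}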
@@ -151,7 +151,7 @@ class BasePresenceHandler(abc.ABC):

    @abc.abstractmethod
    async def user_syncing(
        self, user_id: str, affect_presence: bool
        self, user_id: str, affect_presence: bool, presence_state: str
    ) -> ContextManager[None]:
        """Returns a context manager that should surround any stream requests
        from the user.

@@ -165,6 +165,7 @@ class BasePresenceHandler(abc.ABC):
            affect_presence: If false this function will be a no-op.
                Useful for streams that are not associated with an actual
                client that is being used by a user.
            presence_state: The presence state indicated in the sync request
        """

    @abc.abstractmethod

@@ -228,6 +229,11 @@ class BasePresenceHandler(abc.ABC):

        return states

    async def current_state_for_user(self, user_id: str) -> UserPresenceState:
        """Get the current presence state for a user."""
        res = await self.current_state_for_users([user_id])
        return res[user_id]

    @abc.abstractmethod
    async def set_state(
        self,
@@ -461,7 +467,7 @@ class WorkerPresenceHandler(BasePresenceHandler):
            self.send_user_sync(user_id, False, last_sync_ms)

    async def user_syncing(
        self, user_id: str, affect_presence: bool
        self, user_id: str, affect_presence: bool, presence_state: str
    ) -> ContextManager[None]:
        """Record that a user is syncing.

@@ -471,6 +477,17 @@ class WorkerPresenceHandler(BasePresenceHandler):
        if not affect_presence or not self._presence_enabled:
            return _NullContextManager()

        prev_state = await self.current_state_for_user(user_id)
        if prev_state.state != PresenceState.BUSY:
            # We set state here but pass ignore_status_msg = True as we don't want to
            # cause the status message to be cleared.
            # Note that this causes last_active_ts to be incremented which is not
            # what the spec wants: see comment in the BasePresenceHandler version
            # of this function.
            await self.set_state(
                UserID.from_string(user_id), {"presence": presence_state}, True
            )

        curr_sync = self._user_to_num_current_syncs.get(user_id, 0)
        self._user_to_num_current_syncs[user_id] = curr_sync + 1
@@ -942,7 +959,10 @@ class PresenceHandler(BasePresenceHandler):
        await self._update_states([prev_state.copy_and_replace(**new_fields)])

    async def user_syncing(
        self, user_id: str, affect_presence: bool = True
        self,
        user_id: str,
        affect_presence: bool = True,
        presence_state: str = PresenceState.ONLINE,
    ) -> ContextManager[None]:
        """Returns a context manager that should surround any stream requests
        from the user.

@@ -956,6 +976,7 @@ class PresenceHandler(BasePresenceHandler):
            affect_presence: If false this function will be a no-op.
                Useful for streams that are not associated with an actual
                client that is being used by a user.
            presence_state: The presence state indicated in the sync request
        """
        # Override if it should affect the user's presence, if presence is
        # disabled.

@@ -967,9 +988,25 @@ class PresenceHandler(BasePresenceHandler):
            self.user_to_num_current_syncs[user_id] = curr_sync + 1

            prev_state = await self.current_state_for_user(user_id)

            # If they're busy then they don't stop being busy just by syncing,
            # so just update the last sync time.
            if prev_state.state != PresenceState.BUSY:
                # XXX: We set_state separately here and just update the last_active_ts above
                # This keeps the logic as similar as possible between the worker and single
                # process modes. Using set_state will actually cause last_active_ts to be
                # updated always, which is not what the spec calls for, but synapse has done
                # this for... forever, I think.
                await self.set_state(
                    UserID.from_string(user_id), {"presence": presence_state}, True
                )
                # Retrieve the new state for the logic below. This should come from the
                # in-memory cache.
                prev_state = await self.current_state_for_user(user_id)

            # To keep the single process behaviour consistent with worker mode, run the
            # same logic as `update_external_syncs_row`, even though it looks weird.
            if prev_state.state == PresenceState.OFFLINE:
                # If they're currently offline then bring them online, otherwise
                # just update the last sync times.
                await self._update_states(
                    [
                        prev_state.copy_and_replace(

@@ -979,6 +1016,10 @@ class PresenceHandler(BasePresenceHandler):
                        )
                    ]
                )
            # otherwise, set the new presence state & update the last sync time,
            # but don't update last_active_ts as this isn't an indication that
            # they've been active (even though it's probably been updated by
            # set_state above)
            else:
                await self._update_states(
                    [

@@ -1086,11 +1127,6 @@ class PresenceHandler(BasePresenceHandler):
            )
            self.external_process_last_updated_ms.pop(process_id, None)

    async def current_state_for_user(self, user_id: str) -> UserPresenceState:
        """Get the current presence state for a user."""
        res = await self.current_state_for_users([user_id])
        return res[user_id]

    async def _persist_and_notify(self, states: List[UserPresenceState]) -> None:
        """Persist states in the database, poke the notifier and send to
        interested remote servers
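Stripped of the presence logic, user_syncing is a counting context manager. A minimal sketch of that bookkeeping (names are assumptions):

from contextlib import contextmanager
from typing import Dict, Iterator

_current_syncs: Dict[str, int] = {}

@contextmanager
def user_syncing(user_id: str) -> Iterator[None]:
    # Count overlapping syncs so presence timeout logic can tell
    # whether the user still has an open sync request.
    _current_syncs[user_id] = _current_syncs.get(user_id, 0) + 1
    try:
        yield
    finally:
        _current_syncs[user_id] -= 1

with user_syncing("@alice:example.org"):
    pass  # serve the sync request here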
@@ -661,16 +661,15 @@ class SyncHandler:
            stream_position: point at which to get state
            state_filter: The state filter used to fetch state from the database.
        """
        # FIXME this claims to get the state at a stream position, but
        # get_recent_events_for_room operates by topo ordering. This therefore
        # does not reliably give you the state at the given stream position.
        # (https://github.com/matrix-org/synapse/issues/3305)
        last_events, _ = await self.store.get_recent_events_for_room(
            room_id, end_token=stream_position.room_key, limit=1
        # FIXME: This gets the state at the latest event before the stream ordering,
        # which might not be the same as the "current state" of the room at the time
        # of the stream token if there were multiple forward extremities at the time.
        last_event = await self.store.get_last_event_in_room_before_stream_ordering(
            room_id,
            end_token=stream_position.room_key,
        )

        if last_events:
            last_event = last_events[-1]
        if last_event:
            state = await self.get_state_after_event(
                last_event, state_filter=state_filter or StateFilter.all()
            )
@@ -1,151 +0,0 @@
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2017 Vector Creations Ltd
# Copyright 2018 New Vector Ltd
# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import itertools
import logging
from typing import Set

logger = logging.getLogger(__name__)


# REQUIREMENTS is a simple list of requirement specifiers[1], and must be
# installed. It is passed to setup() as install_requires in setup.py.
#
# CONDITIONAL_REQUIREMENTS is the optional dependencies, represented as a dict
# of lists. The dict key is the optional dependency name and can be passed to
# pip when installing. The list is a series of requirement specifiers[1] to be
# installed when that optional dependency requirement is specified. It is passed
# to setup() as extras_require in setup.py
#
# Note that these both represent runtime dependencies (and the versions
# installed are checked at runtime).
#
# Also note that we replicate these constraints in the Synapse Dockerfile while
# pre-installing dependencies. If these constraints are updated here, the same
# change should be made in the Dockerfile.
#
# [1] https://pip.pypa.io/en/stable/reference/pip_install/#requirement-specifiers.

REQUIREMENTS = [
    # we use the TYPE_CHECKER.redefine method added in jsonschema 3.0.0
    "jsonschema>=3.0.0",
    # frozendict 2.1.2 is broken on Debian 10: https://github.com/Marco-Sulla/python-frozendict/issues/41
    "frozendict>=1,!=2.1.2",
    "unpaddedbase64>=1.1.0",
    "canonicaljson>=1.4.0",
    # we use the type definitions added in signedjson 1.1.
    "signedjson>=1.1.0",
    "pynacl>=1.2.1",
    # validating SSL certs for IP addresses requires service_identity 18.1.
    "service_identity>=18.1.0",
    # Twisted 18.9 introduces some logger improvements that the structured
    # logger utilises
    "Twisted[tls]>=18.9.0",
    "treq>=15.1",
    # Twisted has required pyopenssl 16.0 since about Twisted 16.6.
    "pyopenssl>=16.0.0",
    "pyyaml>=3.11",
    "pyasn1>=0.1.9",
    "pyasn1-modules>=0.0.7",
    "bcrypt>=3.1.0",
    "pillow>=5.4.0",
    "sortedcontainers>=1.4.4",
    "pymacaroons>=0.13.0",
    "msgpack>=0.5.2",
    "phonenumbers>=8.2.0",
    # we use GaugeHistogramMetric, which was added in prom-client 0.4.0.
    "prometheus_client>=0.4.0",
    # we use `order`, which arrived in attrs 19.2.0.
    # Note: 21.1.0 broke `/sync`, see #9936
    "attrs>=19.2.0,!=21.1.0",
    "netaddr>=0.7.18",
    # Jinja 2.x is incompatible with MarkupSafe>=2.1. To ensure that admins do not
    # end up with a broken installation, with recent MarkupSafe but old Jinja, we
    # add a lower bound to the Jinja2 dependency.
    "Jinja2>=3.0",
    "bleach>=1.4.3",
    # We use `ParamSpec`, which was added in `typing-extensions` 3.10.0.0.
    "typing-extensions>=3.10.0",
    # We enforce that we have a `cryptography` version that bundles an `openssl`
    # with the latest security patches.
    "cryptography>=3.4.7",
    # ijson 3.1.4 fixes a bug with "." in property names
    "ijson>=3.1.4",
    "matrix-common~=1.1.0",
    # We need packaging.requirements.Requirement, added in 16.1.
    "packaging>=16.1",
    # At the time of writing, we only use functions from the version `importlib.metadata`
    # which shipped in Python 3.8. This corresponds to version 1.4 of the backport.
    "importlib_metadata>=1.4 ; python_version < '3.8'",
]

CONDITIONAL_REQUIREMENTS = {
    "matrix-synapse-ldap3": ["matrix-synapse-ldap3>=0.1"],
    "postgres": [
        # we use execute_values with the fetch param, which arrived in psycopg 2.8.
        "psycopg2>=2.8 ; platform_python_implementation != 'PyPy'",
        "psycopg2cffi>=2.8 ; platform_python_implementation == 'PyPy'",
        "psycopg2cffi-compat==1.1 ; platform_python_implementation == 'PyPy'",
    ],
    "saml2": [
        "pysaml2>=4.5.0",
    ],
    "oidc": ["authlib>=0.14.0"],
    # systemd-python is necessary for logging to the systemd journal via
    # `systemd.journal.JournalHandler`, as is documented in
    # `contrib/systemd/log_config.yaml`.
    "systemd": ["systemd-python>=231"],
    "url_preview": ["lxml>=4.2.0"],
    "sentry": ["sentry-sdk>=0.7.2"],
    "opentracing": ["jaeger-client>=4.0.0", "opentracing>=2.2.0"],
    "jwt": ["pyjwt>=1.6.4"],
    # hiredis is not a *strict* dependency, but it makes things much faster.
    # (if it is not installed, we fall back to slow code.)
    "redis": ["txredisapi>=1.4.7", "hiredis"],
    # Required to use experimental `caches.track_memory_usage` config option.
    "cache_memory": ["pympler"],
}

ALL_OPTIONAL_REQUIREMENTS: Set[str] = set()

for name, optional_deps in CONDITIONAL_REQUIREMENTS.items():
    # Exclude systemd as it's a system-based requirement.
    # Exclude lint as it's a dev-based requirement.
    if name not in ["systemd"]:
        ALL_OPTIONAL_REQUIREMENTS = set(optional_deps) | ALL_OPTIONAL_REQUIREMENTS


# ensure there are no double-quote characters in any of the deps (otherwise the
# 'pip install' incantation in DependencyException will break)
for dep in itertools.chain(
    REQUIREMENTS,
    *CONDITIONAL_REQUIREMENTS.values(),
):
    if '"' in dep:
        raise Exception(
            "Dependency `%s` contains double-quote; use single-quotes instead" % (dep,)
        )


def list_requirements():
    return list(set(REQUIREMENTS) | ALL_OPTIONAL_REQUIREMENTS)


if __name__ == "__main__":
    import sys

    sys.stdout.writelines(req + "\n" for req in list_requirements())
@@ -342,6 +342,15 @@ class LoginRestServlet(RestServlet):
            user_id = canonical_uid

        device_id = login_submission.get("device_id")

        # If device_id is present, check that device_id is not longer than a reasonable 512 characters
        if device_id and len(device_id) > 512:
            raise LoginError(
                400,
                "device_id cannot be longer than 512 characters.",
                errcode=Codes.INVALID_PARAM,
            )

        initial_display_name = login_submission.get("initial_device_display_name")
        (
            device_id,
@@ -180,13 +180,10 @@ class SyncRestServlet(RestServlet):

        affect_presence = set_presence != PresenceState.OFFLINE

        if affect_presence:
            await self.presence_handler.set_state(
                user, {"presence": set_presence}, True
            )

        context = await self.presence_handler.user_syncing(
            user.to_string(), affect_presence=affect_presence
            user.to_string(),
            affect_presence=affect_presence,
            presence_state=set_presence,
        )
        with context:
            sync_result = await self.sync_handler.wait_for_sync_for_user(
@@ -86,7 +86,7 @@ class VersionsRestServlet(RestServlet):
                    # Implements additional endpoints as described in MSC2432
                    "org.matrix.msc2432": True,
                    # Implements additional endpoints as described in MSC2666
                    "uk.half-shot.msc2666": True,
                    "uk.half-shot.msc2666.mutual_rooms": True,
                    # Whether new rooms will be set to encrypted or not (based on presets).
                    "io.element.e2ee_forced.public": self.e2ee_forced_public,
                    "io.element.e2ee_forced.private": self.e2ee_forced_private,

@@ -100,7 +100,6 @@ class VersionsRestServlet(RestServlet):
                    # Adds support for jump to date endpoints (/timestamp_to_event) as per MSC3030
                    "org.matrix.msc3030": self.config.experimental.msc3030_enabled,
                    # Adds support for thread relations, per MSC3440.
                    "org.matrix.msc3440": self.config.experimental.msc3440_enabled,
                    "org.matrix.msc3440.stable": True,  # TODO: remove when "v1.3" is added above
                },
            },
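These flags are what clients probe before calling unstable endpoints. A short client-side sketch against the real /versions endpoint (the homeserver URL is a placeholder):

import requests

base = "https://matrix.example.org"  # placeholder homeserver
versions = requests.get(f"{base}/_matrix/client/versions").json()
if versions.get("unstable_features", {}).get("uk.half-shot.msc2666.mutual_rooms"):
    print("server advertises the renamed MSC2666 mutual-rooms flag")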
+1
-1
@@ -758,7 +758,7 @@ class HomeServer(metaclass=abc.ABCMeta):

    @cache_in_self
    def get_event_client_serializer(self) -> EventClientSerializer:
        return EventClientSerializer(self)
        return EventClientSerializer()

    @cache_in_self
    def get_password_policy_handler(self) -> PasswordPolicyHandler:
@@ -1582,7 +1582,6 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
        self,
        user_id: str,
        device_ids: Collection[str],
        hosts: Optional[Collection[str]],
        room_ids: Collection[str],
    ) -> Optional[int]:
        """Persist that a user's devices have been updated, and which hosts

@@ -1592,9 +1591,6 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
            user_id: The ID of the user whose device changed.
            device_ids: The IDs of any changed devices. If empty, this function will
                return None.
            hosts: The remote destinations that should be notified of the change. If
                None then the set of hosts have *not* been calculated, and will be
                calculated later by a background task.
            room_ids: The rooms that the user is in

        Returns:

@@ -1606,14 +1602,12 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):

        context = get_active_span_text_map()

        def add_device_changes_txn(
            txn, stream_ids_for_device_change, stream_ids_for_outbound_pokes
        ):
        def add_device_changes_txn(txn, stream_ids):
            self._add_device_change_to_stream_txn(
                txn,
                user_id,
                device_ids,
                stream_ids_for_device_change,
                stream_ids,
            )

            self._add_device_outbound_room_poke_txn(

@@ -1621,43 +1615,17 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
                user_id,
                device_ids,
                room_ids,
                stream_ids_for_device_change,
                context,
                hosts_have_been_calculated=hosts is not None,
            )

            # If the set of hosts to send to has not been calculated yet (and so
            # `hosts` is None) or there are no `hosts` to send to, then skip
            # trying to persist them to the DB.
            if not hosts:
                return

            self._add_device_outbound_poke_to_stream_txn(
                txn,
                user_id,
                device_ids,
                hosts,
                stream_ids_for_outbound_pokes,
                stream_ids,
                context,
            )

        # `device_lists_stream` wants a stream ID per device update.
        num_stream_ids = len(device_ids)

        if hosts:
            # `device_lists_outbound_pokes` wants a different stream ID for
            # each row, which is a row per host per device update.
            num_stream_ids += len(hosts) * len(device_ids)

        async with self._device_list_id_gen.get_next_mult(num_stream_ids) as stream_ids:
            stream_ids_for_device_change = stream_ids[: len(device_ids)]
            stream_ids_for_outbound_pokes = stream_ids[len(device_ids) :]

        async with self._device_list_id_gen.get_next_mult(
            len(device_ids)
        ) as stream_ids:
            await self.db_pool.runInteraction(
                "add_device_change_to_stream",
                add_device_changes_txn,
                stream_ids_for_device_change,
                stream_ids_for_outbound_pokes,
                stream_ids,
            )

        return stream_ids[-1]
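The removed branch allocated one block of stream IDs and sliced it two ways. The arithmetic, as a standalone sketch with plain integers standing in for the real ID generator:

device_ids = ["D1", "D2"]
hosts = ["host2", "host3"]

# One stream ID per device update, plus one per (host, device) pair.
num_stream_ids = len(device_ids) + len(hosts) * len(device_ids)
stream_ids = list(range(1, num_stream_ids + 1))  # stand-in for get_next_mult

ids_for_device_change = stream_ids[: len(device_ids)]
ids_for_outbound_pokes = stream_ids[len(device_ids) :]
assert len(ids_for_outbound_pokes) == len(hosts) * len(device_ids)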
@@ -1735,7 +1703,9 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
                    next(stream_id_iterator),
                    user_id,
                    device_id,
                    False,
                    not self.hs.is_mine_id(
                        user_id
                    ),  # We only need to send out update for *our* users
                    now,
                    encoded_context if whitelisted_homeserver(destination) else "{}",
                )

@@ -1752,19 +1722,8 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
        room_ids: Collection[str],
        stream_ids: List[str],
        context: Dict[str, str],
        hosts_have_been_calculated: bool,
    ) -> None:
        """Record the user in the room has updated their device.

        Args:
            hosts_have_been_calculated: True if `device_lists_outbound_pokes`
                has been updated already with the updates.
        """

        # We only need to convert to outbound pokes if they are our user.
        converted_to_destinations = (
            hosts_have_been_calculated or not self.hs.is_mine_id(user_id)
        )
        """Record the user in the room has updated their device."""

        encoded_context = json_encoder.encode(context)

@@ -1789,7 +1748,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
                device_id,
                room_id,
                stream_id,
                converted_to_destinations,
                False,
                encoded_context,
            )
            for room_id in room_ids
@@ -963,6 +963,21 @@ class PersistEventsStore:
            values=to_insert,
        )

    async def update_current_state(
        self,
        room_id: str,
        state_delta: DeltaState,
        stream_id: int,
    ) -> None:
        """Update the current state stored in the database for the given room"""

        await self.db_pool.runInteraction(
            "update_current_state",
            self._update_current_state_txn,
            state_delta_by_room={room_id: state_delta},
            stream_id=stream_id,
        )

    def _update_current_state_txn(
        self,
        txn: LoggingTransaction,

@@ -1819,10 +1834,7 @@ class PersistEventsStore:
        if rel_type == RelationTypes.REPLACE:
            txn.call_after(self.store.get_applicable_edit.invalidate, (parent_id,))

        if (
            rel_type == RelationTypes.THREAD
            or rel_type == RelationTypes.UNSTABLE_THREAD
        ):
        if rel_type == RelationTypes.THREAD:
            txn.call_after(self.store.get_thread_summary.invalidate, (parent_id,))
            # It should be safe to only invalidate the cache if the user has not
            # previously participated in the thread, but that's difficult (and
@@ -1979,3 +1979,27 @@ class EventsWorkerStore(SQLBaseStore):
            desc="is_partial_state_event",
        )
        return result is not None

    async def get_partial_state_events_batch(self, room_id: str) -> List[str]:
        """Get a list of events in the given room that have partial state"""
        return await self.db_pool.runInteraction(
            "get_partial_state_events_batch",
            self._get_partial_state_events_batch_txn,
            room_id,
        )

    @staticmethod
    def _get_partial_state_events_batch_txn(
        txn: LoggingTransaction, room_id: str
    ) -> List[str]:
        txn.execute(
            """
            SELECT event_id FROM partial_state_events AS pse
                JOIN events USING (event_id)
            WHERE pse.room_id = ?
            ORDER BY events.stream_ordering
            LIMIT 100
            """,
            (room_id,),
        )
        return [row[0] for row in txn]
@@ -14,7 +14,6 @@

import logging
from typing import (
    TYPE_CHECKING,
    Collection,
    Dict,
    FrozenSet,

@@ -32,20 +31,12 @@ import attr
from synapse.api.constants import RelationTypes
from synapse.events import EventBase
from synapse.storage._base import SQLBaseStore
from synapse.storage.database import (
    DatabasePool,
    LoggingDatabaseConnection,
    LoggingTransaction,
    make_in_list_sql_clause,
)
from synapse.storage.database import LoggingTransaction, make_in_list_sql_clause
from synapse.storage.databases.main.stream import generate_pagination_where_clause
from synapse.storage.engines import PostgresEngine
from synapse.types import JsonDict, RoomStreamToken, StreamToken
from synapse.util.caches.descriptors import cached, cachedList

if TYPE_CHECKING:
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)
@@ -63,16 +54,6 @@ class _RelatedEvent:


class RelationsWorkerStore(SQLBaseStore):
    def __init__(
        self,
        database: DatabasePool,
        db_conn: LoggingDatabaseConnection,
        hs: "HomeServer",
    ):
        super().__init__(database, db_conn, hs)

        self._msc3440_enabled = hs.config.experimental.msc3440_enabled

    @cached(uncached_args=("event",), tree=True)
    async def get_relations_for_event(
        self,

@@ -497,7 +478,7 @@ class RelationsWorkerStore(SQLBaseStore):
                    AND parent.room_id = child.room_id
                WHERE
                    %s
                    AND %s
                    AND relation_type = ?
                ORDER BY parent.event_id, child.topological_ordering DESC, child.stream_ordering DESC
            """
        else:

@@ -512,22 +493,16 @@ class RelationsWorkerStore(SQLBaseStore):
                    AND parent.room_id = child.room_id
                WHERE
                    %s
                    AND %s
                    AND relation_type = ?
                ORDER BY child.topological_ordering DESC, child.stream_ordering DESC
            """

        clause, args = make_in_list_sql_clause(
            txn.database_engine, "relates_to_id", event_ids
        )
        args.append(RelationTypes.THREAD)

        if self._msc3440_enabled:
            relations_clause = "(relation_type = ? OR relation_type = ?)"
            args.extend((RelationTypes.THREAD, RelationTypes.UNSTABLE_THREAD))
        else:
            relations_clause = "relation_type = ?"
            args.append(RelationTypes.THREAD)

        txn.execute(sql % (clause, relations_clause), args)
        txn.execute(sql % (clause,), args)
        latest_event_ids = {}
        for parent_event_id, child_event_id in txn:
            # Only consider the latest threaded reply (by topological ordering).

@@ -547,7 +522,7 @@ class RelationsWorkerStore(SQLBaseStore):
                    AND parent.room_id = child.room_id
                WHERE
                    %s
                    AND %s
                    AND relation_type = ?
                GROUP BY parent.event_id
            """

@@ -556,15 +531,9 @@ class RelationsWorkerStore(SQLBaseStore):
            clause, args = make_in_list_sql_clause(
                txn.database_engine, "relates_to_id", latest_event_ids.keys()
            )
            args.append(RelationTypes.THREAD)

            if self._msc3440_enabled:
                relations_clause = "(relation_type = ? OR relation_type = ?)"
                args.extend((RelationTypes.THREAD, RelationTypes.UNSTABLE_THREAD))
            else:
                relations_clause = "relation_type = ?"
                args.append(RelationTypes.THREAD)

            txn.execute(sql % (clause, relations_clause), args)
            txn.execute(sql % (clause,), args)
            counts = dict(cast(List[Tuple[str, int]], txn.fetchall()))

        return counts, latest_event_ids

@@ -622,7 +591,7 @@ class RelationsWorkerStore(SQLBaseStore):
                parent.event_id = relates_to_id
                AND parent.room_id = child.room_id
            WHERE
                %s
                relation_type = ?
                AND %s
                AND %s
            GROUP BY parent.event_id, child.sender

@@ -638,16 +607,9 @@ class RelationsWorkerStore(SQLBaseStore):
            txn.database_engine, "relates_to_id", event_ids
        )

        if self._msc3440_enabled:
            relations_clause = "(relation_type = ? OR relation_type = ?)"
            relations_args = [RelationTypes.THREAD, RelationTypes.UNSTABLE_THREAD]
        else:
            relations_clause = "relation_type = ?"
            relations_args = [RelationTypes.THREAD]

        txn.execute(
            sql % (users_sql, events_clause, relations_clause),
            users_args + events_args + relations_args,
            sql % (users_sql, events_clause),
            [RelationTypes.THREAD] + users_args + events_args,
        )
        return {(row[0], row[1]): row[2] for row in txn}

@@ -677,7 +639,7 @@ class RelationsWorkerStore(SQLBaseStore):
            user participated in that event's thread, otherwise false.
        """

        def _get_thread_summary_txn(txn: LoggingTransaction) -> Set[str]:
        def _get_threads_participated_txn(txn: LoggingTransaction) -> Set[str]:
            # Fetch whether the requester has participated or not.
            sql = """
                SELECT DISTINCT relates_to_id

@@ -688,28 +650,20 @@ class RelationsWorkerStore(SQLBaseStore):
                    AND parent.room_id = child.room_id
                WHERE
                    %s
                    AND %s
                    AND relation_type = ?
                    AND child.sender = ?
            """

            clause, args = make_in_list_sql_clause(
                txn.database_engine, "relates_to_id", event_ids
            )
            args.extend([RelationTypes.THREAD, user_id])

            if self._msc3440_enabled:
                relations_clause = "(relation_type = ? OR relation_type = ?)"
                args.extend((RelationTypes.THREAD, RelationTypes.UNSTABLE_THREAD))
            else:
                relations_clause = "relation_type = ?"
                args.append(RelationTypes.THREAD)

            args.append(user_id)

            txn.execute(sql % (clause, relations_clause), args)
            txn.execute(sql % (clause,), args)
            return {row[0] for row in txn.fetchall()}

        participated_threads = await self.db_pool.runInteraction(
            "get_thread_summary", _get_thread_summary_txn
            "get_threads_participated", _get_threads_participated_txn
        )

        return {event_id: event_id in participated_threads for event_id in event_ids}
@@ -1077,6 +1077,37 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
            get_rooms_for_retention_period_in_range_txn,
        )

    async def clear_partial_state_room(self, room_id: str) -> bool:
        # this can race with incoming events, so we watch out for FK errors.
        # TODO(faster_joins): this still doesn't completely fix the race, since the persist process
        #   is not atomic. I fear we need an application-level lock.
        try:
            await self.db_pool.runInteraction(
                "clear_partial_state_room", self._clear_partial_state_room_txn, room_id
            )
            return True
        except self.db_pool.engine.module.DatabaseError as e:
            # TODO(faster_joins): how do we distinguish between FK errors and other errors?
            logger.warning(
                "Exception while clearing lazy partial-state-room %s, retrying: %s",
                room_id,
                e,
            )
            return False

    @staticmethod
    def _clear_partial_state_room_txn(txn: LoggingTransaction, room_id: str) -> None:
        DatabasePool.simple_delete_txn(
            txn,
            table="partial_state_rooms_servers",
            keyvalues={"room_id": room_id},
        )
        DatabasePool.simple_delete_one_txn(
            txn,
            table="partial_state_rooms",
            keyvalues={"room_id": room_id},
        )


class _BackgroundUpdates:
    REMOVE_TOMESTONED_ROOMS_BG_UPDATE = "remove_tombstoned_rooms_from_directory"
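The True/False contract above lets the caller treat a constraint failure as "retry later". A self-contained sketch of the same pattern using sqlite3 (table names are illustrative):

import sqlite3

def try_clear(conn: sqlite3.Connection, room_id: str) -> bool:
    # Mirror clear_partial_state_room: both deletes in one transaction,
    # and report False on a database error so the caller can loop.
    try:
        with conn:
            conn.execute(
                "DELETE FROM partial_state_rooms_servers WHERE room_id = ?",
                (room_id,),
            )
            conn.execute(
                "DELETE FROM partial_state_rooms WHERE room_id = ?",
                (room_id,),
            )
        return True
    except sqlite3.DatabaseError:
        return False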
@@ -21,6 +21,7 @@ from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import NotFoundError, UnsupportedRoomVersionError
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.storage._base import SQLBaseStore
from synapse.storage.database import (
    DatabasePool,

@@ -354,6 +355,53 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):

        return {row["state_group"] for row in rows}

    async def update_state_for_partial_state_event(
        self,
        event: EventBase,
        context: EventContext,
    ) -> None:
        """Update the state group for a partial state event"""
        await self.db_pool.runInteraction(
            "update_state_for_partial_state_event",
            self._update_state_for_partial_state_event_txn,
            event,
            context,
        )

    def _update_state_for_partial_state_event_txn(
        self,
        txn,
        event: EventBase,
        context: EventContext,
    ):
        # we shouldn't have any outliers here
        assert not event.internal_metadata.is_outlier()

        # anything that was rejected should have the same state as its
        # predecessor.
        if context.rejected:
            assert context.state_group == context.state_group_before_event

        self.db_pool.simple_update_txn(
            txn,
            table="event_to_state_groups",
            keyvalues={"event_id": event.event_id},
            updatevalues={"state_group": context.state_group},
        )

        self.db_pool.simple_delete_one_txn(
            txn,
            table="partial_state_events",
            keyvalues={"event_id": event.event_id},
        )

        # TODO(faster_joins): need to do something about workers here
        txn.call_after(
            self._get_state_group_for_event.prefill,
            (event.event_id,),
            context.state_group,
        )


class MainStateBackgroundUpdateStore(RoomMemberWorkerStore):
@@ -758,6 +758,32 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
            "get_room_event_before_stream_ordering", _f
        )

    async def get_last_event_in_room_before_stream_ordering(
        self,
        room_id: str,
        end_token: RoomStreamToken,
    ) -> Optional[EventBase]:
        """Returns the last event in a room at or before a stream ordering

        Args:
            room_id
            end_token: The token used to stream from

        Returns:
            The most recent event.
        """

        last_row = await self.get_room_event_before_stream_ordering(
            room_id=room_id,
            stream_ordering=end_token.stream,
        )
        if last_row:
            _, _, event_id = last_row
            event = await self.get_event(event_id, get_prev_content=True)
            return event

        return None

    async def get_current_room_stream_token_for_room_id(
        self, room_id: Optional[str] = None
    ) -> RoomStreamToken:
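Call sites in the message and sync handlers switch to this helper, as shown in the hunks earlier; the pattern in brief (store and stream_position are assumed to come from the surrounding handler):

last_event = await store.get_last_event_in_room_before_stream_ordering(
    room_id,
    end_token=stream_position.room_key,
)
if last_event is None:
    # no event at or before that stream position
    raise NotFoundError("Can't find event for token %s" % (stream_position,))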
@@ -376,6 +376,62 @@ class EventsPersistenceStorage:
        pos = PersistedEventPosition(self._instance_name, event_stream_id)
        return event, pos, self.main_store.get_room_max_token()

    async def update_current_state(self, room_id: str) -> None:
        """Recalculate the current state for a room, and persist it"""
        state = await self._calculate_current_state(room_id)
        delta = await self._calculate_state_delta(room_id, state)

        # TODO(faster_joins): get a real stream ordering, to make this work correctly
        #   across workers.
        #
        # TODO(faster_joins): this can race against event persistence, in which case we
        #   will end up with incorrect state. Perhaps we should make this a job we
        #   farm out to the event persister, somehow.
        stream_id = self.main_store.get_room_max_stream_ordering()
        await self.persist_events_store.update_current_state(room_id, delta, stream_id)

    async def _calculate_current_state(self, room_id: str) -> StateMap[str]:
        """Calculate the current state of a room, based on the forward extremities

        Args:
            room_id: room for which to calculate current state

        Returns:
            map from (type, state_key) to event id for the current state in the room
        """
        latest_event_ids = await self.main_store.get_latest_event_ids_in_room(room_id)
        state_groups = set(
            (
                await self.main_store._get_state_group_for_events(latest_event_ids)
            ).values()
        )

        state_maps_by_state_group = await self.state_store._get_state_for_groups(
            state_groups
        )

        if len(state_groups) == 1:
            # If there is only one state group, then we know what the current
            # state is.
            return state_maps_by_state_group[state_groups.pop()]

        # Ok, we need to defer to the state handler to resolve our state sets.
        logger.debug("calling resolve_state_groups from preserve_events")

        # Avoid a circular import.
        from synapse.state import StateResolutionStore

        room_version = await self.main_store.get_room_version_id(room_id)
        res = await self._state_resolution_handler.resolve_state_groups(
            room_id,
            room_version,
            state_maps_by_state_group,
            event_map=None,
            state_res_store=StateResolutionStore(self.main_store),
        )

        return res.state

    async def _persist_event_batch(
        self,
        events_and_contexts: List[Tuple[EventBase, EventContext]],
@@ -66,9 +66,9 @@ Changes in SCHEMA_VERSION = 69:


SCHEMA_COMPAT_VERSION = (
    # we now have `state_key` columns in both `events` and `state_events`, so
    # now incompatible with synapses with SCHEMA_VERSION < 66.
    66
    # We now assume that `device_lists_changes_in_room` has been filled out for
    # recent device_list_updates.
    69
)
"""Limit on how far the synapse codebase can be rolled back without breaking db compat
@@ -481,9 +481,7 @@ class FilteringTestCase(unittest.HomeserverTestCase):
        # events). This is a bit cheeky, but tests the logic of _check_event_relations.

        # Filter for a particular sender.
        definition = {
            "io.element.relation_senders": ["@foo:bar"],
        }
        definition = {"related_by_senders": ["@foo:bar"]}

        async def events_have_relations(*args, **kwargs):
            return ["$with_relation"]
@@ -14,7 +14,6 @@
from typing import Optional
from unittest.mock import Mock

from parameterized import parameterized_class
from signedjson import key, sign
from signedjson.types import BaseKey, SigningKey

@@ -155,12 +154,6 @@ class FederationSenderReceiptsTestCases(HomeserverTestCase):
        )


@parameterized_class(
    [
        {"enable_room_poke_code_path": False},
        {"enable_room_poke_code_path": True},
    ]
)
class FederationSenderDevicesTestCases(HomeserverTestCase):
    servlets = [
        admin.register_servlets,

@@ -169,13 +162,14 @@ class FederationSenderDevicesTestCases(HomeserverTestCase):

    def make_homeserver(self, reactor, clock):
        return self.setup_test_homeserver(
            federation_transport_client=Mock(spec=["send_transaction"]),
            federation_transport_client=Mock(
                spec=["send_transaction", "query_user_devices"]
            ),
        )

    def default_config(self):
        c = super().default_config()
        c["send_federation"] = True
        c["use_new_device_lists_changes_in_room"] = self.enable_room_poke_code_path
        return c

    def prepare(self, reactor, clock, hs):

@@ -226,6 +220,45 @@ class FederationSenderDevicesTestCases(HomeserverTestCase):
        self.assertEqual(len(self.edus), 1)
        self.check_device_update_edu(self.edus.pop(0), u1, "D2", stream_id)

    def test_dont_send_device_updates_for_remote_users(self):
        """Check that we don't send device updates for remote users"""

        # Send the server a device list EDU for the other user, this will cause
        # it to try and resync the device lists.
        self.hs.get_federation_transport_client().query_user_devices.return_value = (
            defer.succeed(
                {
                    "stream_id": "1",
                    "user_id": "@user2:host2",
                    "devices": [{"device_id": "D1"}],
                }
            )
        )

        self.get_success(
            self.hs.get_device_handler().device_list_updater.incoming_device_list_update(
                "host2",
                {
                    "user_id": "@user2:host2",
                    "device_id": "D1",
                    "stream_id": "1",
                    "prev_ids": [],
                },
            )
        )

        self.reactor.advance(1)

        # We shouldn't see an EDU for that update
        self.assertEqual(self.edus, [])

        # Check that we did successfully process the inbound EDU (otherwise this
        # test would pass if we failed to process the EDU)
        devices = self.get_success(
            self.hs.get_datastores().main.get_cached_devices_for_user("@user2:host2")
        )
        self.assertIn("D1", devices)

    def test_upload_signatures(self):
        """Uploading signatures on some devices should produce updates for that user"""
@@ -657,6 +657,85 @@ class PresenceHandlerTestCase(unittest.HomeserverTestCase):
        # Mark user as online and `status_msg = None`
        self._set_presencestate_with_status_msg(user_id, PresenceState.ONLINE, None)

    def test_set_presence_from_syncing_not_set(self):
        """Test that presence is not set by syncing if affect_presence is false"""
        user_id = "@test:server"
        status_msg = "I'm here!"

        self._set_presencestate_with_status_msg(
            user_id, PresenceState.UNAVAILABLE, status_msg
        )

        self.get_success(
            self.presence_handler.user_syncing(user_id, False, PresenceState.ONLINE)
        )

        state = self.get_success(
            self.presence_handler.get_state(UserID.from_string(user_id))
        )
        # we should still be unavailable
        self.assertEqual(state.state, PresenceState.UNAVAILABLE)
        # and status message should still be the same
        self.assertEqual(state.status_msg, status_msg)

    def test_set_presence_from_syncing_is_set(self):
        """Test that presence is set by syncing if affect_presence is true"""
        user_id = "@test:server"
        status_msg = "I'm here!"

        self._set_presencestate_with_status_msg(
            user_id, PresenceState.UNAVAILABLE, status_msg
        )

        self.get_success(
            self.presence_handler.user_syncing(user_id, True, PresenceState.ONLINE)
        )

        state = self.get_success(
            self.presence_handler.get_state(UserID.from_string(user_id))
        )
        # we should now be online
        self.assertEqual(state.state, PresenceState.ONLINE)

    def test_set_presence_from_syncing_keeps_status(self):
        """Test that presence set by syncing retains status message"""
        user_id = "@test:server"
        status_msg = "I'm here!"

        self._set_presencestate_with_status_msg(
            user_id, PresenceState.UNAVAILABLE, status_msg
        )

        self.get_success(
            self.presence_handler.user_syncing(user_id, True, PresenceState.ONLINE)
        )

        state = self.get_success(
            self.presence_handler.get_state(UserID.from_string(user_id))
        )
        # our status message should be the same as it was before
        self.assertEqual(state.status_msg, status_msg)

    def test_set_presence_from_syncing_keeps_busy(self):
        """Test that presence set by syncing doesn't affect busy status"""
        # while this isn't the default
        self.presence_handler._busy_presence_enabled = True

        user_id = "@test:server"
        status_msg = "I'm busy!"

        self._set_presencestate_with_status_msg(user_id, PresenceState.BUSY, status_msg)

        self.get_success(
            self.presence_handler.user_syncing(user_id, True, PresenceState.ONLINE)
        )

        state = self.get_success(
            self.presence_handler.get_state(UserID.from_string(user_id))
        )
        # we should still be busy
        self.assertEqual(state.state, PresenceState.BUSY)

    def _set_presencestate_with_status_msg(
        self, user_id: str, state: str, status_msg: Optional[str]
    ):
@@ -63,6 +63,7 @@ class DeleteGroupTestCase(unittest.HomeserverTestCase):
        self.other_user = self.register_user("user", "pass")
        self.other_user_token = self.login("user", "pass")

    @unittest.override_config({"experimental_features": {"groups_enabled": True}})
    def test_delete_group(self) -> None:
        # Create a new group
        channel = self.make_request(
@@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import time
import urllib.parse
from typing import Any, Dict, List, Optional, Union

@@ -384,6 +384,31 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase):
        channel = self.make_request(b"POST", "/logout/all", access_token=access_token)
        self.assertEqual(channel.result["code"], b"200", channel.result)

    def test_login_with_overly_long_device_id_fails(self) -> None:
        self.register_user("mickey", "cheese")

        # create a device_id longer than 512 characters
        device_id = "yolo" * 512

        body = {
            "type": "m.login.password",
            "user": "mickey",
            "password": "cheese",
            "device_id": device_id,
        }

        # make a login request with the bad device_id
        channel = self.make_request(
            "POST",
            "/_matrix/client/v3/login",
            json.dumps(body).encode("utf8"),
            custom_headers=None,
        )

        # test that the login fails with the correct error code
        self.assertEqual(channel.code, 400)
        self.assertEqual(channel.json_body["errcode"], "M_INVALID_PARAM")


@skip_unless(has_saml2 and HAS_OIDC, "Requires SAML2 and OIDC")
class MultiSSOTestCase(unittest.HomeserverTestCase):

@@ -7,9 +7,9 @@ from twisted.test.proto_helpers import MemoryReactor
from synapse.api.constants import EventContentFields, EventTypes
from synapse.appservice import ApplicationService
from synapse.rest import admin
from synapse.rest.client import login, register, room, room_batch
from synapse.rest.client import login, register, room, room_batch, sync
from synapse.server import HomeServer
from synapse.types import JsonDict
from synapse.types import JsonDict, RoomStreamToken
from synapse.util import Clock

from tests import unittest
@@ -63,6 +63,7 @@ class RoomBatchTestCase(unittest.HomeserverTestCase):
        room.register_servlets,
        register.register_servlets,
        login.register_servlets,
        sync.register_servlets,
    ]

    def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
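
The `sync.register_servlets` line added above is what makes `/sync` routable in this test case: a `HomeserverTestCase` only serves the servlets listed in its `servlets` attribute, so the new test below could not hit `/sync` without it. Likewise, the widened `RoomStreamToken` import is used by the new test to build its `since` token.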

@@ -178,3 +179,123 @@ class RoomBatchTestCase(unittest.HomeserverTestCase):
            "Expected a single state_group to be returned but saw state_groups=%s"
            % (state_group_map.keys(),),
        )

    @unittest.override_config({"experimental_features": {"msc2716_enabled": True}})
    def test_sync_while_batch_importing(self) -> None:
        """
        Make sure that /sync correctly returns full room state when a user joins
        during ongoing batch backfilling.
        See: https://github.com/matrix-org/synapse/issues/12281
        """
        # Create the user who will be invited to and will join the room
        user_id = self.register_user("beep", "test")
        user_tok = self.login("beep", "test")

        time_before_room = int(self.clock.time_msec())

        # Create a room with some events
        room_id, _, _, _ = self._create_test_room()
        # Invite the user
        self.helper.invite(
            room_id, src=self.appservice.sender, tok=self.appservice.token, targ=user_id
        )

        # Create another room and send a bunch of events to advance the stream token
        other_room_id = self.helper.create_room_as(
            self.appservice.sender, tok=self.appservice.token
        )
        for _ in range(5):
            self.helper.send_event(
                room_id=other_room_id,
                type=EventTypes.Message,
                content={"msgtype": "m.text", "body": "C"},
                tok=self.appservice.token,
            )

        # Join the room as the normal user
        self.helper.join(room_id, user_id, tok=user_tok)

        # Create an event to hang the historical batch from. In order to see
        # the failure case originally reported in #12281, the historical batch
        # must be hung from the most recent event in the room, so that the base
        # insertion event ends up with the highest `topological_ordering`
        # (`depth`) in the room but a negative `stream_ordering`, because it
        # is a `historical` event. Previously, when assembling the `state`
        # for the `/sync` response, the bugged logic would sort by
        # `topological_ordering` descending and pick up the base insertion
        # event, since its negative `stream_ordering` sits below the given
        # pagination token. Now we properly sort by `stream_ordering`
        # descending, which puts `historical` events with a negative
        # `stream_ordering` at the very bottom so they are not selected,
        # as expected.
        response = self.helper.send_event(
            room_id=room_id,
            type=EventTypes.Message,
            content={
                "msgtype": "m.text",
                "body": "C",
            },
            tok=self.appservice.token,
        )
        event_to_hang_id = response["event_id"]

        channel = self.make_request(
            "POST",
            "/_matrix/client/unstable/org.matrix.msc2716/rooms/%s/batch_send?prev_event_id=%s"
            % (room_id, event_to_hang_id),
            content={
                "events": _create_message_events_for_batch_send_request(
                    self.virtual_user_id, time_before_room, 3
                ),
                "state_events_at_start": _create_join_state_events_for_batch_send_request(
                    [self.virtual_user_id], time_before_room
                ),
            },
            access_token=self.appservice.token,
        )
        self.assertEqual(channel.code, 200, channel.result)

        # Now we need to find the invite event's stream token so we can sync from there
        main_store = self.hs.get_datastores().main
        events, next_key = self.get_success(
            main_store.get_recent_events_for_room(
                room_id,
                50,
                end_token=main_store.get_room_max_token(),
            ),
        )
        invite_event_position = None
        for event in events:
            if (
                event.type == "m.room.member"
                and event.content["membership"] == "invite"
            ):
                invite_event_position = self.get_success(
                    main_store.get_topological_token_for_event(event.event_id)
                )
                break

        assert invite_event_position is not None, "No invite event found"

        # Remove the topological part of the token by re-creating it with the stream part only
        invite_event_position = RoomStreamToken(None, invite_event_position.stream)

        # Sync everything after this token
        since_token = self.get_success(invite_event_position.to_string(main_store))
        sync_response = self.make_request(
            "GET",
            f"/sync?since={since_token}",
            access_token=user_tok,
        )

        # Assert that, for this room, the user was considered to have joined and thus
        # receives the full state history
        state_event_types = [
            event["type"]
            for event in sync_response.json_body["rooms"]["join"][room_id]["state"][
                "events"
            ]
        ]

        assert (
            "m.room.create" in state_event_types
        ), "Missing room full state in sync response"

@@ -21,6 +21,29 @@ class DeviceStoreTestCase(HomeserverTestCase):
    def prepare(self, reactor, clock, hs):
        self.store = hs.get_datastores().main

    def add_device_change(self, user_id, device_ids, host):
        """Add a device list change for the given devices to the
        `device_lists_outbound_pokes` table.
        """

        for device_id in device_ids:
            stream_id = self.get_success(
                self.store.add_device_change_to_streams(
                    user_id, [device_id], ["!some:room"]
                )
            )

            self.get_success(
                self.store.add_device_list_outbound_pokes(
                    user_id=user_id,
                    device_id=device_id,
                    room_id="!some:room",
                    stream_id=stream_id,
                    hosts=[host],
                    context={},
                )
            )
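
A usage note on this helper: one call now covers the previous two-step pattern of bumping the device-change stream and then recording an outbound poke per device, which is exactly how the call sites below are simplified. For example (arguments mirror those call sites):

    self.add_device_change("@user_id:test", ["device_id1", "device_id2"], "somehost")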

    def test_store_new_device(self):
        self.get_success(
            self.store.store_device("user_id", "device_id", "display_name")
@@ -95,11 +118,7 @@ class DeviceStoreTestCase(HomeserverTestCase):
        device_ids = ["device_id1", "device_id2"]

        # Add two device updates with sequential `stream_id`s
        self.get_success(
            self.store.add_device_change_to_streams(
                "user_id", device_ids, ["somehost"], ["!some:room"]
            )
        )
        self.add_device_change("@user_id:test", device_ids, "somehost")

        # Get all device updates ever meant for this remote
        now_stream_id, device_updates = self.get_success(
@@ -123,11 +142,7 @@ class DeviceStoreTestCase(HomeserverTestCase):
            "device_id4",
            "device_id5",
        ]
        self.get_success(
            self.store.add_device_change_to_streams(
                "user_id", device_ids, ["somehost"], ["!some:room"]
            )
        )
        self.add_device_change("@user_id:test", device_ids, "somehost")

        # Get device updates meant for this remote
        next_stream_id, device_updates = self.get_success(
@@ -147,11 +162,7 @@ class DeviceStoreTestCase(HomeserverTestCase):

        # Add some more device updates to ensure it still resumes properly
        device_ids = ["device_id6", "device_id7"]
        self.get_success(
            self.store.add_device_change_to_streams(
                "user_id", device_ids, ["somehost"], ["!some:room"]
            )
        )
        self.add_device_change("@user_id:test", device_ids, "somehost")

        # Get the next batch of device updates
        next_stream_id, device_updates = self.get_success(
@@ -224,11 +235,7 @@ class DeviceStoreTestCase(HomeserverTestCase):
            "fakeSelfSigning",
        ]

        self.get_success(
            self.store.add_device_change_to_streams(
                "@user_id:test", device_ids, ["somehost"], ["!some:room"]
            )
        )
        self.add_device_change("@user_id:test", device_ids, "somehost")

        # Get device updates meant for this remote
        next_stream_id, device_updates = self.get_success(

@@ -1,5 +1,5 @@
[tox]
envlist = py37, py38, py39, py310, check_codestyle, check_isort
envlist = py37, py38, py39, py310

# we require tox>=2.3.2 for the fix to https://github.com/tox-dev/tox/issues/208
minversion = 2.3.2
@@ -32,20 +32,6 @@ deps =
    # install the "enum34" dependency of cryptography.
    pip>=10

# directories/files we run the linters on.
# TODO: this is now out of date; we will remove as part of poetry migration.
lint_targets =
    setup.py
    synapse
    tests
    # annoyingly, black doesn't find these so we have to list them
    scripts-dev
    stubs
    contrib
    synmark
    .ci
    docker

# default settings for all tox environments
[testenv]
deps =
@@ -116,18 +102,3 @@ setenv =
commands =
    python -m synmark {posargs:}

[testenv:check_codestyle]
extras = lint
commands =
    python -m black --check --diff {[base]lint_targets}
    flake8 {[base]lint_targets} {env:PEP8SUFFIX:}

[testenv:check_isort]
extras = lint
commands = isort -c --df {[base]lint_targets}

[testenv:mypy]
deps =
    {[base]deps}
extras = all,mypy
commands = mypy
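
With the `check_codestyle`, `check_isort`, and `mypy` environments removed, the same checks are expected to run through the tools directly, for example `python -m black --check --diff <targets>`, `flake8 <targets>`, `isort -c --df <targets>`, and `mypy`, from the poetry-managed development environment rather than via tox.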