Compare commits
52 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 8cd0a81f25 | |
| | daa5321055 | |
| | cbbdf14be3 | |
| | 9c815553ed | |
| | e7176e8120 | |
| | 5bdf01fccd | |
| | 36c6b92bfc | |
| | 8eb7bb975e | |
| | 3bdb9b07fd | |
| | 0371a354cf | |
| | ae391db777 | |
| | d7fc87d973 | |
| | 224ef0b669 | |
| | a4243183f0 | |
| | 92014fbf72 | |
| | 4ccfa16081 | |
| | 7c7bd9898b | |
| | b516d91999 | |
| | 2328e90fbb | |
| | 5e82b07d2c | |
| | c9bf644fa0 | |
| | a704a35dd7 | |
| | e55a9b3e41 | |
| | 6774f265b4 | |
| | 6e731e86bf | |
| | c971698bff | |
| | 7477f43fd8 | |
| | 3710fea19d | |
| | df8c8a4f45 | |
| | 8a529e4fb6 | |
| | f25b0f8808 | |
| | 677272caed | |
| | 2481b7dfa4 | |
| | f19dd39dfc | |
| | b07b14b494 | |
| | 561d06b481 | |
| | 39d131b016 | |
| | ce857c05d5 | |
| | cc780b3f77 | |
| | 4cf9f92f39 | |
| | 95a96b21eb | |
| | c303eca8cc | |
| | c8e81898b6 | |
| | 861752b3aa | |
| | 670d590f8a | |
| | 07d7cbfe69 | |
| | cd8b73aa97 | |
| | 53aa26eddc | |
| | a587de96b8 | |
| | 411ba44790 | |
| | aea94ca8cd | |
| | 9345361c6b | |
@@ -29,11 +29,12 @@ IS_PR = os.environ["GITHUB_REF"].startswith("refs/pull/")

# First calculate the various trial jobs.
#
# For each type of test we only run on Py3.7 on PRs
# For PRs, we only run each type of test with the oldest Python version supported (which
# is Python 3.8 right now)

trial_sqlite_tests = [
{
"python-version": "3.7",
"python-version": "3.8",
"database": "sqlite",
"extras": "all",
}
@@ -46,13 +47,13 @@ if not IS_PR:
"database": "sqlite",
"extras": "all",
}
for version in ("3.8", "3.9", "3.10", "3.11")
for version in ("3.9", "3.10", "3.11")
)

trial_postgres_tests = [
{
"python-version": "3.7",
"python-version": "3.8",
"database": "postgres",
"postgres-version": "11",
"extras": "all",
@@ -71,7 +72,7 @@ if not IS_PR:

trial_no_extra_tests = [
{
"python-version": "3.7",
"python-version": "3.8",
"database": "sqlite",
"extras": "",
}
@@ -133,11 +134,6 @@ if not IS_PR:
"sytest-tag": "testing",
"postgres": "postgres",
},
{
"sytest-tag": "buster",
"postgres": "multi-postgres",
"workers": "workers",
},
]
)
@@ -144,7 +144,7 @@ jobs:

- name: Only build a single wheel on PR
  if: startsWith(github.ref, 'refs/pull/')
  run: echo "CIBW_BUILD="cp37-manylinux_${{ matrix.arch }}"" >> $GITHUB_ENV
  run: echo "CIBW_BUILD="cp38-manylinux_${{ matrix.arch }}"" >> $GITHUB_ENV

- name: Build wheels
  run: python -m cibuildwheel --output-dir wheelhouse
@@ -320,7 +320,7 @@ jobs:

- uses: actions/setup-python@v4
  with:
    python-version: '3.7'
    python-version: '3.8'

- name: Prepare old deps
  if: steps.cache-poetry-old-deps.outputs.cache-hit != 'true'
@@ -362,7 +362,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
  matrix:
    python-version: ["pypy-3.7"]
    python-version: ["pypy-3.8"]
    extras: ["all"]

steps:
@@ -477,7 +477,7 @@ jobs:
strategy:
  matrix:
    include:
      - python-version: "3.7"
      - python-version: "3.8"
        postgres-version: "11"

      - python-version: "3.11"
@@ -96,7 +96,11 @@ jobs:
if: needs.check_repo.outputs.should_run_workflow == 'true'
runs-on: ubuntu-latest
container:
  image: matrixdotorg/sytest-synapse:buster
  # We're using ubuntu:focal because it uses Python 3.8 which is our minimum supported Python version.
  # This job is a canary to warn us about unreleased twisted changes that would cause problems for us if
  # they were to be released immediately. For simplicity's sake (and to save CI runners) we use the oldest
  # version, assuming that any incompatibilities on newer versions would also be present on the oldest.
  image: matrixdotorg/sytest-synapse:focal
  volumes:
    - ${{ github.workspace }}:/src
@@ -34,6 +34,7 @@ __pycache__/
/logs
/media_store/
/uploads
/homeserver-config-overrides.d

# For direnv users
/.envrc
+53
@@ -1,3 +1,56 @@
# Synapse 1.88.0rc1 (2023-07-11)

This release
- raises the minimum supported version of Python to 3.8, as Python 3.7 is now [end-of-life](https://devguide.python.org/versions/), and
- removes deprecated config options related to worker deployment.

See [the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.88/docs/upgrade.md#upgrading-to-v1880) for more information.

### Features

- Add `not_user_type` param to the [list accounts admin API](https://matrix-org.github.io/synapse/v1.88/admin_api/user_admin_api.html#list-accounts). ([\#15844](https://github.com/matrix-org/synapse/issues/15844))

### Bugfixes

- Pin `pydantic` to `^=1.7.4` to avoid backwards-incompatible API changes from the 2.0.0 release.
  Contributed by @PaarthShah. ([\#15862](https://github.com/matrix-org/synapse/issues/15862))
- Correctly resize thumbnails with pillow version >=10. ([\#15876](https://github.com/matrix-org/synapse/issues/15876))

### Improved Documentation

- Fixed header levels on the [Admin API "Users"](https://matrix-org.github.io/synapse/v1.87/admin_api/user_admin_api.html) documentation page. Contributed by @sumnerevans at @beeper. ([\#15852](https://github.com/matrix-org/synapse/issues/15852))
- Remove deprecated `worker_replication_host`, `worker_replication_http_port` and `worker_replication_http_tls` configuration options. ([\#15872](https://github.com/matrix-org/synapse/issues/15872))

### Deprecations and Removals

- **Remove deprecated `worker_replication_host`, `worker_replication_http_port` and `worker_replication_http_tls` configuration options.** See the [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.88/docs/upgrade.md#removal-of-worker_replication_-settings) for more details. ([\#15860](https://github.com/matrix-org/synapse/issues/15860))
- Remove support for Python 3.7 and hence for Debian Buster. ([\#15851](https://github.com/matrix-org/synapse/issues/15851), [\#15892](https://github.com/matrix-org/synapse/issues/15892), [\#15893](https://github.com/matrix-org/synapse/issues/15893), [\#15917](https://github.com/matrix-org/synapse/pull/15917))

### Internal Changes

- Add foreign key constraint to `event_forward_extremities`. ([\#15751](https://github.com/matrix-org/synapse/issues/15751), [\#15907](https://github.com/matrix-org/synapse/issues/15907))
- Add read/write style cross-worker locks. ([\#15782](https://github.com/matrix-org/synapse/issues/15782))
- Stop writing to column `user_id` of tables `profiles` and `user_filters`. ([\#15787](https://github.com/matrix-org/synapse/issues/15787))
- Use lower isolation level when cleaning old presence stream data to avoid serialization errors. ([\#15826](https://github.com/matrix-org/synapse/issues/15826))
- Add tracing to media `/upload` code paths. ([\#15850](https://github.com/matrix-org/synapse/issues/15850), [\#15888](https://github.com/matrix-org/synapse/issues/15888))
- Add a timeout that aborts any Postgres statement taking more than 1 hour. ([\#15853](https://github.com/matrix-org/synapse/issues/15853))
- Fix the `devenv up` configuration which was ignoring the config overrides. ([\#15854](https://github.com/matrix-org/synapse/issues/15854))
- Optimised cleanup of old entries in `device_lists_stream`. ([\#15861](https://github.com/matrix-org/synapse/issues/15861))
- Update the Matrix clients link in the _It works! Synapse is running_ landing page. ([\#15874](https://github.com/matrix-org/synapse/issues/15874))
- Fix building Synapse with the nightly Rust compiler. ([\#15906](https://github.com/matrix-org/synapse/issues/15906))
- Add `Server` to Access-Control-Expose-Headers header. ([\#15908](https://github.com/matrix-org/synapse/issues/15908))

### Updates to locked dependencies

* Bump authlib from 1.2.0 to 1.2.1. ([\#15864](https://github.com/matrix-org/synapse/issues/15864))
* Bump importlib-metadata from 6.6.0 to 6.7.0. ([\#15865](https://github.com/matrix-org/synapse/issues/15865))
* Bump lxml from 4.9.2 to 4.9.3. ([\#15897](https://github.com/matrix-org/synapse/issues/15897))
* Bump regex from 1.8.4 to 1.9.1. ([\#15902](https://github.com/matrix-org/synapse/issues/15902))
* Bump ruff from 0.0.275 to 0.0.277. ([\#15900](https://github.com/matrix-org/synapse/issues/15900))
* Bump sentry-sdk from 1.25.1 to 1.26.0. ([\#15867](https://github.com/matrix-org/synapse/issues/15867))
* Bump serde_json from 1.0.99 to 1.0.100. ([\#15901](https://github.com/matrix-org/synapse/issues/15901))
* Bump types-pyopenssl from 23.2.0.0 to 23.2.0.1. ([\#15866](https://github.com/matrix-org/synapse/issues/15866))

# Synapse 1.87.0 (2023-07-04)

Please note that this will be the last release of Synapse that is compatible with
Generated +29 −17
@@ -182,9 +182,9 @@ dependencies = [

[[package]]
name = "proc-macro2"
version = "1.0.52"
version = "1.0.64"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1d0e1ae9e836cc3beddd63db0df682593d7e2d3d891ae8c9083d2113e1744224"
checksum = "78803b62cbf1f46fde80d7c0e803111524b9877184cfe7c3033659490ac7a7da"
dependencies = [
"unicode-ident",
]

@@ -273,9 +273,9 @@ dependencies = [

[[package]]
name = "quote"
version = "1.0.26"
version = "1.0.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc"
checksum = "573015e8ab27661678357f27dc26460738fd2b6c86e46f386fde94cb5d913105"
dependencies = [
"proc-macro2",
]

@@ -291,9 +291,21 @@ dependencies = [

[[package]]
name = "regex"
version = "1.8.4"
version = "1.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d0ab3ca65655bb1e41f2a8c8cd662eb4fb035e67c3f78da1d61dffe89d07300f"
checksum = "b2eae68fc220f7cf2532e4494aded17545fce192d59cd996e0fe7887f4ceb575"
dependencies = [
"aho-corasick",
"memchr",
"regex-automata",
"regex-syntax",
]

[[package]]
name = "regex-automata"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "83d3daa6976cffb758ec878f108ba0e062a45b2d6ca3a2cca965338855476caf"
dependencies = [
"aho-corasick",
"memchr",

@@ -302,9 +314,9 @@ dependencies = [

[[package]]
name = "regex-syntax"
version = "0.7.2"
version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "436b050e76ed2903236f032a59761c1eb99e1b0aead2c257922771dab1fc8c78"
checksum = "2ab07dc67230e4a4718e70fd5c20055a4334b121f1f9db8fe63ef39ce9b8c846"

[[package]]
name = "ryu"

@@ -320,29 +332,29 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"

[[package]]
name = "serde"
version = "1.0.164"
version = "1.0.171"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9e8c8cf938e98f769bc164923b06dce91cea1751522f46f8466461af04c9027d"
checksum = "30e27d1e4fd7659406c492fd6cfaf2066ba8773de45ca75e855590f856dc34a9"
dependencies = [
"serde_derive",
]

[[package]]
name = "serde_derive"
version = "1.0.164"
version = "1.0.171"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d9735b638ccc51c28bf6914d90a2e9725b377144fc612c49a611fddd1b631d68"
checksum = "389894603bd18c46fa56231694f8d827779c0951a667087194cf9de94ed24682"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.10",
"syn 2.0.25",
]

[[package]]
name = "serde_json"
version = "1.0.99"
version = "1.0.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "46266871c240a00b8f503b877622fe33430b3c7d963bdc0f2adc511e54a1eae3"
checksum = "0f1e14e89be7aa4c4b78bdbdc9eb5bf8517829a600ae8eaa39a6e1d960b5185c"
dependencies = [
"itoa",
"ryu",

@@ -374,9 +386,9 @@ dependencies = [

[[package]]
name = "syn"
version = "2.0.10"
version = "2.0.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5aad1363ed6d37b84299588d62d3a7d95b5a5c2d9aad5c85609fda12afaa1f40"
checksum = "15e3fc8c0c74267e2df136e5e5fb656a464158aa57624053375eb9c8c6e25ae2"
dependencies = [
"proc-macro2",
"quote",
@@ -3,3 +3,4 @@

[workspace]
members = ["rust"]
resolver = "2"
@@ -0,0 +1 @@
Add Unix Socket support for HTTP Replication Listeners. Document and provide usage instructions for utilizing Unix sockets in Synapse. Contributed by Jason Little.

@@ -0,0 +1 @@
Fix long-standing bug where remote invites weren't correctly pushed.

@@ -0,0 +1 @@
Mark `get_user_in_directory` private since it is only used in tests. Also remove the cache from it.

@@ -0,0 +1 @@
Document which Python version runs on a given Linux distribution so we can more easily clean up later.

@@ -0,0 +1 @@
Allow `+` in Matrix IDs, per [MSC4009](https://github.com/matrix-org/matrix-spec-proposals/pull/4009).

@@ -0,0 +1 @@
Better clarify how to run a worker instance (pass both configs).

@@ -0,0 +1 @@
Add details to warning in log when we fail to fetch an alias.

@@ -0,0 +1 @@
Add Unix Socket support for HTTP Replication Listeners. Document and provide usage instructions for utilizing Unix sockets in Synapse. Contributed by Jason Little.

@@ -0,0 +1 @@
Fix a bug introduced in 1.86.0 where Synapse would fail to start with an empty `experimental_features` configuration setting.
Vendored +6
@@ -1,3 +1,9 @@
matrix-synapse-py3 (1.88.0~rc1) stable; urgency=medium

* New Synapse release 1.88.0rc1.

-- Synapse Packaging team <packages@matrix.org> Tue, 11 Jul 2023 10:20:19 +0100

matrix-synapse-py3 (1.87.0) stable; urgency=medium

* New Synapse release 1.87.0.
@@ -28,12 +28,12 @@ FROM docker.io/library/${distro} as builder

RUN apt-get update -qq -o Acquire::Languages=none
RUN env DEBIAN_FRONTEND=noninteractive apt-get install \
-yqq --no-install-recommends \
build-essential \
ca-certificates \
devscripts \
equivs \
wget
-yqq --no-install-recommends \
build-essential \
ca-certificates \
devscripts \
equivs \
wget

# fetch and unpack the package
# We are temporarily using a fork of dh-virtualenv due to an incompatibility with Python 3.11, which ships with

@@ -62,33 +62,29 @@ FROM docker.io/library/${distro}
ARG distro=""
ENV distro ${distro}

# Python < 3.7 assumes LANG="C" means ASCII-only and throws on printing unicode
# http://bugs.python.org/issue19846
ENV LANG C.UTF-8

# Install the build dependencies
#
# NB: keep this list in sync with the list of build-deps in debian/control
# TODO: it would be nice to do that automatically.
RUN apt-get update -qq -o Acquire::Languages=none \
&& env DEBIAN_FRONTEND=noninteractive apt-get install \
-yqq --no-install-recommends -o Dpkg::Options::=--force-unsafe-io \
build-essential \
curl \
debhelper \
devscripts \
libsystemd-dev \
lsb-release \
pkg-config \
python3-dev \
python3-pip \
python3-setuptools \
python3-venv \
sqlite3 \
libpq-dev \
libicu-dev \
pkg-config \
xmlsec1
-yqq --no-install-recommends -o Dpkg::Options::=--force-unsafe-io \
build-essential \
curl \
debhelper \
devscripts \
libsystemd-dev \
lsb-release \
pkg-config \
python3-dev \
python3-pip \
python3-setuptools \
python3-venv \
sqlite3 \
libpq-dev \
libicu-dev \
pkg-config \
xmlsec1

# Install rust and ensure it's in the PATH
ENV RUSTUP_HOME=/rust
@@ -35,7 +35,11 @@ server {

# Send all other traffic to the main process
location ~* ^(\\/_matrix|\\/_synapse) {
{% if using_unix_sockets %}
proxy_pass http://unix:/run/main_public.sock;
{% else %}
proxy_pass http://localhost:8080;
{% endif %}
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Host $host;
@@ -6,6 +6,9 @@
{% if enable_redis %}
redis:
  enabled: true
{% if using_unix_sockets %}
  path: /tmp/redis.sock
{% endif %}
{% endif %}

{% if appservice_registrations is not none %}
@@ -19,7 +19,11 @@ username=www-data
autorestart=true

[program:redis]
{% if using_unix_sockets %}
command=/usr/local/bin/prefix-log /usr/local/bin/redis-server --unixsocket /tmp/redis.sock
{% else %}
command=/usr/local/bin/prefix-log /usr/local/bin/redis-server
{% endif %}
priority=1
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
@@ -8,7 +8,11 @@ worker_name: "{{ name }}"

worker_listeners:
  - type: http
{% if using_unix_sockets %}
    path: "/run/worker.{{ port }}"
{% else %}
    port: {{ port }}
{% endif %}
{% if listener_resources %}
    resources:
      - names:
@@ -36,12 +36,17 @@ listeners:

# Allow configuring in case we want to reverse proxy 8008
# using another process in the same container
{% if SYNAPSE_USE_UNIX_SOCKET %}
# Unix sockets don't care about TLS or IP addresses or ports
- path: '/run/main_public.sock'
  type: http
{% else %}
- port: {{ SYNAPSE_HTTP_PORT or 8008 }}
  tls: false
  bind_addresses: ['::']
  type: http
  x_forwarded: false

{% endif %}
  resources:
    - names: [client]
      compress: true

@@ -57,8 +62,11 @@ database:
  user: "{{ POSTGRES_USER or "synapse" }}"
  password: "{{ POSTGRES_PASSWORD }}"
  database: "{{ POSTGRES_DB or "synapse" }}"
{% if not SYNAPSE_USE_UNIX_SOCKET %}
{# Synapse will use a default unix socket for Postgres when host/port is not specified (behavior from `psycopg2`). #}
  host: "{{ POSTGRES_HOST or "db" }}"
  port: "{{ POSTGRES_PORT or "5432" }}"
{% endif %}
  cp_min: 5
  cp_max: 10
{% else %}
@@ -74,6 +74,9 @@ MAIN_PROCESS_HTTP_LISTENER_PORT = 8080
MAIN_PROCESS_INSTANCE_NAME = "main"
MAIN_PROCESS_LOCALHOST_ADDRESS = "127.0.0.1"
MAIN_PROCESS_REPLICATION_PORT = 9093
# Obviously, these would only be used with the UNIX socket option
MAIN_PROCESS_UNIX_SOCKET_PUBLIC_PATH = "/run/main_public.sock"
MAIN_PROCESS_UNIX_SOCKET_PRIVATE_PATH = "/run/main_private.sock"

# A simple name used as a placeholder in the WORKERS_CONFIG below. This will be replaced
# during processing with the name of the worker.

@@ -407,11 +410,15 @@ def add_worker_roles_to_shared_config(
)

# Map of stream writer instance names to host/ports combos
instance_map[worker_name] = {
"host": "localhost",
"port": worker_port,
}

if os.environ.get("SYNAPSE_USE_UNIX_SOCKET", False):
instance_map[worker_name] = {
"path": f"/run/worker.{worker_port}",
}
else:
instance_map[worker_name] = {
"host": "localhost",
"port": worker_port,
}
# Update the list of stream writers. It's convenient that the name of the worker
# type is the same as the stream to write. Iterate over the whole list in case there
# is more than one.

@@ -423,10 +430,15 @@ def add_worker_roles_to_shared_config(

# Map of stream writer instance names to host/ports combos
# For now, all stream writers need http replication ports
instance_map[worker_name] = {
"host": "localhost",
"port": worker_port,
}
if os.environ.get("SYNAPSE_USE_UNIX_SOCKET", False):
instance_map[worker_name] = {
"path": f"/run/worker.{worker_port}",
}
else:
instance_map[worker_name] = {
"host": "localhost",
"port": worker_port,
}

def merge_worker_template_configs(

@@ -718,17 +730,29 @@ def generate_worker_files(
# Note that yaml cares about indentation, so care should be taken to insert lines
# into files at the correct indentation below.

# Convenience helper for if using unix sockets instead of host:port
using_unix_sockets = environ.get("SYNAPSE_USE_UNIX_SOCKET", False)
# First read the original config file and extract the listeners block. Then we'll
# add another listener for replication. Later we'll write out the result to the
# shared config file.
listeners = [
{
"port": MAIN_PROCESS_REPLICATION_PORT,
"bind_address": MAIN_PROCESS_LOCALHOST_ADDRESS,
"type": "http",
"resources": [{"names": ["replication"]}],
}
]
listeners: List[Any]
if using_unix_sockets:
listeners = [
{
"path": MAIN_PROCESS_UNIX_SOCKET_PRIVATE_PATH,
"type": "http",
"resources": [{"names": ["replication"]}],
}
]
else:
listeners = [
{
"port": MAIN_PROCESS_REPLICATION_PORT,
"bind_address": MAIN_PROCESS_LOCALHOST_ADDRESS,
"type": "http",
"resources": [{"names": ["replication"]}],
}
]
with open(config_path) as file_stream:
original_config = yaml.safe_load(file_stream)
original_listeners = original_config.get("listeners")

@@ -769,7 +793,17 @@ def generate_worker_files(

# A list of internal endpoints to healthcheck, starting with the main process
# which exists even if no workers do.
healthcheck_urls = ["http://localhost:8080/health"]
# This list ends up being part of the command line to curl, (curl added support for
# Unix sockets in version 7.40).
if using_unix_sockets:
healthcheck_urls = [
f"--unix-socket {MAIN_PROCESS_UNIX_SOCKET_PUBLIC_PATH} "
# The scheme and hostname from the following URL are ignored.
# The only thing that matters is the path `/health`
"http://localhost/health"
]
else:
healthcheck_urls = ["http://localhost:8080/health"]

# Get the set of all worker types that we have configured
all_worker_types_in_use = set(chain(*requested_worker_types.values()))

@@ -806,8 +840,12 @@ def generate_worker_files(
# given worker_type needs to stay assigned and not be replaced.
worker_config["shared_extra_conf"].update(shared_config)
shared_config = worker_config["shared_extra_conf"]

healthcheck_urls.append("http://localhost:%d/health" % (worker_port,))
if using_unix_sockets:
healthcheck_urls.append(
f"--unix-socket /run/worker.{worker_port} http://localhost/health"
)
else:
healthcheck_urls.append("http://localhost:%d/health" % (worker_port,))

# Update the shared config with sharding-related options if necessary
add_worker_roles_to_shared_config(

@@ -826,6 +864,7 @@ def generate_worker_files(
"/conf/workers/{name}.yaml".format(name=worker_name),
**worker_config,
worker_log_config_filepath=log_config_filepath,
using_unix_sockets=using_unix_sockets,
)

# Save this worker's port number to the correct nginx upstreams

@@ -846,8 +885,13 @@ def generate_worker_files(
nginx_upstream_config = ""
for upstream_worker_base_name, upstream_worker_ports in nginx_upstreams.items():
body = ""
for port in upstream_worker_ports:
body += f" server localhost:{port};\n"
if using_unix_sockets:
for port in upstream_worker_ports:
body += f" server unix:/run/worker.{port};\n"

else:
for port in upstream_worker_ports:
body += f" server localhost:{port};\n"

# Add to the list of configured upstreams
nginx_upstream_config += NGINX_UPSTREAM_CONFIG_BLOCK.format(

@@ -877,10 +921,15 @@ def generate_worker_files(
# If there are workers, add the main process to the instance_map too.
if workers_in_use:
instance_map = shared_config.setdefault("instance_map", {})
instance_map[MAIN_PROCESS_INSTANCE_NAME] = {
"host": MAIN_PROCESS_LOCALHOST_ADDRESS,
"port": MAIN_PROCESS_REPLICATION_PORT,
}
if using_unix_sockets:
instance_map[MAIN_PROCESS_INSTANCE_NAME] = {
"path": MAIN_PROCESS_UNIX_SOCKET_PRIVATE_PATH,
}
else:
instance_map[MAIN_PROCESS_INSTANCE_NAME] = {
"host": MAIN_PROCESS_LOCALHOST_ADDRESS,
"port": MAIN_PROCESS_REPLICATION_PORT,
}

# Shared homeserver config
convert(

@@ -890,6 +939,7 @@ def generate_worker_files(
appservice_registrations=appservice_registrations,
enable_redis=workers_in_use,
workers_in_use=workers_in_use,
using_unix_sockets=using_unix_sockets,
)

# Nginx config

@@ -900,6 +950,7 @@ def generate_worker_files(
upstream_directives=nginx_upstream_config,
tls_cert_path=os.environ.get("SYNAPSE_TLS_CERT"),
tls_key_path=os.environ.get("SYNAPSE_TLS_KEY"),
using_unix_sockets=using_unix_sockets,
)

# Supervisord config

@@ -909,6 +960,7 @@ def generate_worker_files(
"/etc/supervisor/supervisord.conf",
main_config_path=config_path,
enable_redis=workers_in_use,
using_unix_sockets=using_unix_sockets,
)

convert(
@@ -242,6 +242,9 @@ The following parameters should be set in the URL:

- `dir` - Direction of media order. Either `f` for forwards or `b` for backwards.
  Setting this value to `b` will reverse the above sort order. Defaults to `f`.
- `not_user_type` - Exclude certain user types, such as bot users, from the request.
  Can be provided multiple times. Possible values are `bot`, `support` or "empty string".
  "empty string" here means to exclude users without a type.

Caution. The database only has indexes on the columns `name` and `creation_ts`.
This means that if a different sort order is used (`is_guest`, `admin`,

@@ -1180,7 +1183,7 @@ The following parameters should be set in the URL:
- `user_id` - The fully qualified MXID: for example, `@user:server.com`. The user must
  be local.

### Check username availability
## Check username availability

Checks to see if a username is available, and valid, for the server. See [the client-server
API](https://matrix.org/docs/spec/client_server/r0.6.0#get-matrix-client-r0-register-available)

@@ -1198,7 +1201,7 @@ GET /_synapse/admin/v1/username_available?username=$localpart
The request and response format is the same as the
[/_matrix/client/r0/register/available](https://matrix.org/docs/spec/client_server/r0.6.0#get-matrix-client-r0-register-available) API.

### Find a user based on their ID in an auth provider
## Find a user based on their ID in an auth provider

The API is:

@@ -1237,7 +1240,7 @@ Returns a `404` HTTP status code if no user was found, with a response body like
_Added in Synapse 1.68.0._

### Find a user based on their Third Party ID (ThreePID or 3PID)
## Find a user based on their Third Party ID (ThreePID or 3PID)

The API is:
@@ -23,7 +23,7 @@ people building from source should ensure they can fetch recent versions of Rust
(e.g. by using [rustup](https://rustup.rs/)).

The oldest supported version of SQLite is the version
[provided](https://packages.debian.org/buster/libsqlite3-0) by
[provided](https://packages.debian.org/bullseye/libsqlite3-0) by
[Debian oldstable](https://wiki.debian.org/DebianOldStable).

Context
@@ -322,7 +322,7 @@ The following command will let you run the integration test with the most common
configuration:

```sh
$ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:buster
$ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:focal
```
(Note that the paths must be full paths! You could also write `$(realpath relative/path)` if needed.)

@@ -370,6 +370,7 @@ The above will run a monolithic (single-process) Synapse with SQLite as the data
  See the [worker documentation](../workers.md) for additional information on workers.
- Passing `ASYNCIO_REACTOR=1` as an environment variable to use the Twisted asyncio reactor instead of the default one.
- Passing `PODMAN=1` will use the [podman](https://podman.io/) container runtime, instead of docker.
- Passing `UNIX_SOCKETS=1` will utilise Unix socket functionality for Synapse, Redis, and Postgres (when applicable).

To increase the log level for the tests, set `SYNAPSE_TEST_LOG_LEVEL`, e.g:
```sh
@@ -200,7 +200,7 @@ When following this route please make sure that the [Platform-specific prerequis
System requirements:

- POSIX-compliant system (tested on Linux & OS X)
- Python 3.7 or later, up to Python 3.11.
- Python 3.8 or later, up to Python 3.11.
- At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org

If building on an uncommon architecture for which pre-built wheels are
@@ -1,8 +1,4 @@
worker_app: synapse.app.generic_worker
worker_name: background_worker

# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093

worker_log_config: /etc/matrix-synapse/background-worker-log.yaml

@@ -1,9 +1,5 @@
worker_app: synapse.app.generic_worker
worker_name: event_persister1

# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
worker_name: event_persister1

worker_listeners:
  - type: http

@@ -1,8 +1,4 @@
worker_app: synapse.app.federation_sender
worker_name: federation_sender1

# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093

worker_log_config: /etc/matrix-synapse/federation-sender-log.yaml

@@ -1,10 +1,6 @@
worker_app: synapse.app.media_repository
worker_name: media_worker

# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093

worker_listeners:
  - type: http
    port: 8085

@@ -1,8 +1,4 @@
worker_app: synapse.app.pusher
worker_name: pusher_worker1

# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093

worker_log_config: /etc/matrix-synapse/pusher-worker-log.yaml
@@ -87,6 +87,33 @@ process, for example:
wget https://packages.matrix.org/debian/pool/main/m/matrix-synapse-py3/matrix-synapse-py3_1.3.0+stretch1_amd64.deb
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
```

# Upgrading to v1.88.0

## Minimum supported Python version

The minimum supported Python version has been increased from v3.7 to v3.8.
You will need Python 3.8 to run Synapse v1.88.0 (due out July 18th, 2023).

If you use current versions of the Matrix.org-distributed Debian
packages or Docker images, no action is required.

## Removal of `worker_replication_*` settings

As mentioned previously in [Upgrading to v1.84.0](#upgrading-to-v1840), the following deprecated settings
are being removed in this release of Synapse:

* [`worker_replication_host`](https://matrix-org.github.io/synapse/v1.86/usage/configuration/config_documentation.html#worker_replication_host)
* [`worker_replication_http_port`](https://matrix-org.github.io/synapse/v1.86/usage/configuration/config_documentation.html#worker_replication_http_port)
* [`worker_replication_http_tls`](https://matrix-org.github.io/synapse/v1.86/usage/configuration/config_documentation.html#worker_replication_http_tls)

Please ensure that you have migrated to using `main` on your shared configuration's `instance_map`
(or create one if necessary). This is required if you have ***any*** workers at all;
administrators of single-process (monolith) installations don't need to do anything.

For an illustrative example, please see [Upgrading to v1.84.0](#upgrading-to-v1840) below.
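As a minimal sketch of the migrated shared configuration (the host and port below are placeholders for your own main process replication listener, not values taken from these upgrade notes):

```yaml
# Hypothetical shared configuration after removing the deprecated
# worker_replication_host / worker_replication_http_port settings.
# Workers reach the main process via the instance_map instead.
instance_map:
  main:
    host: 127.0.0.1   # address of the main process's replication listener
    port: 9093        # port of that replication listener
```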

# Upgrading to v1.86.0

## Minimum supported Rust version
@@ -462,6 +462,20 @@ See the docs [request log format](../administration/request_log.md).

* `additional_resources`: Only valid for an 'http' listener. A map of
  additional endpoints which should be loaded via dynamic modules.

Unix socket support (_Added in Synapse 1.89.0_):
* `path`: A path and filename for a Unix socket. Make sure it is located in a
  directory with read and write permissions, and that it already exists (the directory
  will not be created). Defaults to `None`.
  * **Note**: The `path` and `port` options are mutually exclusive; a single `listener` cannot use both.
  * The `x_forwarded` option defaults to true when using Unix sockets and can be omitted.
  * Other options that would not make sense to use with a UNIX socket, such as
    `bind_addresses` and `tls` will be ignored and can be removed.
* `mode`: The file permissions to set on the UNIX socket. Defaults to `666`
* **Note:** Must be set as `type: http` (does not support `metrics` and `manhole`).
  Also make sure that `metrics` is not included in `resources` -> `names`
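A hedged illustration of these options (the socket path below is a placeholder, and `mode` is shown with its documented default):

```yaml
# Minimal sketch of a Unix-socket listener, assuming the directory
# /run/synapse already exists and is writable by the Synapse process.
listeners:
  - type: http
    path: /run/synapse/main_public.sock
    mode: 666            # documented default file permissions
    resources:
      - names: [client, federation]
```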
Valid resource names are:

* `client`: the client-server API (/_matrix/client), and the synapse admin API (/_synapse/admin). Also implies `media` and `static`.
@@ -474,7 +488,7 @@ Valid resource names are:

* `media`: the media API (/_matrix/media).

* `metrics`: the metrics interface. See [here](../../metrics-howto.md).
* `metrics`: the metrics interface. See [here](../../metrics-howto.md). (Not compatible with Unix sockets)

* `openid`: OpenID authentication. See [here](../../openid.md).
@@ -533,6 +547,22 @@ listeners:
    bind_addresses: ['::1', '127.0.0.1']
    type: manhole
```
Example configuration #3:
```yaml
listeners:
  # Unix socket listener: Ideal for Synapse deployments behind a reverse proxy, offering
  # lightweight interprocess communication without TCP/IP overhead, avoiding port
  # conflicts, and providing enhanced security through system file permissions.
  #
  # Note that x_forwarded will default to true when using a UNIX socket. Please see
  # https://matrix-org.github.io/synapse/latest/reverse_proxy.html.
  #
  - path: /var/run/synapse/main_public.sock
    type: http
    resources:
      - names: [client, federation]
```

---
### `manhole_settings`
@@ -3949,6 +3979,14 @@ instance_map:
    host: localhost
    port: 8034
```
Example configuration (#2, for UNIX sockets):
```yaml
instance_map:
  main:
    path: /var/run/synapse/main_replication.sock
  worker1:
    path: /var/run/synapse/worker1_replication.sock
```
---
### `stream_writers`
@@ -4090,51 +4128,6 @@ Example configuration:
worker_name: generic_worker1
```
---
### `worker_replication_host`
*Deprecated as of version 1.84.0. Place `host` under `main` entry on the [`instance_map`](#instance_map) in your shared yaml configuration instead.*

The HTTP replication endpoint that it should talk to on the main Synapse process.
The main Synapse process defines this with a `replication` resource in
[`listeners` option](#listeners).

Example configuration:
```yaml
worker_replication_host: 127.0.0.1
```
---
### `worker_replication_http_port`
*Deprecated as of version 1.84.0. Place `port` under `main` entry on the [`instance_map`](#instance_map) in your shared yaml configuration instead.*

The HTTP replication port that it should talk to on the main Synapse process.
The main Synapse process defines this with a `replication` resource in
[`listeners` option](#listeners).

Example configuration:
```yaml
worker_replication_http_port: 9093
```
---
### `worker_replication_http_tls`
*Deprecated as of version 1.84.0. Place `tls` under `main` entry on the [`instance_map`](#instance_map) in your shared yaml configuration instead.*

Whether TLS should be used for talking to the HTTP replication port on the main
Synapse process.
The main Synapse process defines this with the `tls` option on its [listener](#listeners) that
has the `replication` resource enabled.

**Please note:** by default, it is not safe to expose replication ports to the
public Internet, even with TLS enabled.
See [`worker_replication_secret`](#worker_replication_secret).

Defaults to `false`.

*Added in Synapse 1.72.0.*

Example configuration:
```yaml
worker_replication_http_tls: true
```
---
### `worker_listeners`

A worker can handle HTTP requests. To do so, a `worker_listeners` option
@@ -4153,6 +4146,18 @@ worker_listeners:
    resources:
      - names: [client, federation]
```
Example configuration (#2, using UNIX sockets with a `replication` listener):
```yaml
worker_listeners:
  - type: http
    path: /var/run/synapse/worker_public.sock
    resources:
      - names: [client, federation]
  - type: http
    path: /var/run/synapse/worker_replication.sock
    resources:
      - names: [replication]
```
---
### `worker_manhole`
+8 −8
@@ -95,9 +95,12 @@ for the main process
* Secondly, you need to enable
  [redis-based replication](usage/configuration/config_documentation.md#redis)
* You will need to add an [`instance_map`](usage/configuration/config_documentation.md#instance_map)
  with the `main` process defined, as well as the relevant connection information from
  its HTTP `replication` listener (defined in step 1 above). Note that the `host` defined
  is the address the worker needs to look for the `main` process at, not necessarily the same address that is bound to.
  with the `main` process defined, as well as the relevant connection information from
  its HTTP `replication` listener (defined in step 1 above).
  * Note that the `host` defined is the address the worker needs to look for the `main`
    process at, not necessarily the same address that is bound to.
  * If you are using Unix sockets for the `replication` resource, make sure to
    use a `path` to the socket file instead of a `port`.
* Optionally, a [shared secret](usage/configuration/config_documentation.md#worker_replication_secret)
  can be used to authenticate HTTP traffic between workers. For example:
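The example itself sits outside this hunk's context; a minimal sketch of what such a shared configuration might contain (illustrative values only, assembled from options named elsewhere in this comparison):

```yaml
# Hypothetical shared configuration sketch for a worker deployment.
redis:
  enabled: true

instance_map:
  main:
    host: 127.0.0.1   # where workers reach the main process's replication listener
    port: 9093

# Optional shared secret used to authenticate replication traffic between workers.
worker_replication_secret: "replace-with-a-long-random-string"
```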
@@ -145,9 +148,6 @@ In the config file for each worker, you must specify:
  with an `http` listener.
* **Synapse 1.72 and older:** if handling the `^/_matrix/client/v3/keys/upload` endpoint, the HTTP URI for
  the main process (`worker_main_http_uri`). This config option is no longer required and is ignored when running Synapse 1.73 and newer.
* **Synapse 1.83 and older:** The HTTP replication endpoint that the worker should talk to on the main synapse process
  ([`worker_replication_host`](usage/configuration/config_documentation.md#worker_replication_host) and
  [`worker_replication_http_port`](usage/configuration/config_documentation.md#worker_replication_http_port)). If using Synapse 1.84 and newer, these are not needed if `main` is defined on the [shared configuration](#shared-configuration) `instance_map`

For example:
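Again the worked example lies outside the hunk; a hypothetical per-worker configuration, using only option names that appear elsewhere in this comparison (the worker name, port and log path are placeholders):

```yaml
worker_app: synapse.app.generic_worker
worker_name: generic_worker1

worker_listeners:
  - type: http
    port: 8085
    resources:
      - names: [client, federation]

worker_log_config: /etc/matrix-synapse/generic-worker-log.yaml
```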
@@ -177,11 +177,11 @@ The following applies to Synapse installations that have been installed from sou

You can start the main Synapse process with Poetry by running the following command:
```console
poetry run synapse_homeserver -c [your homeserver.yaml]
poetry run synapse_homeserver --config-file [your homeserver.yaml]
```
For worker setups, you can run the following command
```console
poetry run synapse_worker -c [your worker.yaml]
poetry run synapse_worker --config-file [your homeserver.yaml] --config-file [your worker.yaml]
```
## Available worker applications
@@ -178,7 +178,7 @@
EOF
'';
# Start synapse when `devenv up` is run.
processes.synapse.exec = "poetry run python -m synapse.app.homeserver -c homeserver.yaml --config-directory homeserver-config-overrides.d";
processes.synapse.exec = "poetry run python -m synapse.app.homeserver -c homeserver.yaml -c homeserver-config-overrides.d";

# Define the perl modules we require to run SyTest.
#
Generated +125 −163
@@ -41,9 +41,6 @@ files = [
{file = "attrs-23.1.0.tar.gz", hash = "sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015"},
]

[package.dependencies]
importlib-metadata = {version = "*", markers = "python_version < \"3.8\""}

[package.extras]
cov = ["attrs[tests]", "coverage[toml] (>=5.3)"]
dev = ["attrs[docs,tests]", "pre-commit"]

@@ -53,13 +50,13 @@ tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pyte

[[package]]
name = "authlib"
version = "1.2.0"
version = "1.2.1"
description = "The ultimate Python library in building OAuth and OpenID Connect servers and clients."
optional = true
python-versions = "*"
files = [
{file = "Authlib-1.2.0-py2.py3-none-any.whl", hash = "sha256:4ddf4fd6cfa75c9a460b361d4bd9dac71ffda0be879dbe4292a02e92349ad55a"},
{file = "Authlib-1.2.0.tar.gz", hash = "sha256:4fa3e80883a5915ef9f5bc28630564bc4ed5b5af39812a3ff130ec76bd631e9d"},
{file = "Authlib-1.2.1-py2.py3-none-any.whl", hash = "sha256:c88984ea00149a90e3537c964327da930779afa4564e354edfd98410bea01911"},
{file = "Authlib-1.2.1.tar.gz", hash = "sha256:421f7c6b468d907ca2d9afede256f068f87e34d23dd221c07d13d4c234726afb"},
]

[package.dependencies]

@@ -190,7 +187,6 @@ packaging = ">=22.0"
pathspec = ">=0.9.0"
platformdirs = ">=2"
tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
typed-ast = {version = ">=1.4.2", markers = "python_version < \"3.8\" and implementation_name == \"cpython\""}
typing-extensions = {version = ">=3.10.0.0", markers = "python_version < \"3.10\""}

[package.extras]

@@ -412,7 +408,6 @@ files = [

[package.dependencies]
colorama = {version = "*", markers = "platform_system == \"Windows\""}
importlib-metadata = {version = "*", markers = "python_version < \"3.8\""}

[[package]]
name = "click-default-group"

@@ -601,7 +596,6 @@ files = [

[package.dependencies]
gitdb = ">=4.0.1,<5"
typing-extensions = {version = ">=3.7.4.3", markers = "python_version < \"3.8\""}

[[package]]
name = "hiredis"

@@ -837,23 +831,22 @@ files = [

[[package]]
name = "importlib-metadata"
version = "6.6.0"
version = "6.7.0"
description = "Read metadata from Python packages"
optional = false
python-versions = ">=3.7"
files = [
{file = "importlib_metadata-6.6.0-py3-none-any.whl", hash = "sha256:43dd286a2cd8995d5eaef7fee2066340423b818ed3fd70adf0bad5f1fac53fed"},
{file = "importlib_metadata-6.6.0.tar.gz", hash = "sha256:92501cdf9cc66ebd3e612f1b4f0c0765dfa42f0fa38ffb319b6bd84dd675d705"},
{file = "importlib_metadata-6.7.0-py3-none-any.whl", hash = "sha256:cb52082e659e97afc5dac71e79de97d8681de3aa07ff18578330904a9d18e5b5"},
{file = "importlib_metadata-6.7.0.tar.gz", hash = "sha256:1aaf550d4f73e5d6783e7acb77aec43d49da8017410afae93822cc9cca98c4d4"},
]

[package.dependencies]
typing-extensions = {version = ">=3.6.4", markers = "python_version < \"3.8\""}
zipp = ">=0.5"

[package.extras]
docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
perf = ["ipython"]
testing = ["flake8 (<5)", "flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)"]
testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)", "pytest-ruff"]

[[package]]
name = "importlib-resources"

@@ -987,11 +980,9 @@ files = [

[package.dependencies]
attrs = ">=17.4.0"
importlib-metadata = {version = "*", markers = "python_version < \"3.8\""}
importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""}
pkgutil-resolve-name = {version = ">=1.3.10", markers = "python_version < \"3.9\""}
pyrsistent = ">=0.14.0,<0.17.0 || >0.17.0,<0.17.1 || >0.17.1,<0.17.2 || >0.17.2"
typing-extensions = {version = "*", markers = "python_version < \"3.8\""}

[package.extras]
format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"]

@@ -1082,95 +1073,108 @@ pyasn1 = ">=0.4.6"

[[package]]
name = "lxml"
version = "4.9.2"
version = "4.9.3"
description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API."
optional = true
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, != 3.4.*"
files = [
{file = "lxml-4.9.2-cp27-cp27m-macosx_10_15_x86_64.whl", hash = "sha256:76cf573e5a365e790396a5cc2b909812633409306c6531a6877c59061e42c4f2"},
|
||||
{file = "lxml-4.9.2-cp27-cp27m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b1f42b6921d0e81b1bcb5e395bc091a70f41c4d4e55ba99c6da2b31626c44892"},
|
||||
{file = "lxml-4.9.2-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:9f102706d0ca011de571de32c3247c6476b55bb6bc65a20f682f000b07a4852a"},
|
||||
{file = "lxml-4.9.2-cp27-cp27m-win32.whl", hash = "sha256:8d0b4612b66ff5d62d03bcaa043bb018f74dfea51184e53f067e6fdcba4bd8de"},
|
||||
{file = "lxml-4.9.2-cp27-cp27m-win_amd64.whl", hash = "sha256:4c8f293f14abc8fd3e8e01c5bd86e6ed0b6ef71936ded5bf10fe7a5efefbaca3"},
|
||||
{file = "lxml-4.9.2-cp27-cp27mu-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2899456259589aa38bfb018c364d6ae7b53c5c22d8e27d0ec7609c2a1ff78b50"},
|
||||
{file = "lxml-4.9.2-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6749649eecd6a9871cae297bffa4ee76f90b4504a2a2ab528d9ebe912b101975"},
|
||||
{file = "lxml-4.9.2-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:a08cff61517ee26cb56f1e949cca38caabe9ea9fbb4b1e10a805dc39844b7d5c"},
|
||||
{file = "lxml-4.9.2-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:85cabf64adec449132e55616e7ca3e1000ab449d1d0f9d7f83146ed5bdcb6d8a"},
|
||||
{file = "lxml-4.9.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:8340225bd5e7a701c0fa98284c849c9b9fc9238abf53a0ebd90900f25d39a4e4"},
|
||||
{file = "lxml-4.9.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:1ab8f1f932e8f82355e75dda5413a57612c6ea448069d4fb2e217e9a4bed13d4"},
|
||||
{file = "lxml-4.9.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:699a9af7dffaf67deeae27b2112aa06b41c370d5e7633e0ee0aea2e0b6c211f7"},
|
||||
{file = "lxml-4.9.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b9cc34af337a97d470040f99ba4282f6e6bac88407d021688a5d585e44a23184"},
|
||||
{file = "lxml-4.9.2-cp310-cp310-win32.whl", hash = "sha256:d02a5399126a53492415d4906ab0ad0375a5456cc05c3fc0fc4ca11771745cda"},
|
||||
{file = "lxml-4.9.2-cp310-cp310-win_amd64.whl", hash = "sha256:a38486985ca49cfa574a507e7a2215c0c780fd1778bb6290c21193b7211702ab"},
|
||||
{file = "lxml-4.9.2-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:c83203addf554215463b59f6399835201999b5e48019dc17f182ed5ad87205c9"},
|
||||
{file = "lxml-4.9.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:2a87fa548561d2f4643c99cd13131acb607ddabb70682dcf1dff5f71f781a4bf"},
|
||||
{file = "lxml-4.9.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:d6b430a9938a5a5d85fc107d852262ddcd48602c120e3dbb02137c83d212b380"},
|
||||
{file = "lxml-4.9.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:3efea981d956a6f7173b4659849f55081867cf897e719f57383698af6f618a92"},
|
||||
{file = "lxml-4.9.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:df0623dcf9668ad0445e0558a21211d4e9a149ea8f5666917c8eeec515f0a6d1"},
|
||||
{file = "lxml-4.9.2-cp311-cp311-win32.whl", hash = "sha256:da248f93f0418a9e9d94b0080d7ebc407a9a5e6d0b57bb30db9b5cc28de1ad33"},
|
||||
{file = "lxml-4.9.2-cp311-cp311-win_amd64.whl", hash = "sha256:3818b8e2c4b5148567e1b09ce739006acfaa44ce3156f8cbbc11062994b8e8dd"},
|
||||
{file = "lxml-4.9.2-cp35-cp35m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ca989b91cf3a3ba28930a9fc1e9aeafc2a395448641df1f387a2d394638943b0"},
|
||||
{file = "lxml-4.9.2-cp35-cp35m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:822068f85e12a6e292803e112ab876bc03ed1f03dddb80154c395f891ca6b31e"},
|
||||
{file = "lxml-4.9.2-cp35-cp35m-win32.whl", hash = "sha256:be7292c55101e22f2a3d4d8913944cbea71eea90792bf914add27454a13905df"},
|
||||
{file = "lxml-4.9.2-cp35-cp35m-win_amd64.whl", hash = "sha256:998c7c41910666d2976928c38ea96a70d1aa43be6fe502f21a651e17483a43c5"},
|
||||
{file = "lxml-4.9.2-cp36-cp36m-macosx_10_15_x86_64.whl", hash = "sha256:b26a29f0b7fc6f0897f043ca366142d2b609dc60756ee6e4e90b5f762c6adc53"},
|
||||
{file = "lxml-4.9.2-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:ab323679b8b3030000f2be63e22cdeea5b47ee0abd2d6a1dc0c8103ddaa56cd7"},
|
||||
{file = "lxml-4.9.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:689bb688a1db722485e4610a503e3e9210dcc20c520b45ac8f7533c837be76fe"},
|
||||
{file = "lxml-4.9.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:f49e52d174375a7def9915c9f06ec4e569d235ad428f70751765f48d5926678c"},
|
||||
{file = "lxml-4.9.2-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:36c3c175d34652a35475a73762b545f4527aec044910a651d2bf50de9c3352b1"},
|
||||
{file = "lxml-4.9.2-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a35f8b7fa99f90dd2f5dc5a9fa12332642f087a7641289ca6c40d6e1a2637d8e"},
|
||||
{file = "lxml-4.9.2-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:58bfa3aa19ca4c0f28c5dde0ff56c520fbac6f0daf4fac66ed4c8d2fb7f22e74"},
|
||||
{file = "lxml-4.9.2-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:bc718cd47b765e790eecb74d044cc8d37d58562f6c314ee9484df26276d36a38"},
|
||||
{file = "lxml-4.9.2-cp36-cp36m-win32.whl", hash = "sha256:d5bf6545cd27aaa8a13033ce56354ed9e25ab0e4ac3b5392b763d8d04b08e0c5"},
|
||||
{file = "lxml-4.9.2-cp36-cp36m-win_amd64.whl", hash = "sha256:3ab9fa9d6dc2a7f29d7affdf3edebf6ece6fb28a6d80b14c3b2fb9d39b9322c3"},
|
||||
{file = "lxml-4.9.2-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:05ca3f6abf5cf78fe053da9b1166e062ade3fa5d4f92b4ed688127ea7d7b1d03"},
|
||||
{file = "lxml-4.9.2-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:a5da296eb617d18e497bcf0a5c528f5d3b18dadb3619fbdadf4ed2356ef8d941"},
|
||||
{file = "lxml-4.9.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:04876580c050a8c5341d706dd464ff04fd597095cc8c023252566a8826505726"},
|
||||
{file = "lxml-4.9.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:c9ec3eaf616d67db0764b3bb983962b4f385a1f08304fd30c7283954e6a7869b"},
|
||||
{file = "lxml-4.9.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2a29ba94d065945944016b6b74e538bdb1751a1db6ffb80c9d3c2e40d6fa9894"},
|
||||
{file = "lxml-4.9.2-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a82d05da00a58b8e4c0008edbc8a4b6ec5a4bc1e2ee0fb6ed157cf634ed7fa45"},
|
||||
{file = "lxml-4.9.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:223f4232855ade399bd409331e6ca70fb5578efef22cf4069a6090acc0f53c0e"},
|
||||
{file = "lxml-4.9.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d17bc7c2ccf49c478c5bdd447594e82692c74222698cfc9b5daae7ae7e90743b"},
|
||||
{file = "lxml-4.9.2-cp37-cp37m-win32.whl", hash = "sha256:b64d891da92e232c36976c80ed7ebb383e3f148489796d8d31a5b6a677825efe"},
|
||||
{file = "lxml-4.9.2-cp37-cp37m-win_amd64.whl", hash = "sha256:a0a336d6d3e8b234a3aae3c674873d8f0e720b76bc1d9416866c41cd9500ffb9"},
|
||||
{file = "lxml-4.9.2-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:da4dd7c9c50c059aba52b3524f84d7de956f7fef88f0bafcf4ad7dde94a064e8"},
|
||||
{file = "lxml-4.9.2-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:821b7f59b99551c69c85a6039c65b75f5683bdc63270fec660f75da67469ca24"},
|
||||
{file = "lxml-4.9.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:e5168986b90a8d1f2f9dc1b841467c74221bd752537b99761a93d2d981e04889"},
|
||||
{file = "lxml-4.9.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:8e20cb5a47247e383cf4ff523205060991021233ebd6f924bca927fcf25cf86f"},
|
||||
{file = "lxml-4.9.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:13598ecfbd2e86ea7ae45ec28a2a54fb87ee9b9fdb0f6d343297d8e548392c03"},
|
||||
{file = "lxml-4.9.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:880bbbcbe2fca64e2f4d8e04db47bcdf504936fa2b33933efd945e1b429bea8c"},
|
||||
{file = "lxml-4.9.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:7d2278d59425777cfcb19735018d897ca8303abe67cc735f9f97177ceff8027f"},
|
||||
{file = "lxml-4.9.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5344a43228767f53a9df6e5b253f8cdca7dfc7b7aeae52551958192f56d98457"},
|
||||
{file = "lxml-4.9.2-cp38-cp38-win32.whl", hash = "sha256:925073b2fe14ab9b87e73f9a5fde6ce6392da430f3004d8b72cc86f746f5163b"},
|
||||
{file = "lxml-4.9.2-cp38-cp38-win_amd64.whl", hash = "sha256:9b22c5c66f67ae00c0199f6055705bc3eb3fcb08d03d2ec4059a2b1b25ed48d7"},
|
||||
{file = "lxml-4.9.2-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:5f50a1c177e2fa3ee0667a5ab79fdc6b23086bc8b589d90b93b4bd17eb0e64d1"},
|
||||
{file = "lxml-4.9.2-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:090c6543d3696cbe15b4ac6e175e576bcc3f1ccfbba970061b7300b0c15a2140"},
|
||||
{file = "lxml-4.9.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:63da2ccc0857c311d764e7d3d90f429c252e83b52d1f8f1d1fe55be26827d1f4"},
|
||||
{file = "lxml-4.9.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:5b4545b8a40478183ac06c073e81a5ce4cf01bf1734962577cf2bb569a5b3bbf"},
|
||||
{file = "lxml-4.9.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2e430cd2824f05f2d4f687701144556646bae8f249fd60aa1e4c768ba7018947"},
|
||||
{file = "lxml-4.9.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6804daeb7ef69e7b36f76caddb85cccd63d0c56dedb47555d2fc969e2af6a1a5"},
|
||||
{file = "lxml-4.9.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a6e441a86553c310258aca15d1c05903aaf4965b23f3bc2d55f200804e005ee5"},
|
||||
{file = "lxml-4.9.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ca34efc80a29351897e18888c71c6aca4a359247c87e0b1c7ada14f0ab0c0fb2"},
|
||||
{file = "lxml-4.9.2-cp39-cp39-win32.whl", hash = "sha256:6b418afe5df18233fc6b6093deb82a32895b6bb0b1155c2cdb05203f583053f1"},
|
||||
{file = "lxml-4.9.2-cp39-cp39-win_amd64.whl", hash = "sha256:f1496ea22ca2c830cbcbd473de8f114a320da308438ae65abad6bab7867fe38f"},
|
||||
{file = "lxml-4.9.2-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:b264171e3143d842ded311b7dccd46ff9ef34247129ff5bf5066123c55c2431c"},
|
||||
{file = "lxml-4.9.2-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:0dc313ef231edf866912e9d8f5a042ddab56c752619e92dfd3a2c277e6a7299a"},
|
||||
{file = "lxml-4.9.2-pp38-pypy38_pp73-macosx_10_15_x86_64.whl", hash = "sha256:16efd54337136e8cd72fb9485c368d91d77a47ee2d42b057564aae201257d419"},
|
||||
{file = "lxml-4.9.2-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:0f2b1e0d79180f344ff9f321327b005ca043a50ece8713de61d1cb383fb8ac05"},
|
||||
{file = "lxml-4.9.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:7b770ed79542ed52c519119473898198761d78beb24b107acf3ad65deae61f1f"},
|
||||
{file = "lxml-4.9.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:efa29c2fe6b4fdd32e8ef81c1528506895eca86e1d8c4657fda04c9b3786ddf9"},
|
||||
{file = "lxml-4.9.2-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7e91ee82f4199af8c43d8158024cbdff3d931df350252288f0d4ce656df7f3b5"},
|
||||
{file = "lxml-4.9.2-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:b23e19989c355ca854276178a0463951a653309fb8e57ce674497f2d9f208746"},
|
||||
{file = "lxml-4.9.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:01d36c05f4afb8f7c20fd9ed5badca32a2029b93b1750f571ccc0b142531caf7"},
|
||||
{file = "lxml-4.9.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7b515674acfdcadb0eb5d00d8a709868173acece5cb0be3dd165950cbfdf5409"},
|
||||
{file = "lxml-4.9.2.tar.gz", hash = "sha256:2455cfaeb7ac70338b3257f41e21f0724f4b5b0c0e7702da67ee6c3640835b67"},
|
||||
{file = "lxml-4.9.3-cp27-cp27m-macosx_11_0_x86_64.whl", hash = "sha256:b0a545b46b526d418eb91754565ba5b63b1c0b12f9bd2f808c852d9b4b2f9b5c"},
|
||||
{file = "lxml-4.9.3-cp27-cp27m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:075b731ddd9e7f68ad24c635374211376aa05a281673ede86cbe1d1b3455279d"},
|
||||
{file = "lxml-4.9.3-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:1e224d5755dba2f4a9498e150c43792392ac9b5380aa1b845f98a1618c94eeef"},
|
||||
{file = "lxml-4.9.3-cp27-cp27mu-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c0781a98ff5e6586926293e59480b64ddd46282953203c76ae15dbbbf302e8bb"},
|
||||
{file = "lxml-4.9.3-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:cef2502e7e8a96fe5ad686d60b49e1ab03e438bd9123987994528febd569868e"},
|
||||
{file = "lxml-4.9.3-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:b86164d2cff4d3aaa1f04a14685cbc072efd0b4f99ca5708b2ad1b9b5988a991"},
|
||||
{file = "lxml-4.9.3-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:42871176e7896d5d45138f6d28751053c711ed4d48d8e30b498da155af39aebd"},
|
||||
{file = "lxml-4.9.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:ae8b9c6deb1e634ba4f1930eb67ef6e6bf6a44b6eb5ad605642b2d6d5ed9ce3c"},
|
||||
{file = "lxml-4.9.3-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:411007c0d88188d9f621b11d252cce90c4a2d1a49db6c068e3c16422f306eab8"},
|
||||
{file = "lxml-4.9.3-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:cd47b4a0d41d2afa3e58e5bf1f62069255aa2fd6ff5ee41604418ca925911d76"},
|
||||
{file = "lxml-4.9.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0e2cb47860da1f7e9a5256254b74ae331687b9672dfa780eed355c4c9c3dbd23"},
|
||||
{file = "lxml-4.9.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:1247694b26342a7bf47c02e513d32225ededd18045264d40758abeb3c838a51f"},
|
||||
{file = "lxml-4.9.3-cp310-cp310-win32.whl", hash = "sha256:cdb650fc86227eba20de1a29d4b2c1bfe139dc75a0669270033cb2ea3d391b85"},
|
||||
{file = "lxml-4.9.3-cp310-cp310-win_amd64.whl", hash = "sha256:97047f0d25cd4bcae81f9ec9dc290ca3e15927c192df17331b53bebe0e3ff96d"},
|
||||
{file = "lxml-4.9.3-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:1f447ea5429b54f9582d4b955f5f1985f278ce5cf169f72eea8afd9502973dd5"},
|
||||
{file = "lxml-4.9.3-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:57d6ba0ca2b0c462f339640d22882acc711de224d769edf29962b09f77129cbf"},
|
||||
{file = "lxml-4.9.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:9767e79108424fb6c3edf8f81e6730666a50feb01a328f4a016464a5893f835a"},
|
||||
{file = "lxml-4.9.3-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:71c52db65e4b56b8ddc5bb89fb2e66c558ed9d1a74a45ceb7dcb20c191c3df2f"},
|
||||
{file = "lxml-4.9.3-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:d73d8ecf8ecf10a3bd007f2192725a34bd62898e8da27eb9d32a58084f93962b"},
|
||||
{file = "lxml-4.9.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0a3d3487f07c1d7f150894c238299934a2a074ef590b583103a45002035be120"},
|
||||
{file = "lxml-4.9.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9e28c51fa0ce5674be9f560c6761c1b441631901993f76700b1b30ca6c8378d6"},
|
||||
{file = "lxml-4.9.3-cp311-cp311-win32.whl", hash = "sha256:0bfd0767c5c1de2551a120673b72e5d4b628737cb05414f03c3277bf9bed3305"},
|
||||
{file = "lxml-4.9.3-cp311-cp311-win_amd64.whl", hash = "sha256:25f32acefac14ef7bd53e4218fe93b804ef6f6b92ffdb4322bb6d49d94cad2bc"},
|
||||
{file = "lxml-4.9.3-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:d3ff32724f98fbbbfa9f49d82852b159e9784d6094983d9a8b7f2ddaebb063d4"},
|
||||
{file = "lxml-4.9.3-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:48d6ed886b343d11493129e019da91d4039826794a3e3027321c56d9e71505be"},
|
||||
{file = "lxml-4.9.3-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:9a92d3faef50658dd2c5470af249985782bf754c4e18e15afb67d3ab06233f13"},
|
||||
{file = "lxml-4.9.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b4e4bc18382088514ebde9328da057775055940a1f2e18f6ad2d78aa0f3ec5b9"},
|
||||
{file = "lxml-4.9.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:fc9b106a1bf918db68619fdcd6d5ad4f972fdd19c01d19bdb6bf63f3589a9ec5"},
|
||||
{file = "lxml-4.9.3-cp312-cp312-win_amd64.whl", hash = "sha256:d37017287a7adb6ab77e1c5bee9bcf9660f90ff445042b790402a654d2ad81d8"},
|
||||
{file = "lxml-4.9.3-cp35-cp35m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:56dc1f1ebccc656d1b3ed288f11e27172a01503fc016bcabdcbc0978b19352b7"},
|
||||
{file = "lxml-4.9.3-cp35-cp35m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:578695735c5a3f51569810dfebd05dd6f888147a34f0f98d4bb27e92b76e05c2"},
|
||||
{file = "lxml-4.9.3-cp35-cp35m-win32.whl", hash = "sha256:704f61ba8c1283c71b16135caf697557f5ecf3e74d9e453233e4771d68a1f42d"},
|
||||
{file = "lxml-4.9.3-cp35-cp35m-win_amd64.whl", hash = "sha256:c41bfca0bd3532d53d16fd34d20806d5c2b1ace22a2f2e4c0008570bf2c58833"},
|
||||
{file = "lxml-4.9.3-cp36-cp36m-macosx_11_0_x86_64.whl", hash = "sha256:64f479d719dc9f4c813ad9bb6b28f8390360660b73b2e4beb4cb0ae7104f1c12"},
|
||||
{file = "lxml-4.9.3-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:dd708cf4ee4408cf46a48b108fb9427bfa00b9b85812a9262b5c668af2533ea5"},
|
||||
{file = "lxml-4.9.3-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c31c7462abdf8f2ac0577d9f05279727e698f97ecbb02f17939ea99ae8daa98"},
|
||||
{file = "lxml-4.9.3-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:e3cd95e10c2610c360154afdc2f1480aea394f4a4f1ea0a5eacce49640c9b190"},
|
||||
{file = "lxml-4.9.3-cp36-cp36m-manylinux_2_28_x86_64.whl", hash = "sha256:4930be26af26ac545c3dffb662521d4e6268352866956672231887d18f0eaab2"},
|
||||
{file = "lxml-4.9.3-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4aec80cde9197340bc353d2768e2a75f5f60bacda2bab72ab1dc499589b3878c"},
|
||||
{file = "lxml-4.9.3-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:14e019fd83b831b2e61baed40cab76222139926b1fb5ed0e79225bc0cae14584"},
|
||||
{file = "lxml-4.9.3-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:0c0850c8b02c298d3c7006b23e98249515ac57430e16a166873fc47a5d549287"},
|
||||
{file = "lxml-4.9.3-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:aca086dc5f9ef98c512bac8efea4483eb84abbf926eaeedf7b91479feb092458"},
|
||||
{file = "lxml-4.9.3-cp36-cp36m-win32.whl", hash = "sha256:50baa9c1c47efcaef189f31e3d00d697c6d4afda5c3cde0302d063492ff9b477"},
|
||||
{file = "lxml-4.9.3-cp36-cp36m-win_amd64.whl", hash = "sha256:bef4e656f7d98aaa3486d2627e7d2df1157d7e88e7efd43a65aa5dd4714916cf"},
|
||||
{file = "lxml-4.9.3-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:46f409a2d60f634fe550f7133ed30ad5321ae2e6630f13657fb9479506b00601"},
|
||||
{file = "lxml-4.9.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:4c28a9144688aef80d6ea666c809b4b0e50010a2aca784c97f5e6bf143d9f129"},
|
||||
{file = "lxml-4.9.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:141f1d1a9b663c679dc524af3ea1773e618907e96075262726c7612c02b149a4"},
|
||||
{file = "lxml-4.9.3-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:53ace1c1fd5a74ef662f844a0413446c0629d151055340e9893da958a374f70d"},
|
||||
{file = "lxml-4.9.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:17a753023436a18e27dd7769e798ce302963c236bc4114ceee5b25c18c52c693"},
|
||||
{file = "lxml-4.9.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:7d298a1bd60c067ea75d9f684f5f3992c9d6766fadbc0bcedd39750bf344c2f4"},
|
||||
{file = "lxml-4.9.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:081d32421db5df44c41b7f08a334a090a545c54ba977e47fd7cc2deece78809a"},
|
||||
{file = "lxml-4.9.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:23eed6d7b1a3336ad92d8e39d4bfe09073c31bfe502f20ca5116b2a334f8ec02"},
|
||||
{file = "lxml-4.9.3-cp37-cp37m-win32.whl", hash = "sha256:1509dd12b773c02acd154582088820893109f6ca27ef7291b003d0e81666109f"},
|
||||
{file = "lxml-4.9.3-cp37-cp37m-win_amd64.whl", hash = "sha256:120fa9349a24c7043854c53cae8cec227e1f79195a7493e09e0c12e29f918e52"},
|
||||
{file = "lxml-4.9.3-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:4d2d1edbca80b510443f51afd8496be95529db04a509bc8faee49c7b0fb6d2cc"},
|
||||
{file = "lxml-4.9.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:8d7e43bd40f65f7d97ad8ef5c9b1778943d02f04febef12def25f7583d19baac"},
|
||||
{file = "lxml-4.9.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:71d66ee82e7417828af6ecd7db817913cb0cf9d4e61aa0ac1fde0583d84358db"},
|
||||
{file = "lxml-4.9.3-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:6fc3c450eaa0b56f815c7b62f2b7fba7266c4779adcf1cece9e6deb1de7305ce"},
|
||||
{file = "lxml-4.9.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:65299ea57d82fb91c7f019300d24050c4ddeb7c5a190e076b5f48a2b43d19c42"},
|
||||
{file = "lxml-4.9.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:eadfbbbfb41b44034a4c757fd5d70baccd43296fb894dba0295606a7cf3124aa"},
|
||||
{file = "lxml-4.9.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:3e9bdd30efde2b9ccfa9cb5768ba04fe71b018a25ea093379c857c9dad262c40"},
|
||||
{file = "lxml-4.9.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fcdd00edfd0a3001e0181eab3e63bd5c74ad3e67152c84f93f13769a40e073a7"},
|
||||
{file = "lxml-4.9.3-cp38-cp38-win32.whl", hash = "sha256:57aba1bbdf450b726d58b2aea5fe47c7875f5afb2c4a23784ed78f19a0462574"},
|
||||
{file = "lxml-4.9.3-cp38-cp38-win_amd64.whl", hash = "sha256:92af161ecbdb2883c4593d5ed4815ea71b31fafd7fd05789b23100d081ecac96"},
|
||||
{file = "lxml-4.9.3-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:9bb6ad405121241e99a86efff22d3ef469024ce22875a7ae045896ad23ba2340"},
|
||||
{file = "lxml-4.9.3-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:8ed74706b26ad100433da4b9d807eae371efaa266ffc3e9191ea436087a9d6a7"},
|
||||
{file = "lxml-4.9.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:fbf521479bcac1e25a663df882c46a641a9bff6b56dc8b0fafaebd2f66fb231b"},
|
||||
{file = "lxml-4.9.3-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:303bf1edce6ced16bf67a18a1cf8339d0db79577eec5d9a6d4a80f0fb10aa2da"},
|
||||
{file = "lxml-4.9.3-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:5515edd2a6d1a5a70bfcdee23b42ec33425e405c5b351478ab7dc9347228f96e"},
|
||||
{file = "lxml-4.9.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:690dafd0b187ed38583a648076865d8c229661ed20e48f2335d68e2cf7dc829d"},
|
||||
{file = "lxml-4.9.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:b6420a005548ad52154c8ceab4a1290ff78d757f9e5cbc68f8c77089acd3c432"},
|
||||
{file = "lxml-4.9.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bb3bb49c7a6ad9d981d734ef7c7193bc349ac338776a0360cc671eaee89bcf69"},
|
||||
{file = "lxml-4.9.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d27be7405547d1f958b60837dc4c1007da90b8b23f54ba1f8b728c78fdb19d50"},
|
||||
{file = "lxml-4.9.3-cp39-cp39-win32.whl", hash = "sha256:8df133a2ea5e74eef5e8fc6f19b9e085f758768a16e9877a60aec455ed2609b2"},
|
||||
{file = "lxml-4.9.3-cp39-cp39-win_amd64.whl", hash = "sha256:4dd9a263e845a72eacb60d12401e37c616438ea2e5442885f65082c276dfb2b2"},
|
||||
{file = "lxml-4.9.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:6689a3d7fd13dc687e9102a27e98ef33730ac4fe37795d5036d18b4d527abd35"},
|
||||
{file = "lxml-4.9.3-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:f6bdac493b949141b733c5345b6ba8f87a226029cbabc7e9e121a413e49441e0"},
|
||||
{file = "lxml-4.9.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:05186a0f1346ae12553d66df1cfce6f251589fea3ad3da4f3ef4e34b2d58c6a3"},
|
||||
{file = "lxml-4.9.3-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c2006f5c8d28dee289f7020f721354362fa304acbaaf9745751ac4006650254b"},
|
||||
{file = "lxml-4.9.3-pp38-pypy38_pp73-macosx_11_0_x86_64.whl", hash = "sha256:5c245b783db29c4e4fbbbfc9c5a78be496c9fea25517f90606aa1f6b2b3d5f7b"},
|
||||
{file = "lxml-4.9.3-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:4fb960a632a49f2f089d522f70496640fdf1218f1243889da3822e0a9f5f3ba7"},
|
||||
{file = "lxml-4.9.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:50670615eaf97227d5dc60de2dc99fb134a7130d310d783314e7724bf163f75d"},
|
||||
{file = "lxml-4.9.3-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:9719fe17307a9e814580af1f5c6e05ca593b12fb7e44fe62450a5384dbf61b4b"},
|
||||
{file = "lxml-4.9.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:3331bece23c9ee066e0fb3f96c61322b9e0f54d775fccefff4c38ca488de283a"},
|
||||
{file = "lxml-4.9.3-pp39-pypy39_pp73-macosx_11_0_x86_64.whl", hash = "sha256:ed667f49b11360951e201453fc3967344d0d0263aa415e1619e85ae7fd17b4e0"},
|
||||
{file = "lxml-4.9.3-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:8b77946fd508cbf0fccd8e400a7f71d4ac0e1595812e66025bac475a8e811694"},
|
||||
{file = "lxml-4.9.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:e4da8ca0c0c0aea88fd46be8e44bd49716772358d648cce45fe387f7b92374a7"},
|
||||
{file = "lxml-4.9.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:fe4bda6bd4340caa6e5cf95e73f8fea5c4bfc55763dd42f1b50a94c1b4a2fbd4"},
|
||||
{file = "lxml-4.9.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:f3df3db1d336b9356dd3112eae5f5c2b8b377f3bc826848567f10bfddfee77e9"},
|
||||
{file = "lxml-4.9.3.tar.gz", hash = "sha256:48628bd53a426c9eb9bc066a923acaa0878d1e86129fd5359aee99285f4eed9c"},
|
||||
]
[package.extras]
cssselect = ["cssselect (>=0.7)"]
html5 = ["html5lib"]
htmlsoup = ["BeautifulSoup4"]
source = ["Cython (>=0.29.7)"]
source = ["Cython (>=0.29.35)"]

[[package]]
name = "lxml-stubs"
@@ -1199,7 +1203,6 @@ files = [
|
||||
|
||||
[package.dependencies]
|
||||
mdurl = ">=0.1,<1.0"
|
||||
typing_extensions = {version = ">=3.7.4", markers = "python_version < \"3.8\""}
|
||||
|
||||
[package.extras]
|
||||
benchmarking = ["psutil", "pytest", "pytest-benchmark"]
|
||||
@@ -1283,7 +1286,6 @@ files = [
|
||||
|
||||
[package.dependencies]
|
||||
attrs = "*"
|
||||
importlib-metadata = {version = ">=1.4", markers = "python_version < \"3.8\""}
|
||||
|
||||
[package.extras]
|
||||
dev = ["aiounittest", "black (==22.3.0)", "build (==0.8.0)", "flake8 (==4.0.1)", "isort (==5.9.3)", "mypy (==0.910)", "tox", "twine (==4.0.1)", "twisted"]
|
||||
@@ -1459,7 +1461,6 @@ files = [
|
||||
[package.dependencies]
|
||||
mypy-extensions = ">=0.4.3"
|
||||
tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
|
||||
typed-ast = {version = ">=1.4.0,<2", markers = "python_version < \"3.8\""}
|
||||
typing-extensions = ">=3.10"
|
||||
|
||||
[package.extras]
|
||||
@@ -1721,9 +1722,6 @@ files = [
|
||||
{file = "platformdirs-3.1.1.tar.gz", hash = "sha256:024996549ee88ec1a9aa99ff7f8fc819bb59e2c3477b410d90a16d32d6e707aa"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
typing-extensions = {version = ">=4.4", markers = "python_version < \"3.8\""}
|
||||
|
||||
[package.extras]
|
||||
docs = ["furo (>=2022.12.7)", "proselint (>=0.13)", "sphinx (>=6.1.3)", "sphinx-autodoc-typehints (>=1.22,!=1.23.4)"]
|
||||
test = ["appdirs (==1.4.4)", "covdefaults (>=2.2.2)", "pytest (>=7.2.1)", "pytest-cov (>=4)", "pytest-mock (>=3.10)"]
|
||||
@@ -2060,7 +2058,6 @@ files = [
|
||||
[package.dependencies]
|
||||
cryptography = ">=3.1"
|
||||
defusedxml = "*"
|
||||
importlib-metadata = {version = ">=1.7.0", markers = "python_version < \"3.8\""}
|
||||
importlib-resources = {version = "*", markers = "python_version < \"3.9\""}
|
||||
pyopenssl = "*"
|
||||
python-dateutil = "*"
|
||||
@@ -2245,28 +2242,28 @@ jupyter = ["ipywidgets (>=7.5.1,<9)"]
|
||||
|
||||
[[package]]
|
||||
name = "ruff"
|
||||
version = "0.0.275"
|
||||
version = "0.0.277"
|
||||
description = "An extremely fast Python linter, written in Rust."
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
files = [
|
||||
{file = "ruff-0.0.275-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:5e6554a072e7ce81eb6f0bec1cebd3dcb0e358652c0f4900d7d630d61691e914"},
|
||||
{file = "ruff-0.0.275-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:1cc599022fe5ffb143a965b8d659eb64161ab8ab4433d208777eab018a1aab67"},
|
||||
{file = "ruff-0.0.275-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5206fc1cd8c1c1deadd2e6360c0dbcd690f1c845da588ca9d32e4a764a402c60"},
|
||||
{file = "ruff-0.0.275-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0c4e6468da26f77b90cae35319d310999f471a8c352998e9b39937a23750149e"},
|
||||
{file = "ruff-0.0.275-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0dbdea02942131dbc15dd45f431d152224f15e1dd1859fcd0c0487b658f60f1a"},
|
||||
{file = "ruff-0.0.275-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:22efd9f41af27ef8fb9779462c46c35c89134d33e326c889971e10b2eaf50c63"},
|
||||
{file = "ruff-0.0.275-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2c09662112cfa22d7467a19252a546291fd0eae4f423e52b75a7a2000a1894db"},
|
||||
{file = "ruff-0.0.275-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80043726662144876a381efaab88841c88e8df8baa69559f96b22d4fa216bef1"},
|
||||
{file = "ruff-0.0.275-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5859ee543b01b7eb67835dfd505faa8bb7cc1550f0295c92c1401b45b42be399"},
|
||||
{file = "ruff-0.0.275-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:c8ace4d40a57b5ea3c16555f25a6b16bc5d8b2779ae1912ce2633543d4e9b1da"},
|
||||
{file = "ruff-0.0.275-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:8347fc16aa185aae275906c4ac5b770e00c896b6a0acd5ba521f158801911998"},
|
||||
{file = "ruff-0.0.275-py3-none-musllinux_1_2_i686.whl", hash = "sha256:ec43658c64bfda44fd84bbea9da8c7a3b34f65448192d1c4dd63e9f4e7abfdd4"},
|
||||
{file = "ruff-0.0.275-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:508b13f7ca37274cceaba4fb3ea5da6ca192356323d92acf39462337c33ad14e"},
|
||||
{file = "ruff-0.0.275-py3-none-win32.whl", hash = "sha256:6afb1c4422f24f361e877937e2a44b3f8176774a476f5e33845ebfe887dd5ec2"},
|
||||
{file = "ruff-0.0.275-py3-none-win_amd64.whl", hash = "sha256:d9b264d78621bf7b698b6755d4913ab52c19bd28bee1a16001f954d64c1a1220"},
|
||||
{file = "ruff-0.0.275-py3-none-win_arm64.whl", hash = "sha256:a19ce3bea71023eee5f0f089dde4a4272d088d5ac0b675867e074983238ccc65"},
|
||||
{file = "ruff-0.0.275.tar.gz", hash = "sha256:a63a0b645da699ae5c758fce19188e901b3033ec54d862d93fcd042addf7f38d"},
|
||||
{file = "ruff-0.0.277-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:3250b24333ef419b7a232080d9724ccc4d2da1dbbe4ce85c4caa2290d83200f8"},
|
||||
{file = "ruff-0.0.277-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:3e60605e07482183ba1c1b7237eca827bd6cbd3535fe8a4ede28cbe2a323cb97"},
|
||||
{file = "ruff-0.0.277-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7baa97c3d7186e5ed4d5d4f6834d759a27e56cf7d5874b98c507335f0ad5aadb"},
|
||||
{file = "ruff-0.0.277-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:74e4b206cb24f2e98a615f87dbe0bde18105217cbcc8eb785bb05a644855ba50"},
|
||||
{file = "ruff-0.0.277-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:479864a3ccd8a6a20a37a6e7577bdc2406868ee80b1e65605478ad3b8eb2ba0b"},
|
||||
{file = "ruff-0.0.277-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:468bfb0a7567443cec3d03cf408d6f562b52f30c3c29df19927f1e0e13a40cd7"},
|
||||
{file = "ruff-0.0.277-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f32ec416c24542ca2f9cc8c8b65b84560530d338aaf247a4a78e74b99cd476b4"},
|
||||
{file = "ruff-0.0.277-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:14a7b2f00f149c5a295f188a643ac25226ff8a4d08f7a62b1d4b0a1dc9f9b85c"},
|
||||
{file = "ruff-0.0.277-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9879f59f763cc5628aa01c31ad256a0f4dc61a29355c7315b83c2a5aac932b5"},
|
||||
{file = "ruff-0.0.277-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:f612e0a14b3d145d90eb6ead990064e22f6f27281d847237560b4e10bf2251f3"},
|
||||
{file = "ruff-0.0.277-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:323b674c98078be9aaded5b8b51c0d9c424486566fb6ec18439b496ce79e5998"},
|
||||
{file = "ruff-0.0.277-py3-none-musllinux_1_2_i686.whl", hash = "sha256:3a43fbe026ca1a2a8c45aa0d600a0116bec4dfa6f8bf0c3b871ecda51ef2b5dd"},
|
||||
{file = "ruff-0.0.277-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:734165ea8feb81b0d53e3bf523adc2413fdb76f1264cde99555161dd5a725522"},
|
||||
{file = "ruff-0.0.277-py3-none-win32.whl", hash = "sha256:88d0f2afb2e0c26ac1120e7061ddda2a566196ec4007bd66d558f13b374b9efc"},
|
||||
{file = "ruff-0.0.277-py3-none-win_amd64.whl", hash = "sha256:6fe81732f788894a00f6ade1fe69e996cc9e485b7c35b0f53fb00284397284b2"},
|
||||
{file = "ruff-0.0.277-py3-none-win_arm64.whl", hash = "sha256:2d4444c60f2e705c14cd802b55cd2b561d25bf4311702c463a002392d3116b22"},
|
||||
{file = "ruff-0.0.277.tar.gz", hash = "sha256:2dab13cdedbf3af6d4427c07f47143746b6b95d9e4a254ac369a0edb9280a0d2"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -2301,13 +2298,13 @@ doc = ["Sphinx", "sphinx-rtd-theme"]
|
||||
|
||||
[[package]]
|
||||
name = "sentry-sdk"
|
||||
version = "1.25.1"
|
||||
version = "1.26.0"
|
||||
description = "Python client for Sentry (https://sentry.io)"
|
||||
optional = true
|
||||
python-versions = "*"
|
||||
files = [
|
||||
{file = "sentry-sdk-1.25.1.tar.gz", hash = "sha256:aa796423eb6a2f4a8cd7a5b02ba6558cb10aab4ccdc0537f63a47b038c520c38"},
|
||||
{file = "sentry_sdk-1.25.1-py2.py3-none-any.whl", hash = "sha256:79afb7c896014038e358401ad1d36889f97a129dfa8031c49b3f238cd1aa3935"},
|
||||
{file = "sentry-sdk-1.26.0.tar.gz", hash = "sha256:760e4fb6d01c994110507133e08ecd4bdf4d75ee4be77f296a3579796cf73134"},
|
||||
{file = "sentry_sdk-1.26.0-py2.py3-none-any.whl", hash = "sha256:0c9f858337ec3781cf4851972ef42bba8c9828aea116b0dbed8f38c5f9a1896c"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -2410,9 +2407,7 @@ files = [
|
||||
|
||||
[package.dependencies]
|
||||
canonicaljson = ">=1.0.0"
|
||||
importlib-metadata = {version = "*", markers = "python_version < \"3.8\""}
|
||||
pynacl = ">=0.3.0"
|
||||
typing-extensions = {version = ">=3.5", markers = "python_version < \"3.8\""}
|
||||
unpaddedbase64 = ">=1.0.1"
|
||||
|
||||
[package.extras]
|
||||
@@ -2852,39 +2847,6 @@ files = [
|
||||
six = "*"
|
||||
twisted = "*"
|
||||
|
||||
[[package]]
|
||||
name = "typed-ast"
|
||||
version = "1.5.4"
|
||||
description = "a fork of Python 2 and 3 ast modules with type comment support"
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
files = [
|
||||
{file = "typed_ast-1.5.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:669dd0c4167f6f2cd9f57041e03c3c2ebf9063d0757dc89f79ba1daa2bfca9d4"},
|
||||
{file = "typed_ast-1.5.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:211260621ab1cd7324e0798d6be953d00b74e0428382991adfddb352252f1d62"},
|
||||
{file = "typed_ast-1.5.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:267e3f78697a6c00c689c03db4876dd1efdfea2f251a5ad6555e82a26847b4ac"},
|
||||
{file = "typed_ast-1.5.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:c542eeda69212fa10a7ada75e668876fdec5f856cd3d06829e6aa64ad17c8dfe"},
|
||||
{file = "typed_ast-1.5.4-cp310-cp310-win_amd64.whl", hash = "sha256:a9916d2bb8865f973824fb47436fa45e1ebf2efd920f2b9f99342cb7fab93f72"},
|
||||
{file = "typed_ast-1.5.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:79b1e0869db7c830ba6a981d58711c88b6677506e648496b1f64ac7d15633aec"},
|
||||
{file = "typed_ast-1.5.4-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a94d55d142c9265f4ea46fab70977a1944ecae359ae867397757d836ea5a3f47"},
|
||||
{file = "typed_ast-1.5.4-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:183afdf0ec5b1b211724dfef3d2cad2d767cbefac291f24d69b00546c1837fb6"},
|
||||
{file = "typed_ast-1.5.4-cp36-cp36m-win_amd64.whl", hash = "sha256:639c5f0b21776605dd6c9dbe592d5228f021404dafd377e2b7ac046b0349b1a1"},
|
||||
{file = "typed_ast-1.5.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:cf4afcfac006ece570e32d6fa90ab74a17245b83dfd6655a6f68568098345ff6"},
|
||||
{file = "typed_ast-1.5.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed855bbe3eb3715fca349c80174cfcfd699c2f9de574d40527b8429acae23a66"},
|
||||
{file = "typed_ast-1.5.4-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:6778e1b2f81dfc7bc58e4b259363b83d2e509a65198e85d5700dfae4c6c8ff1c"},
|
||||
{file = "typed_ast-1.5.4-cp37-cp37m-win_amd64.whl", hash = "sha256:0261195c2062caf107831e92a76764c81227dae162c4f75192c0d489faf751a2"},
|
||||
{file = "typed_ast-1.5.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2efae9db7a8c05ad5547d522e7dbe62c83d838d3906a3716d1478b6c1d61388d"},
|
||||
{file = "typed_ast-1.5.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7d5d014b7daa8b0bf2eaef684295acae12b036d79f54178b92a2b6a56f92278f"},
|
||||
{file = "typed_ast-1.5.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:370788a63915e82fd6f212865a596a0fefcbb7d408bbbb13dea723d971ed8bdc"},
|
||||
{file = "typed_ast-1.5.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4e964b4ff86550a7a7d56345c7864b18f403f5bd7380edf44a3c1fb4ee7ac6c6"},
|
||||
{file = "typed_ast-1.5.4-cp38-cp38-win_amd64.whl", hash = "sha256:683407d92dc953c8a7347119596f0b0e6c55eb98ebebd9b23437501b28dcbb8e"},
|
||||
{file = "typed_ast-1.5.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4879da6c9b73443f97e731b617184a596ac1235fe91f98d279a7af36c796da35"},
|
||||
{file = "typed_ast-1.5.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3e123d878ba170397916557d31c8f589951e353cc95fb7f24f6bb69adc1a8a97"},
|
||||
{file = "typed_ast-1.5.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ebd9d7f80ccf7a82ac5f88c521115cc55d84e35bf8b446fcd7836eb6b98929a3"},
|
||||
{file = "typed_ast-1.5.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:98f80dee3c03455e92796b58b98ff6ca0b2a6f652120c263efdba4d6c5e58f72"},
|
||||
{file = "typed_ast-1.5.4-cp39-cp39-win_amd64.whl", hash = "sha256:0fdbcf2fef0ca421a3f5912555804296f0b0960f0418c440f5d6d3abb549f3e1"},
|
||||
{file = "typed_ast-1.5.4.tar.gz", hash = "sha256:39e21ceb7388e4bb37f4c679d72707ed46c2fbf2a5609b8b8ebc4b067d977df2"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "types-bleach"
|
||||
version = "6.0.0.3"
|
||||
@@ -2964,13 +2926,13 @@ files = [
|
||||
|
||||
[[package]]
|
||||
name = "types-pyopenssl"
|
||||
version = "23.2.0.0"
|
||||
version = "23.2.0.1"
|
||||
description = "Typing stubs for pyOpenSSL"
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
files = [
|
||||
{file = "types-pyOpenSSL-23.2.0.0.tar.gz", hash = "sha256:43e307e8dfb3a7a8208a19874ca060305f460c529d4eaca8a2669ea89499f244"},
|
||||
{file = "types_pyOpenSSL-23.2.0.0-py3-none-any.whl", hash = "sha256:ba803a99440b0c2e9ab4e197084aeefc55bdfe8a580d367b2aa4210810a21240"},
|
||||
{file = "types-pyOpenSSL-23.2.0.1.tar.gz", hash = "sha256:beeb5d22704c625a1e4b6dc756355c5b4af0b980138b702a9d9f932acf020903"},
|
||||
{file = "types_pyOpenSSL-23.2.0.1-py3-none-any.whl", hash = "sha256:0568553f104466f1b8e0db3360fbe6770137d02e21a1a45c209bf2b1b03d90d4"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -3293,5 +3255,5 @@ user-search = ["pyicu"]

[metadata]
lock-version = "2.0"
python-versions = "^3.7.1"
content-hash = "7f31754a1009d7b6c9a1bd7221a0b243ffd510f362c28f0da417aaac16757a87"
python-versions = "^3.8.0"
content-hash = "0a8c6605e7e1d0ac7188a5d02b47a029bfb0f917458b87cb40755911442383d8"
@@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml"

[tool.poetry]
name = "matrix-synapse"
version = "1.87.0"
version = "1.88.0rc1"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "Apache-2.0"
@@ -147,7 +147,7 @@ synapse_review_recent_signups = "synapse._scripts.review_recent_signups:main"
update_synapse_database = "synapse._scripts.update_synapse_database:main"

[tool.poetry.dependencies]
python = "^3.7.1"
python = "^3.8.0"

# Mandatory Dependencies
# ----------------------
@@ -203,9 +203,6 @@ ijson = ">=3.1.4"
matrix-common = "^1.3.0"
# We need packaging.requirements.Requirement, added in 16.1.
packaging = ">=16.1"
# At the time of writing, we only use functions from the version `importlib.metadata`
# which shipped in Python 3.8. This corresponds to version 1.4 of the backport.
importlib_metadata = { version = ">=1.4", python = "<3.8" }
# This is the most recent version of Pydantic with available on common distros.
# We are currently incompatible with >=2.0.0: (https://github.com/matrix-org/synapse/issues/15858)
pydantic = "^1.7.4"
@@ -312,7 +309,7 @@ all = [
# We pin black so that our tests don't start failing on new releases.
isort = ">=5.10.1"
black = ">=22.3.0"
ruff = "0.0.275"
ruff = "0.0.277"

# Typechecking
lxml-stubs = ">=0.4.0"
@@ -376,7 +373,15 @@ build-backend = "poetry.core.masonry.api"

[tool.cibuildwheel]
# Skip unsupported platforms (by us or by Rust).
skip = "cp36* *-musllinux_i686 pp*aarch64 *-musllinux_aarch64"
# See https://cibuildwheel.readthedocs.io/en/stable/options/#build-skip for the list of build targets.
# We skip:
# - CPython 3.6 and 3.7: EOLed
# - PyPy 3.7: we only support Python 3.8+
# - musllinux i686: excluded to reduce number of wheels we build.
# c.f. https://github.com/matrix-org/synapse/pull/12595#discussion_r963107677
# - PyPy on Aarch64 and musllinux on aarch64: too slow to build.
# c.f. https://github.com/matrix-org/synapse/pull/14259
skip = "cp36* cp37* pp37* *-musllinux_i686 pp*aarch64 *-musllinux_aarch64"

# We need a rust compiler
before-all = "curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain stable -y --profile minimal"
@@ -22,15 +22,18 @@ from typing import Collection, Optional, Sequence, Set

# These are expanded inside the dockerfile to be a fully qualified image name.
# e.g. docker.io/library/debian:bullseye
#
# If an EOL is forced by a Python version and we're dropping support for it, make sure
# to remove references to the distibution across Synapse (search for "bullseye" for
# example)
DISTS = (
"debian:buster", # oldstable: EOL 2022-08
"debian:bullseye",
"debian:bookworm",
"debian:sid",
"ubuntu:focal", # 20.04 LTS (our EOL forced by Py38 on 2024-10-14)
"ubuntu:jammy", # 22.04 LTS (EOL 2027-04)
"ubuntu:kinetic", # 22.10 (EOL 2023-07-20)
"ubuntu:lunar", # 23.04 (EOL 2024-01)
"debian:bullseye", # (EOL ~2024-07) (our EOL forced by Python 3.9 is 2025-10-05)
"debian:bookworm", # (EOL not specified yet) (our EOL forced by Python 3.11 is 2027-10-24)
"debian:sid", # (EOL not specified yet) (our EOL forced by Python 3.11 is 2027-10-24)
"ubuntu:focal", # 20.04 LTS (EOL 2025-04) (our EOL forced by Python 3.8 is 2024-10-14)
"ubuntu:jammy", # 22.04 LTS (EOL 2027-04) (our EOL forced by Python 3.10 is 2026-10-04)
"ubuntu:kinetic", # 22.10 (EOL 2023-07-20) (our EOL forced by Python 3.10 is 2026-10-04)
"ubuntu:lunar", # 23.04 (EOL 2024-01) (our EOL forced by Python 3.11 is 2027-10-24)
)

DESC = """\
@@ -253,6 +253,10 @@ if [[ -n "$ASYNCIO_REACTOR" ]]; then
export PASS_SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR=true
fi

if [[ -n "$UNIX_SOCKETS" ]]; then
# Enable full on Unix socket mode for Synapse, Redis and Postgresql
export PASS_SYNAPSE_USE_UNIX_SOCKET=1
fi

if [[ -n "$SYNAPSE_TEST_LOG_LEVEL" ]]; then
# Set the log level to what is desired
@@ -25,8 +25,8 @@ from synapse.util.rust import check_rust_lib_up_to_date
from synapse.util.stringutils import strtobool

# Check that we're not running on an unsupported Python version.
if sys.version_info < (3, 7):
print("Synapse requires Python 3.7 or above.")
if sys.version_info < (3, 8):
print("Synapse requires Python 3.8 or above.")
sys.exit(1)

# Allow using the asyncio reactor via env var.
@@ -61,6 +61,7 @@ from synapse.storage.databases.main.deviceinbox import DeviceInboxBackgroundUpda
from synapse.storage.databases.main.devices import DeviceBackgroundUpdateStore
from synapse.storage.databases.main.e2e_room_keys import EndToEndRoomKeyBackgroundStore
from synapse.storage.databases.main.end_to_end_keys import EndToEndKeyBackgroundStore
from synapse.storage.databases.main.event_federation import EventFederationWorkerStore
from synapse.storage.databases.main.event_push_actions import EventPushActionsStore
from synapse.storage.databases.main.events_bg_updates import (
EventsBackgroundUpdatesStore,
@@ -196,6 +197,11 @@ IGNORED_TABLES = {
"ui_auth_sessions",
"ui_auth_sessions_credentials",
"ui_auth_sessions_ips",
# Ignore the worker locks table, as a) there shouldn't be any acquired locks
# after porting, and b) the circular foreign key constraints make it hard to
# port.
"worker_read_write_locks_mode",
"worker_read_write_locks",
}


@@ -239,6 +245,7 @@ class Store(
PresenceBackgroundUpdateStore,
ReceiptsBackgroundUpdateStore,
RelationsWorkerStore,
EventFederationWorkerStore,
):
def execute(self, f: Callable[..., R], *args: Any, **kwargs: Any) -> Awaitable[R]:
return self.db_pool.runInteraction(f.__name__, f, *args, **kwargs)
@@ -803,7 +810,9 @@ class Porter:
)
# Map from table name to args passed to `handle_table`, i.e. a tuple
# of: `postgres_size`, `table_size`, `forward_chunk`, `backward_chunk`.
tables_to_port_info_map = {r[0]: r[1:] for r in setup_res}
tables_to_port_info_map = {
r[0]: r[1:] for r in setup_res if r[0] not in IGNORED_TABLES
}
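To make the effect of the new comprehension concrete, here is a hedged illustration; the rows below are made up and only mimic the (table, postgres_size, table_size, forward_chunk, backward_chunk) shape described in the comment above:

# Made-up example rows, not real output from the porter script.
setup_res = [
    ("events", 1000, 1000, 5, 0),
    ("worker_read_write_locks", 3, 3, 1, 0),
]
IGNORED_TABLES = {"worker_read_write_locks", "worker_read_write_locks_mode"}

tables_to_port_info_map = {
    r[0]: r[1:] for r in setup_res if r[0] not in IGNORED_TABLES
}
# -> {"events": (1000, 1000, 5, 0)}: ignored tables are no longer queued for copying.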
# Step 5. Do the copying.
#
@@ -31,7 +31,7 @@ class AuthConfig(Config):

# The default value of password_config.enabled is True, unless msc3861 is enabled.
msc3861_enabled = (
config.get("experimental_features", {})
(config.get("experimental_features") or {})
.get("msc3861", {})
.get("enabled", False)
)
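The switch from a .get(..., {}) default to an "or {}" guard is about YAML sections that are present but empty: they parse to None, and dict.get's default only applies when the key is missing. A minimal sketch with a hypothetical config dict:

# An "experimental_features:" line left blank in homeserver.yaml parses to None.
config = {"experimental_features": None}

config.get("experimental_features", {})      # -> None; .get("msc3861", {}) would raise AttributeError
(config.get("experimental_features") or {})  # -> {}; the chained lookups fall through to False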
@@ -382,9 +382,6 @@ class ExperimentalConfig(Config):
# Check that none of the other config options conflict with MSC3861 when enabled
self.msc3861.check_config_conflicts(self.root)

# MSC4009: E.164 Matrix IDs
self.msc4009_e164_mxids = experimental.get("msc4009_e164_mxids", False)

# MSC4010: Do not allow setting m.push_rules account data.
self.msc4010_push_rules_account_data = experimental.get(
"msc4010_push_rules_account_data", False
@@ -41,11 +41,17 @@ Synapse version. Please use ``%s: name_of_worker`` instead.

_MISSING_MAIN_PROCESS_INSTANCE_MAP_DATA = """
Missing data for a worker to connect to main process. Please include '%s' in the
`instance_map` declared in your shared yaml configuration, or optionally(as a deprecated
solution) in every worker's yaml as various `worker_replication_*` settings as defined
in workers documentation here:
`instance_map` declared in your shared yaml configuration as defined in configuration
documentation here:
`https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#instance_map`
"""

WORKER_REPLICATION_SETTING_DEPRECATED_MESSAGE = """
'%s' is no longer a supported worker setting, please place '%s' onto your shared
configuration under `main` inside the `instance_map`. See workers documentation here:
`https://matrix-org.github.io/synapse/latest/workers.html#worker-configuration`
"""

# This allows for a handy knob when it's time to change from 'master' to
# something with less 'history'
MAIN_PROCESS_INSTANCE_NAME = "master"
@@ -88,7 +94,7 @@ class ConfigModel(BaseModel):
allow_mutation = False


class InstanceLocationConfig(ConfigModel):
class InstanceTcpLocationConfig(ConfigModel):
"""The host and port to talk to an instance via HTTP replication."""

host: StrictStr
@@ -104,6 +110,23 @@ class InstanceLocationConfig(ConfigModel):
return f"{self.host}:{self.port}"


class InstanceUnixLocationConfig(ConfigModel):
"""The socket file to talk to an instance via HTTP replication."""

path: StrictStr

def scheme(self) -> str:
"""Hardcode a retrievable scheme"""
return "unix"

def netloc(self) -> str:
"""Nicely format the address location data"""
return f"{self.path}"


InstanceLocationConfig = Union[InstanceTcpLocationConfig, InstanceUnixLocationConfig]
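As a hedged sketch of what the two variants accept once the shared YAML is parsed into a dict (the worker names, port and socket path below are invented for illustration): an entry carrying host/port validates as InstanceTcpLocationConfig, while one carrying only a path validates as InstanceUnixLocationConfig.

# Illustrative only, not configuration taken from this PR.
example_instance_map = {
    "main": {"path": "/run/synapse/main_replication.sock"},
    "event_persister1": {"host": "127.0.0.1", "port": 8034},
}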
@attr.s
|
||||
class WriterLocations:
|
||||
"""Specifies the instances that write various streams.
|
||||
@@ -216,22 +239,37 @@ class WorkerConfig(Config):
|
||||
)
|
||||
|
||||
# A map from instance name to host/port of their HTTP replication endpoint.
|
||||
# Check if the main process is declared. Inject it into the map if it's not,
|
||||
# based first on if a 'main' block is declared then on 'worker_replication_*'
|
||||
# data. If both are available, default to instance_map. The main process
|
||||
# itself doesn't need this data as it would never have to talk to itself.
|
||||
# Check if the main process is declared. The main process itself doesn't need
|
||||
# this data as it would never have to talk to itself.
|
||||
instance_map: Dict[str, Any] = config.get("instance_map", {})
|
||||
|
||||
if self.instance_name is not MAIN_PROCESS_INSTANCE_NAME:
|
||||
# TODO: The next 3 condition blocks can be deleted after some time has
|
||||
# passed and we're ready to stop checking for these settings.
|
||||
# The host used to connect to the main synapse
|
||||
main_host = config.get("worker_replication_host", None)
|
||||
if main_host:
|
||||
raise ConfigError(
|
||||
WORKER_REPLICATION_SETTING_DEPRECATED_MESSAGE
|
||||
% ("worker_replication_host", main_host)
|
||||
)
|
||||
|
||||
# The port on the main synapse for HTTP replication endpoint
|
||||
main_port = config.get("worker_replication_http_port")
|
||||
if main_port:
|
||||
raise ConfigError(
|
||||
WORKER_REPLICATION_SETTING_DEPRECATED_MESSAGE
|
||||
% ("worker_replication_http_port", main_port)
|
||||
)
|
||||
|
||||
# The tls mode on the main synapse for HTTP replication endpoint.
|
||||
# For backward compatibility this defaults to False.
|
||||
main_tls = config.get("worker_replication_http_tls", False)
|
||||
if main_tls:
|
||||
raise ConfigError(
|
||||
WORKER_REPLICATION_SETTING_DEPRECATED_MESSAGE
|
||||
% ("worker_replication_http_tls", main_tls)
|
||||
)
|
||||
|
||||
# For now, accept 'main' in the instance_map, but the replication system
|
||||
# expects 'master', force that into being until it's changed later.
|
||||
@@ -241,30 +279,20 @@ class WorkerConfig(Config):
|
||||
]
|
||||
del instance_map[MAIN_PROCESS_INSTANCE_MAP_NAME]
|
||||
|
||||
# This is the backwards compatibility bit that handles the
|
||||
# worker_replication_* bits using setdefault() to not overwrite anything.
|
||||
elif main_host is not None and main_port is not None:
|
||||
instance_map.setdefault(
|
||||
MAIN_PROCESS_INSTANCE_NAME,
|
||||
{
|
||||
"host": main_host,
|
||||
"port": main_port,
|
||||
"tls": main_tls,
|
||||
},
|
||||
)
|
||||
|
||||
else:
|
||||
# If we've gotten here, it means that the main process is not on the
|
||||
# instance_map and that not enough worker_replication_* variables
|
||||
# were declared in the worker's yaml.
|
||||
# instance_map.
|
||||
raise ConfigError(
|
||||
_MISSING_MAIN_PROCESS_INSTANCE_MAP_DATA
|
||||
% MAIN_PROCESS_INSTANCE_MAP_NAME
|
||||
)
|
||||
|
||||
# type-ignore: the expression `Union[A, B]` is not a Type[Union[A, B]] currently
|
||||
self.instance_map: Dict[
|
||||
str, InstanceLocationConfig
|
||||
] = parse_and_validate_mapping(instance_map, InstanceLocationConfig)
|
||||
] = parse_and_validate_mapping(
|
||||
instance_map, InstanceLocationConfig # type: ignore[arg-type]
|
||||
)
|
||||
|
||||
# Map from type of streams to source, c.f. WriterLocations.
|
||||
writers = config.get("stream_writers") or {}
|
||||
|
||||
@@ -277,7 +277,9 @@ class DirectoryHandler:
except RequestSendFailed:
raise SynapseError(502, "Failed to fetch alias")
except CodeMessageException as e:
logging.warning("Error retrieving alias")
logging.warning(
"Error retrieving alias %s -> %s %s", room_alias, e.code, e.msg
)
if e.code == 404:
fed_result = None
else:

@@ -143,15 +143,10 @@ class RegistrationHandler:
assigned_user_id: Optional[str] = None,
inhibit_user_in_use_error: bool = False,
) -> None:
if types.contains_invalid_mxid_characters(
localpart, self.hs.config.experimental.msc4009_e164_mxids
):
extra_chars = (
"=_-./+" if self.hs.config.experimental.msc4009_e164_mxids else "=_-./"
)
if types.contains_invalid_mxid_characters(localpart):
raise SynapseError(
400,
f"User ID can only contain characters a-z, 0-9, or '{extra_chars}'",
"User ID can only contain characters a-z, 0-9, or '=_-./+'",
Codes.INVALID_USERNAME,
)
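With the MSC4009 toggle gone, '+' is always part of the allowed localpart set. A rough, hedged approximation of what types.contains_invalid_mxid_characters now checks (the real implementation uses a character set defined in synapse.types, not this regex):

import re

# Approximation only: lowercase letters, digits and '=_-./+' are accepted.
_LOCALPART_RE = re.compile(r"[a-z0-9=_\-./+]*")

def contains_invalid_mxid_characters(localpart: str) -> bool:
    # True when any character falls outside the allowed set.
    return _LOCALPART_RE.fullmatch(localpart) is None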
@@ -27,9 +27,9 @@ from synapse.http.servlet import parse_string
|
||||
from synapse.http.site import SynapseRequest
|
||||
from synapse.module_api import ModuleApi
|
||||
from synapse.types import (
|
||||
MXID_LOCALPART_ALLOWED_CHARACTERS,
|
||||
UserID,
|
||||
map_username_to_mxid_localpart,
|
||||
mxid_localpart_allowed_characters,
|
||||
)
|
||||
from synapse.util.iterutils import chunk_seq
|
||||
|
||||
@@ -371,7 +371,7 @@ class SamlHandler:
|
||||
|
||||
|
||||
DOT_REPLACE_PATTERN = re.compile(
|
||||
"[^%s]" % (re.escape("".join(mxid_localpart_allowed_characters)),)
|
||||
"[^%s]" % (re.escape("".join(MXID_LOCALPART_ALLOWED_CHARACTERS)),)
|
||||
)
|
||||
|
||||
|
||||
|
||||
@@ -225,8 +225,6 @@ class SsoHandler:
|
||||
|
||||
self._consent_at_registration = hs.config.consent.user_consent_at_registration
|
||||
|
||||
self._e164_mxids = hs.config.experimental.msc4009_e164_mxids
|
||||
|
||||
def register_identity_provider(self, p: SsoIdentityProvider) -> None:
|
||||
p_id = p.idp_id
|
||||
assert p_id not in self._identity_providers
|
||||
@@ -713,7 +711,7 @@ class SsoHandler:
|
||||
# Since the localpart is provided via a potentially untrusted module,
|
||||
# ensure the MXID is valid before registering.
|
||||
if not attributes.localpart or contains_invalid_mxid_characters(
|
||||
attributes.localpart, self._e164_mxids
|
||||
attributes.localpart
|
||||
):
|
||||
raise MappingException("localpart is invalid: %s" % (attributes.localpart,))
|
||||
|
||||
@@ -946,7 +944,7 @@ class SsoHandler:
|
||||
localpart,
|
||||
)
|
||||
|
||||
if contains_invalid_mxid_characters(localpart, self._e164_mxids):
|
||||
if contains_invalid_mxid_characters(localpart):
|
||||
raise SynapseError(400, "localpart is invalid: %s" % (localpart,))
|
||||
user_id = UserID(localpart, self._server_name).to_string()
|
||||
user_infos = await self._store.get_users_by_id_case_insensitive(user_id)
|
||||
|
||||
@@ -18,7 +18,11 @@ from typing import Dict, Optional
|
||||
from zope.interface import implementer
|
||||
|
||||
from twisted.internet import defer
|
||||
from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS
|
||||
from twisted.internet.endpoints import (
|
||||
HostnameEndpoint,
|
||||
UNIXClientEndpoint,
|
||||
wrapClientTLS,
|
||||
)
|
||||
from twisted.internet.interfaces import IStreamClientEndpoint
|
||||
from twisted.python.failure import Failure
|
||||
from twisted.web.client import URI, HTTPConnectionPool, _AgentBase
|
||||
@@ -32,7 +36,11 @@ from twisted.web.iweb import (
|
||||
IResponse,
|
||||
)
|
||||
|
||||
from synapse.config.workers import InstanceLocationConfig
|
||||
from synapse.config.workers import (
|
||||
InstanceLocationConfig,
|
||||
InstanceTcpLocationConfig,
|
||||
InstanceUnixLocationConfig,
|
||||
)
|
||||
from synapse.types import ISynapseReactor
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@@ -40,7 +48,7 @@ logger = logging.getLogger(__name__)

@implementer(IAgentEndpointFactory)
class ReplicationEndpointFactory:
"""Connect to a given TCP socket"""
"""Connect to a given TCP or UNIX socket"""

def __init__(
self,
@@ -64,24 +72,27 @@ class ReplicationEndpointFactory:
# The given URI has a special scheme and includes the worker name. The
# actual connection details are pulled from the instance map.
worker_name = uri.netloc.decode("utf-8")
scheme = self.instance_map[worker_name].scheme()
location_config = self.instance_map[worker_name]
scheme = location_config.scheme()

if scheme in ("http", "https"):
if isinstance(location_config, InstanceTcpLocationConfig):
endpoint = HostnameEndpoint(
self.reactor,
self.instance_map[worker_name].host,
self.instance_map[worker_name].port,
location_config.host,
location_config.port,
)
if scheme == "https":
endpoint = wrapClientTLS(
# The 'port' argument below isn't actually used by the function
self.context_factory.creatorForNetloc(
self.instance_map[worker_name].host.encode("utf-8"),
self.instance_map[worker_name].port,
location_config.host.encode("utf-8"),
location_config.port,
),
endpoint,
)
return endpoint
elif isinstance(location_config, InstanceUnixLocationConfig):
return UNIXClientEndpoint(self.reactor, location_config.path)
else:
raise SchemeNotSupported(f"Unsupported scheme: {scheme}")
@@ -138,13 +149,16 @@ class ReplicationAgent(_AgentBase):
An existing connection from the connection pool may be used or a new
one may be created.

Currently, HTTP and HTTPS schemes are supported in uri.
Currently, HTTP, HTTPS and UNIX schemes are supported in uri.

This is copied from twisted.web.client.Agent, except:

* It uses a different pool key (combining the host & port).
* It does not call _ensureValidURI(...) since it breaks on some
UNIX paths.
* It uses a different pool key (combining the scheme with either host & port or
socket path).
* It does not call _ensureValidURI(...) as the strictness of IDNA2008 is not
required when using a worker's name as a 'hostname' for Synapse HTTP
Replication machinery. Specifically, this allows a range of ascii characters
such as '+' and '_' in hostnames/worker's names.

See: twisted.web.iweb.IAgent.request
"""
@@ -154,9 +168,12 @@ class ReplicationAgent(_AgentBase):
except SchemeNotSupported:
return defer.fail(Failure())

worker_name = parsedURI.netloc.decode("utf-8")
key_scheme = self._endpointFactory.instance_map[worker_name].scheme()
key_netloc = self._endpointFactory.instance_map[worker_name].netloc()
# This sets the Pool key to be:
# (http(s), <host:ip>)
key = (parsedURI.scheme, parsedURI.netloc)
# (http(s), <host:port>) or (unix, <socket_path>)
key = (key_scheme, key_netloc)

# _requestWithEndpoint comes from _AgentBase class
return self._requestWithEndpoint(
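Tying this back to the earlier instance_map sketch, the pool key is now derived from the location config rather than the parsed URI, so TCP and Unix connections can never collide (the values below are illustrative, not from this PR):

key_for_tcp_worker = ("http", "127.0.0.1:8034")                      # scheme(), netloc() of a TCP entry
key_for_unix_main = ("unix", "/run/synapse/main_replication.sock")   # scheme(), netloc() of a Unix entry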
@@ -16,13 +16,17 @@
|
||||
import logging
|
||||
import threading
|
||||
import traceback
|
||||
from typing import Dict, Mapping, Set, Tuple
|
||||
from typing import TYPE_CHECKING, Dict, Mapping, Optional, Set, Tuple
|
||||
|
||||
from prometheus_client.core import Counter, Histogram
|
||||
|
||||
from synapse.logging.context import current_context
|
||||
from synapse.logging.opentracing import get_prometheus_exemplar
|
||||
from synapse.metrics import LaterGauge
|
||||
|
||||
if TYPE_CHECKING:
|
||||
import opentracing
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@@ -143,7 +147,12 @@ LaterGauge(
|
||||
|
||||
|
||||
class RequestMetrics:
|
||||
def start(self, time_sec: float, name: str, method: str) -> None:
|
||||
def start(
|
||||
self,
|
||||
time_sec: float,
|
||||
name: str,
|
||||
method: str,
|
||||
) -> None:
|
||||
self.start_ts = time_sec
|
||||
self.start_context = current_context()
|
||||
self.name = name
|
||||
@@ -162,7 +171,13 @@ class RequestMetrics:
|
||||
with _in_flight_requests_lock:
|
||||
_in_flight_requests.add(self)
|
||||
|
||||
def stop(self, time_sec: float, response_code: int, sent_bytes: int) -> None:
|
||||
def stop(
|
||||
self,
|
||||
time_sec: float,
|
||||
response_code: int,
|
||||
sent_bytes: int,
|
||||
span: Optional["opentracing.Span"],
|
||||
) -> None:
|
||||
with _in_flight_requests_lock:
|
||||
_in_flight_requests.discard(self)
|
||||
|
||||
@@ -193,7 +208,7 @@ class RequestMetrics:
|
||||
response_count.labels(self.method, self.name, tag).inc()
|
||||
|
||||
response_timer.labels(self.method, self.name, tag, response_code_str).observe(
|
||||
time_sec - self.start_ts
|
||||
time_sec - self.start_ts, exemplar=get_prometheus_exemplar(span)
|
||||
)
|
||||
|
||||
resource_usage = context.get_resource_usage()
|
||||
|
||||
@@ -910,7 +910,7 @@ def set_cors_headers(request: SynapseRequest) -> None:
|
||||
)
|
||||
request.setHeader(
|
||||
b"Access-Control-Expose-Headers",
|
||||
b"Synapse-Trace-Id",
|
||||
b"Synapse-Trace-Id, Server",
|
||||
)
|
||||
|
||||
|
||||
|
||||
@@ -487,7 +487,9 @@ class SynapseRequest(Request):
|
||||
self._opentracing_span.finish()
|
||||
|
||||
try:
|
||||
self.request_metrics.stop(self.finish_time, self.code, self.sentLength)
|
||||
self.request_metrics.stop(
|
||||
self.finish_time, self.code, self.sentLength, self._opentracing_span
|
||||
)
|
||||
except Exception as e:
|
||||
logger.warning("Failed to stop metrics: %r", e)
|
||||
|
||||
|
||||
@@ -672,6 +672,20 @@ def active_span() -> Optional["opentracing.Span"]:
return opentracing.tracer.active_span


def get_prometheus_exemplar(
span: Optional["opentracing.Span"] = None,
) -> Optional[Dict[str, str]]:
if not span:
span = active_span()

if not span:
return None

trace_id = getattr(span, "trace_id", None)

return {"trace_id": f"{trace_id:x}"}
|
||||
|
||||
|
||||
@ensure_active_span("set a tag")
|
||||
def set_tag(key: str, value: Union[str, bool, int, float]) -> None:
|
||||
"""Sets a tag on the active span"""
|
||||
@@ -1070,7 +1084,7 @@ def trace_servlet(
|
||||
tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER,
|
||||
tags.HTTP_METHOD: request.get_method(),
|
||||
tags.HTTP_URL: request.get_redacted_uri(),
|
||||
tags.PEER_HOST_IPV6: request.getClientAddress().host,
|
||||
tags.PEER_HOST_IPV6: request.get_client_ip_if_available(),
|
||||
}
|
||||
|
||||
request_name = request.request_metrics.name
|
||||
@@ -1091,9 +1105,11 @@ def trace_servlet(
|
||||
# with JsonResource).
|
||||
scope.span.set_operation_name(request.request_metrics.name)
|
||||
|
||||
# Mypy seems to think that start_context.tag below can be Optional[str], but
|
||||
# that doesn't appear to be correct and works in practice.
|
||||
request_tags[
|
||||
SynapseTags.REQUEST_TAG
|
||||
] = request.request_metrics.start_context.tag
|
||||
] = request.request_metrics.start_context.tag # type: ignore[assignment]
|
||||
|
||||
# set the tags *after* the servlet completes, in case it decided to
|
||||
# prioritise the span (tags will get dropped on unprioritised spans)
|
||||
|
||||
@@ -35,6 +35,7 @@ from synapse.api.errors import (
|
||||
from synapse.config.repository import ThumbnailRequirement
|
||||
from synapse.http.site import SynapseRequest
|
||||
from synapse.logging.context import defer_to_thread
|
||||
from synapse.logging.opentracing import trace
|
||||
from synapse.media._base import (
|
||||
FileInfo,
|
||||
Responder,
|
||||
@@ -174,6 +175,7 @@ class MediaRepository:
|
||||
else:
|
||||
self.recently_accessed_locals.add(media_id)
|
||||
|
||||
@trace
|
||||
async def create_content(
|
||||
self,
|
||||
media_type: str,
|
||||
@@ -710,6 +712,7 @@ class MediaRepository:
|
||||
# Could not generate thumbnail.
|
||||
return None
|
||||
|
||||
@trace
|
||||
async def _generate_thumbnails(
|
||||
self,
|
||||
server_name: Optional[str],
|
||||
|
||||
@@ -38,6 +38,7 @@ from twisted.protocols.basic import FileSender
|
||||
|
||||
from synapse.api.errors import NotFoundError
|
||||
from synapse.logging.context import defer_to_thread, make_deferred_yieldable
|
||||
from synapse.logging.opentracing import start_active_span, trace, trace_with_opname
|
||||
from synapse.util import Clock
|
||||
from synapse.util.file_consumer import BackgroundFileConsumer
|
||||
|
||||
@@ -76,6 +77,7 @@ class MediaStorage:
|
||||
self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker
|
||||
self.clock = hs.get_clock()
|
||||
|
||||
@trace_with_opname("MediaStorage.store_file")
|
||||
async def store_file(self, source: IO, file_info: FileInfo) -> str:
|
||||
"""Write `source` to the on disk media store, and also any other
|
||||
configured storage providers
|
||||
@@ -89,16 +91,19 @@ class MediaStorage:
|
||||
"""
|
||||
|
||||
with self.store_into_file(file_info) as (f, fname, finish_cb):
|
||||
# Write to the main repository
|
||||
# Write to the main media repository
|
||||
await self.write_to_file(source, f)
|
||||
# Write to the other storage providers
|
||||
await finish_cb()
|
||||
|
||||
return fname
|
||||
|
||||
@trace_with_opname("MediaStorage.write_to_file")
|
||||
async def write_to_file(self, source: IO, output: IO) -> None:
|
||||
"""Asynchronously write the `source` to `output`."""
|
||||
await defer_to_thread(self.reactor, _write_file_synchronously, source, output)
|
||||
|
||||
@trace_with_opname("MediaStorage.store_into_file")
|
||||
@contextlib.contextmanager
|
||||
def store_into_file(
|
||||
self, file_info: FileInfo
|
||||
@@ -113,9 +118,9 @@ class MediaStorage:
|
||||
fname can be used to read the contents from after upload, e.g. to
|
||||
generate thumbnails.
|
||||
|
||||
finish_cb must be called and waited on after the file has been
|
||||
successfully been written to. Should not be called if there was an
|
||||
error.
|
||||
finish_cb must be called and waited on after the file has been successfully
written to. Should not be called if there was an error. Checks for spam and
stores the file into the configured storage providers.
|
||||
|
||||
Args:
|
||||
file_info: Info about the file to store
|
||||
@@ -135,35 +140,48 @@ class MediaStorage:
|
||||
|
||||
finished_called = [False]
|
||||
|
||||
main_media_repo_write_trace_scope = start_active_span(
|
||||
"writing to main media repo"
|
||||
)
|
||||
main_media_repo_write_trace_scope.__enter__()
|
||||
|
||||
try:
|
||||
with open(fname, "wb") as f:
|
||||
|
||||
async def finish() -> None:
|
||||
# Ensure that all writes have been flushed and close the
|
||||
# file.
|
||||
f.flush()
|
||||
f.close()
|
||||
# When someone calls finish, we assume they are done writing to the main media repo
|
||||
main_media_repo_write_trace_scope.__exit__(None, None, None)
|
||||
|
||||
spam_check = await self._spam_checker_module_callbacks.check_media_file_for_spam(
|
||||
ReadableFileWrapper(self.clock, fname), file_info
|
||||
)
|
||||
if spam_check != self._spam_checker_module_callbacks.NOT_SPAM:
|
||||
logger.info("Blocking media due to spam checker")
|
||||
# Note that we'll delete the stored media, due to the
|
||||
# try/except below. The media also won't be stored in
|
||||
# the DB.
|
||||
# We currently ignore any additional field returned by
|
||||
# the spam-check API.
|
||||
raise SpamMediaException(errcode=spam_check[0])
|
||||
with start_active_span("writing to other storage providers"):
|
||||
# Ensure that all writes have been flushed and close the
|
||||
# file.
|
||||
f.flush()
|
||||
f.close()
|
||||
|
||||
for provider in self.storage_providers:
|
||||
await provider.store_file(path, file_info)
|
||||
spam_check = await self._spam_checker_module_callbacks.check_media_file_for_spam(
|
||||
ReadableFileWrapper(self.clock, fname), file_info
|
||||
)
|
||||
if spam_check != self._spam_checker_module_callbacks.NOT_SPAM:
|
||||
logger.info("Blocking media due to spam checker")
|
||||
# Note that we'll delete the stored media, due to the
|
||||
# try/except below. The media also won't be stored in
|
||||
# the DB.
|
||||
# We currently ignore any additional field returned by
|
||||
# the spam-check API.
|
||||
raise SpamMediaException(errcode=spam_check[0])
|
||||
|
||||
finished_called[0] = True
|
||||
for provider in self.storage_providers:
|
||||
with start_active_span(str(provider)):
|
||||
await provider.store_file(path, file_info)
|
||||
|
||||
finished_called[0] = True
|
||||
|
||||
yield f, fname, finish
|
||||
except Exception as e:
|
||||
try:
|
||||
main_media_repo_write_trace_scope.__exit__(
|
||||
type(e), None, e.__traceback__
|
||||
)
|
||||
os.remove(fname)
|
||||
except Exception:
|
||||
pass
|
||||
@@ -171,7 +189,11 @@ class MediaStorage:
|
||||
raise e from None
|
||||
|
||||
if not finished_called:
|
||||
raise Exception("Finished callback not called")
|
||||
exc = Exception("Finished callback not called")
|
||||
main_media_repo_write_trace_scope.__exit__(
|
||||
type(exc), None, exc.__traceback__
|
||||
)
|
||||
raise exc
|
||||
|
||||
async def fetch_media(self, file_info: FileInfo) -> Optional[Responder]:
|
||||
"""Attempts to fetch media described by file_info from the local cache
|
||||
@@ -214,6 +236,7 @@ class MediaStorage:
|
||||
|
||||
return None
|
||||
|
||||
@trace
|
||||
async def ensure_media_is_in_local_cache(self, file_info: FileInfo) -> str:
|
||||
"""Ensures that the given file is in the local cache. Attempts to
|
||||
download it from storage providers if it isn't.
|
||||
@@ -259,6 +282,7 @@ class MediaStorage:
|
||||
|
||||
raise NotFoundError()
|
||||
|
||||
@trace
|
||||
def _file_info_to_path(self, file_info: FileInfo) -> str:
|
||||
"""Converts file_info into a relative path.
|
||||
|
||||
@@ -301,6 +325,7 @@ class MediaStorage:
|
||||
return self.filepaths.local_media_filepath_rel(file_info.file_id)
|
||||
|
||||
|
||||
@trace
|
||||
def _write_file_synchronously(source: IO, dest: IO) -> None:
|
||||
"""Write `source` to the file like `dest` synchronously. Should be called
|
||||
from a thread.
|
||||
|
||||
@@ -20,6 +20,7 @@ from typing import TYPE_CHECKING, Callable, Optional
|
||||
|
||||
from synapse.config._base import Config
|
||||
from synapse.logging.context import defer_to_thread, run_in_background
|
||||
from synapse.logging.opentracing import start_active_span, trace_with_opname
|
||||
from synapse.util.async_helpers import maybe_awaitable
|
||||
|
||||
from ._base import FileInfo, Responder
|
||||
@@ -86,6 +87,7 @@ class StorageProviderWrapper(StorageProvider):
|
||||
def __str__(self) -> str:
|
||||
return "StorageProviderWrapper[%s]" % (self.backend,)
|
||||
|
||||
@trace_with_opname("StorageProviderWrapper.store_file")
|
||||
async def store_file(self, path: str, file_info: FileInfo) -> None:
|
||||
if not file_info.server_name and not self.store_local:
|
||||
return None
|
||||
@@ -114,6 +116,7 @@ class StorageProviderWrapper(StorageProvider):
|
||||
|
||||
run_in_background(store)
|
||||
|
||||
@trace_with_opname("StorageProviderWrapper.fetch")
|
||||
async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]:
|
||||
if file_info.url_cache:
|
||||
# Files in the URL preview cache definitely aren't stored here,
|
||||
@@ -141,6 +144,7 @@ class FileStorageProviderBackend(StorageProvider):
|
||||
def __str__(self) -> str:
|
||||
return "FileStorageProviderBackend[%s]" % (self.base_directory,)
|
||||
|
||||
@trace_with_opname("FileStorageProviderBackend.store_file")
|
||||
async def store_file(self, path: str, file_info: FileInfo) -> None:
|
||||
"""See StorageProvider.store_file"""
|
||||
|
||||
@@ -152,13 +156,15 @@ class FileStorageProviderBackend(StorageProvider):
|
||||
|
||||
# mypy needs help inferring the type of the second parameter, which is generic
|
||||
shutil_copyfile: Callable[[str, str], str] = shutil.copyfile
|
||||
await defer_to_thread(
|
||||
self.hs.get_reactor(),
|
||||
shutil_copyfile,
|
||||
primary_fname,
|
||||
backup_fname,
|
||||
)
|
||||
with start_active_span("shutil_copyfile"):
|
||||
await defer_to_thread(
|
||||
self.hs.get_reactor(),
|
||||
shutil_copyfile,
|
||||
primary_fname,
|
||||
backup_fname,
|
||||
)
|
||||
|
||||
@trace_with_opname("FileStorageProviderBackend.fetch")
|
||||
async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]:
|
||||
"""See StorageProvider.fetch"""
|
||||
|
||||
|
||||
@@ -19,6 +19,8 @@ from typing import Optional, Tuple, Type
|
||||
|
||||
from PIL import Image
|
||||
|
||||
from synapse.logging.opentracing import trace
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
EXIF_ORIENTATION_TAG = 0x0112
|
||||
@@ -82,6 +84,7 @@ class Thumbnailer:
|
||||
# A lot of parsing errors can happen when parsing EXIF
|
||||
logger.info("Error parsing image EXIF information: %s", e)
|
||||
|
||||
@trace
|
||||
def transpose(self) -> Tuple[int, int]:
|
||||
"""Transpose the image using its EXIF Orientation tag
|
||||
|
||||
@@ -131,8 +134,9 @@ class Thumbnailer:
|
||||
else:
|
||||
with self.image:
|
||||
self.image = self.image.convert("RGB")
|
||||
return self.image.resize((width, height), Image.ANTIALIAS)
|
||||
return self.image.resize((width, height), Image.LANCZOS)
|
||||
|
||||
@trace
|
||||
def scale(self, width: int, height: int, output_type: str) -> BytesIO:
|
||||
"""Rescales the image to the given dimensions.
|
||||
|
||||
@@ -142,6 +146,7 @@ class Thumbnailer:
|
||||
with self._resize(width, height) as scaled:
|
||||
return self._encode_image(scaled, output_type)
|
||||
|
||||
@trace
|
||||
def crop(self, width: int, height: int, output_type: str) -> BytesIO:
|
||||
"""Rescales and crops the image to the given dimensions preserving
|
||||
aspect::
|
||||
|
||||
@@ -13,12 +13,13 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from prometheus_client import REGISTRY, CollectorRegistry, generate_latest
|
||||
from prometheus_client import REGISTRY, CollectorRegistry
|
||||
from prometheus_client.openmetrics.exposition import generate_latest
|
||||
|
||||
from twisted.web.resource import Resource
|
||||
from twisted.web.server import Request
|
||||
|
||||
CONTENT_TYPE_LATEST = "text/plain; version=0.0.4; charset=utf-8"
|
||||
CONTENT_TYPE_LATEST = "application/openmetrics-text; version=1.0.0; charset=utf-8"
|
||||
|
||||
|
||||
class MetricsResource(Resource):
|
||||
|
||||
@@ -788,6 +788,7 @@ class SpamCheckerModuleApiCallbacks:
|
||||
|
||||
return RegistrationBehaviour.ALLOW
|
||||
|
||||
@trace
|
||||
async def check_media_file_for_spam(
|
||||
self, file_wrapper: ReadableFileWrapper, file_info: FileInfo
|
||||
) -> Union[Tuple[Codes, dict], Literal["NOT_SPAM"]]:
|
||||
|
||||
@@ -13,6 +13,7 @@
|
||||
# limitations under the License.
|
||||
from typing import Dict
|
||||
|
||||
from synapse.api.constants import EventTypes, Membership
|
||||
from synapse.events import EventBase
|
||||
from synapse.push.presentable_names import calculate_room_name, name_from_member_event
|
||||
from synapse.storage.controllers import StorageControllers
|
||||
@@ -49,7 +50,41 @@ async def get_badge_count(store: DataStore, user_id: str, group_by_room: bool) -
|
||||
async def get_context_for_event(
|
||||
storage: StorageControllers, ev: EventBase, user_id: str
|
||||
) -> Dict[str, str]:
|
||||
ctx = {}
|
||||
ctx: Dict[str, str] = {}
|
||||
|
||||
if ev.internal_metadata.outlier:
|
||||
# We don't have state for outliers, so we can't compute the context
|
||||
# except for invites and knocks. (Such events are known as 'out-of-band
|
||||
# memberships' for the user).
|
||||
if ev.type != EventTypes.Member:
|
||||
return ctx
|
||||
|
||||
# We might be able to pull out the display name for the sender straight
|
||||
# from the membership event
|
||||
event_display_name = ev.content.get("displayname")
|
||||
if event_display_name and ev.state_key == ev.sender:
|
||||
ctx["sender_display_name"] = event_display_name
|
||||
|
||||
room_state = []
|
||||
if ev.content.get("membership") == Membership.INVITE:
|
||||
room_state = ev.unsigned.get("invite_room_state", [])
|
||||
elif ev.content.get("membership") == Membership.KNOCK:
|
||||
room_state = ev.unsigned.get("knock_room_state", [])
|
||||
|
||||
# Ideally we'd reuse the logic in `calculate_room_name`, but that gets
|
||||
# complicated to handle partial events vs pulling events from the DB.
|
||||
for state_dict in room_state:
|
||||
type_tuple = (state_dict["type"], state_dict.get("state_key"))
|
||||
if type_tuple == (EventTypes.Member, ev.sender):
|
||||
display_name = state_dict["content"].get("displayname")
|
||||
if display_name:
|
||||
ctx["sender_display_name"] = display_name
|
||||
elif type_tuple == (EventTypes.Name, ""):
|
||||
room_name = state_dict["content"].get("name")
|
||||
if room_name:
|
||||
ctx["name"] = room_name
|
||||
|
||||
return ctx
|
||||
|
||||
room_state_ids = await storage.state.get_state_ids_for_event(ev.event_id)
|
||||
|
||||
|
||||
@@ -28,6 +28,7 @@ from synapse.http.servlet import (
|
||||
parse_integer,
|
||||
parse_json_object_from_request,
|
||||
parse_string,
|
||||
parse_strings_from_args,
|
||||
)
|
||||
from synapse.http.site import SynapseRequest
|
||||
from synapse.rest.admin._base import (
|
||||
@@ -64,6 +65,9 @@ class UsersRestServletV2(RestServlet):
|
||||
The parameter `guests` can be used to exclude guest users.
|
||||
The parameter `deactivated` can be used to include deactivated users.
|
||||
The parameter `order_by` can be used to order the result.
|
||||
The parameter `not_user_type` can be used to exclude certain user types.
|
||||
Possible values are `bot`, `support` or "empty string".
|
||||
"empty string" here means to exclude users without a type.
|
||||
"""
|
||||
|
||||
def __init__(self, hs: "HomeServer"):
|
||||
@@ -131,6 +135,10 @@ class UsersRestServletV2(RestServlet):
|
||||
|
||||
direction = parse_enum(request, "dir", Direction, default=Direction.FORWARDS)
|
||||
|
||||
# twisted.web.server.Request.args is incorrectly defined as Optional[Any]
|
||||
args: Dict[bytes, List[bytes]] = request.args # type: ignore
|
||||
not_user_types = parse_strings_from_args(args, "not_user_type")
|
||||
|
||||
users, total = await self.store.get_users_paginate(
|
||||
start,
|
||||
limit,
|
||||
@@ -141,6 +149,7 @@ class UsersRestServletV2(RestServlet):
|
||||
order_by,
|
||||
direction,
|
||||
approved,
|
||||
not_user_types,
|
||||
)
|
||||
|
||||
# If support for MSC3866 is not enabled, don't show the approval flag.
|
||||
|
||||
@@ -48,7 +48,7 @@
|
||||
</div>
|
||||
<h1>It works! Synapse is running</h1>
|
||||
<p>Your Synapse server is listening on this port and is ready for messages.</p>
|
||||
<p>To use this server you'll need <a href="https://matrix.org/docs/projects/try-matrix-now.html#clients" target="_blank" rel="noopener noreferrer">a Matrix client</a>.
|
||||
<p>To use this server you'll need <a href="https://matrix.org/ecosystem/clients/" target="_blank" rel="noopener noreferrer">a Matrix client</a>.
|
||||
</p>
|
||||
<p>Welcome to the Matrix universe :)</p>
|
||||
<hr>
|
||||
|
||||
@@ -11,8 +11,9 @@
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import abc
|
||||
import logging
|
||||
from enum import IntEnum
|
||||
from enum import Enum, IntEnum
|
||||
from types import TracebackType
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
@@ -24,12 +25,16 @@ from typing import (
|
||||
Iterable,
|
||||
List,
|
||||
Optional,
|
||||
Sequence,
|
||||
Tuple,
|
||||
Type,
|
||||
)
|
||||
|
||||
import attr
|
||||
from pydantic import BaseModel
|
||||
|
||||
from synapse.metrics.background_process_metrics import run_as_background_process
|
||||
from synapse.storage.engines import PostgresEngine
|
||||
from synapse.storage.types import Connection, Cursor
|
||||
from synapse.types import JsonDict
|
||||
from synapse.util import Clock, json_encoder
|
||||
@@ -48,6 +53,83 @@ DEFAULT_BATCH_SIZE_CALLBACK = Callable[[str, str], Awaitable[int]]
|
||||
MIN_BATCH_SIZE_CALLBACK = Callable[[str, str], Awaitable[int]]
|
||||
|
||||
|
||||
class Constraint(metaclass=abc.ABCMeta):
|
||||
"""Base class representing different constraints.
|
||||
|
||||
Used by `register_background_validate_constraint_and_delete_rows`.
|
||||
"""
|
||||
|
||||
@abc.abstractmethod
|
||||
def make_check_clause(self, table: str) -> str:
|
||||
"""Returns an SQL expression that checks the row passes the constraint."""
|
||||
pass
|
||||
|
||||
@abc.abstractmethod
|
||||
def make_constraint_clause_postgres(self) -> str:
|
||||
"""Returns an SQL clause for creating the constraint.
|
||||
|
||||
Only used on Postgres DBs
|
||||
"""
|
||||
pass
|
||||
|
||||
|
||||
@attr.s(auto_attribs=True)
|
||||
class ForeignKeyConstraint(Constraint):
|
||||
"""A foreign key constraint.
|
||||
|
||||
Attributes:
|
||||
referenced_table: The "parent" table name.
|
||||
columns: The list of mappings of columns from table to referenced table
|
||||
deferred: Whether to defer checking of the constraint to the end of the
|
||||
transaction. This is useful for e.g. backwards compatibility where
|
||||
an older version inserted data in the wrong order.
|
||||
"""
|
||||
|
||||
referenced_table: str
|
||||
columns: Sequence[Tuple[str, str]]
|
||||
deferred: bool
|
||||
|
||||
def make_check_clause(self, table: str) -> str:
|
||||
join_clause = " AND ".join(
|
||||
f"{col1} = {table}.{col2}" for col1, col2 in self.columns
|
||||
)
|
||||
return f"EXISTS (SELECT 1 FROM {self.referenced_table} WHERE {join_clause})"
|
||||
|
||||
def make_constraint_clause_postgres(self) -> str:
|
||||
column1_list = ", ".join(col1 for col1, col2 in self.columns)
|
||||
column2_list = ", ".join(col2 for col1, col2 in self.columns)
|
||||
defer_clause = " DEFERRABLE INITIALLY DEFERRED" if self.deferred else ""
|
||||
return f"FOREIGN KEY ({column1_list}) REFERENCES {self.referenced_table} ({column2_list}) {defer_clause}"
|
||||
|
||||
|
||||
@attr.s(auto_attribs=True)
|
||||
class NotNullConstraint(Constraint):
|
||||
"""A NOT NULL column constraint"""
|
||||
|
||||
column: str
|
||||
|
||||
def make_check_clause(self, table: str) -> str:
|
||||
return f"{self.column} IS NOT NULL"
|
||||
|
||||
def make_constraint_clause_postgres(self) -> str:
|
||||
return f"CHECK ({self.column} IS NOT NULL)"
|
||||
|
||||
|
||||
class ValidateConstraintProgress(BaseModel):
|
||||
"""The format of the progress JSON for validate constraint background
|
||||
updates.
|
||||
|
||||
Used by `register_background_validate_constraint_and_delete_rows`.
|
||||
"""
|
||||
|
||||
class State(str, Enum):
|
||||
check = "check"
|
||||
validate = "validate"
|
||||
|
||||
state: State = State.validate
|
||||
lower_bound: Sequence[Any] = ()
|
||||
|
||||
|
||||
@attr.s(slots=True, frozen=True, auto_attribs=True)
|
||||
class _BackgroundUpdateHandler:
|
||||
"""A handler for a given background update.
|
||||
@@ -740,6 +822,179 @@ class BackgroundUpdater:
|
||||
logger.info("Adding index %s to %s", index_name, table)
|
||||
await self.db_pool.runWithConnection(runner)
|
||||
|
||||
def register_background_validate_constraint_and_delete_rows(
|
||||
self,
|
||||
update_name: str,
|
||||
table: str,
|
||||
constraint_name: str,
|
||||
constraint: Constraint,
|
||||
unique_columns: Sequence[str],
|
||||
) -> None:
|
||||
"""Helper for store classes to do a background validate constraint, and
|
||||
delete rows that do not pass the constraint check.
|
||||
|
||||
Note: This deletes rows that don't match the constraint. This may not be
appropriate in all situations, and so the suitability of using this
method should be considered on a case-by-case basis.

This only applies on PostgreSQL.

For SQLite the table gets recreated as part of the schema delta and the
data is copied across synchronously at that point.
|
||||
|
||||
Args:
|
||||
update_name: The name of the background update.
|
||||
table: The table with the invalid constraint.
|
||||
constraint_name: The name of the constraint
|
||||
constraint: A `Constraint` object matching the type of constraint.
|
||||
unique_columns: A sequence of columns that form a unique constraint
|
||||
on the table. Used to iterate over the table.
|
||||
"""
|
||||
|
||||
assert isinstance(
|
||||
self.db_pool.engine, engines.PostgresEngine
|
||||
), "validate constraint background update registered for non-Postres database"
|
||||
|
||||
async def updater(progress: JsonDict, batch_size: int) -> int:
|
||||
return await self.validate_constraint_and_delete_in_background(
|
||||
update_name=update_name,
|
||||
table=table,
|
||||
constraint_name=constraint_name,
|
||||
constraint=constraint,
|
||||
unique_columns=unique_columns,
|
||||
progress=progress,
|
||||
batch_size=batch_size,
|
||||
)
|
||||
|
||||
self._background_update_handlers[update_name] = _BackgroundUpdateHandler(
|
||||
updater, oneshot=True
|
||||
)
|
||||
|
||||
async def validate_constraint_and_delete_in_background(
|
||||
self,
|
||||
update_name: str,
|
||||
table: str,
|
||||
constraint_name: str,
|
||||
constraint: Constraint,
|
||||
unique_columns: Sequence[str],
|
||||
progress: JsonDict,
|
||||
batch_size: int,
|
||||
) -> int:
|
||||
"""Validates a table constraint that has been marked as `NOT VALID`,
|
||||
deleting rows that don't pass the constraint check.
|
||||
|
||||
This will delete rows that do not meet the validation check.
|
||||
|
||||
Args:
update_name: The name of the background update.
table: The table with the invalid constraint.
constraint_name: The name of the constraint.
constraint: A `Constraint` object matching the type of constraint.
unique_columns: A sequence of columns that form a unique constraint
on the table. Used to iterate over the table.
progress: The background update's current progress JSON.
batch_size: The number of rows to check in this batch.
"""
|
||||
|
||||
# We validate the constraint by:
|
||||
# 1. Trying to validate the constraint as is. If this succeeds then
|
||||
# we're done.
|
||||
# 2. Otherwise, we manually scan the table to remove rows that don't
|
||||
# match the constraint.
|
||||
# 3. We try re-validating the constraint.
|
||||
|
||||
parsed_progress = ValidateConstraintProgress.parse_obj(progress)
|
||||
|
||||
if parsed_progress.state == ValidateConstraintProgress.State.check:
|
||||
return_columns = ", ".join(unique_columns)
|
||||
order_columns = ", ".join(unique_columns)
|
||||
|
||||
where_clause = ""
|
||||
args: List[Any] = []
|
||||
if parsed_progress.lower_bound:
|
||||
where_clause = f"""WHERE ({order_columns}) > ({", ".join("?" for _ in unique_columns)})"""
|
||||
args.extend(parsed_progress.lower_bound)
|
||||
|
||||
args.append(batch_size)
|
||||
|
||||
sql = f"""
|
||||
SELECT
|
||||
{return_columns},
|
||||
{constraint.make_check_clause(table)} AS check
|
||||
FROM {table}
|
||||
{where_clause}
|
||||
ORDER BY {order_columns}
|
||||
LIMIT ?
|
||||
"""
|
||||
|
||||
def validate_constraint_in_background_check(
|
||||
txn: "LoggingTransaction",
|
||||
) -> None:
|
||||
txn.execute(sql, args)
|
||||
rows = txn.fetchall()
|
||||
|
||||
new_progress = parsed_progress.copy()
|
||||
|
||||
if not rows:
|
||||
new_progress.state = ValidateConstraintProgress.State.validate
|
||||
self._background_update_progress_txn(
|
||||
txn, update_name, new_progress.dict()
|
||||
)
|
||||
return
|
||||
|
||||
new_progress.lower_bound = rows[-1][:-1]
|
||||
|
||||
to_delete = [row[:-1] for row in rows if not row[-1]]
|
||||
|
||||
if to_delete:
|
||||
logger.warning(
|
||||
"Deleting %d rows that do not pass new constraint",
|
||||
len(to_delete),
|
||||
)
|
||||
|
||||
self.db_pool.simple_delete_many_batch_txn(
|
||||
txn, table=table, keys=unique_columns, values=to_delete
|
||||
)
|
||||
|
||||
self._background_update_progress_txn(
|
||||
txn, update_name, new_progress.dict()
|
||||
)
|
||||
|
||||
await self.db_pool.runInteraction(
|
||||
"validate_constraint_in_background_check",
|
||||
validate_constraint_in_background_check,
|
||||
)
|
||||
|
||||
return batch_size
|
||||
|
||||
elif parsed_progress.state == ValidateConstraintProgress.State.validate:
|
||||
sql = f"ALTER TABLE {table} VALIDATE CONSTRAINT {constraint_name}"
|
||||
|
||||
def validate_constraint_in_background_validate(
|
||||
txn: "LoggingTransaction",
|
||||
) -> None:
|
||||
txn.execute(sql)
|
||||
|
||||
try:
|
||||
await self.db_pool.runInteraction(
|
||||
"validate_constraint_in_background_validate",
|
||||
validate_constraint_in_background_validate,
|
||||
)
|
||||
|
||||
await self._end_background_update(update_name)
|
||||
except self.db_pool.engine.module.IntegrityError as e:
|
||||
# If we get an integrity error here, then we go back and recheck the table.
|
||||
logger.warning("Integrity error when validating constraint: %s", e)
|
||||
await self._background_update_progress(
|
||||
update_name,
|
||||
ValidateConstraintProgress(
|
||||
state=ValidateConstraintProgress.State.check
|
||||
).dict(),
|
||||
)
|
||||
|
||||
return batch_size
|
||||
else:
|
||||
raise Exception(
|
||||
f"Unrecognized state '{parsed_progress.state}' when trying to validate_constraint_and_delete_in_background"
|
||||
)
|
||||
|
||||
async def _end_background_update(self, update_name: str) -> None:
|
||||
"""Removes a completed background update task from the queue.
|
||||
|
||||
@@ -795,3 +1050,86 @@ class BackgroundUpdater:
|
||||
keyvalues={"update_name": update_name},
|
||||
updatevalues={"progress_json": progress_json},
|
||||
)
|
||||
|
||||
|
||||
def run_validate_constraint_and_delete_rows_schema_delta(
|
||||
txn: "LoggingTransaction",
|
||||
ordering: int,
|
||||
update_name: str,
|
||||
table: str,
|
||||
constraint_name: str,
|
||||
constraint: Constraint,
|
||||
sqlite_table_name: str,
|
||||
sqlite_table_schema: str,
|
||||
) -> None:
|
||||
"""Runs a schema delta to add a constraint to the table. This should be run
|
||||
in a schema delta file.
|
||||
|
||||
For PostgreSQL the constraint is added and validated in the background.
|
||||
|
||||
For SQLite the table is recreated and data copied across immediately. This
|
||||
is done by the caller passing in a script to create the new table. Note that
|
||||
table indexes and triggers are copied over automatically.
|
||||
|
||||
There must be a corresponding call to
|
||||
`register_background_validate_constraint_and_delete_rows` to register the
|
||||
background update in one of the data store classes.
|
||||
|
||||
Attributes:
txn: The database transaction to run the schema delta in.
ordering, update_name: For adding a row to the background_updates table.
table: The table to add the constraint to.
constraint_name: The name of the new constraint.
constraint: A `Constraint` object describing the constraint.
sqlite_table_name: For SQLite, the name of the empty copy of the table.
sqlite_table_schema: A SQL script for creating the above table.
"""
|
||||
|
||||
if isinstance(txn.database_engine, PostgresEngine):
|
||||
# For postgres we can just add the constraint and mark it as NOT VALID,
|
||||
# and then insert a background update to go and check the validity in
|
||||
# the background.
|
||||
txn.execute(
|
||||
f"""
|
||||
ALTER TABLE {table}
|
||||
ADD CONSTRAINT {constraint_name} {constraint.make_constraint_clause_postgres()}
|
||||
NOT VALID
|
||||
"""
|
||||
)
|
||||
|
||||
txn.execute(
|
||||
"INSERT INTO background_updates (ordering, update_name, progress_json) VALUES (?, ?, '{}')",
|
||||
(ordering, update_name),
|
||||
)
|
||||
else:
|
||||
# For SQLite, we:
|
||||
# 1. fetch all indexes/triggers/etc related to the table
|
||||
# 2. create an empty copy of the table
|
||||
# 3. copy across the rows (that satisfy the check)
|
||||
# 4. replace the old table with the new table.
|
||||
# 5. add back all the indexes/triggers/etc
|
||||
|
||||
# Fetch the indexes/triggers/etc. Note that `sql` column being null is
|
||||
# due to indexes being auto created based on the class definition (e.g.
|
||||
# PRIMARY KEY), and so don't need to be recreated.
|
||||
txn.execute(
|
||||
"""
|
||||
SELECT sql FROM sqlite_master
|
||||
WHERE tbl_name = ? AND type != 'table' AND sql IS NOT NULL
|
||||
""",
|
||||
(table,),
|
||||
)
|
||||
extras = [row[0] for row in txn]
|
||||
|
||||
txn.execute(sqlite_table_schema)
|
||||
|
||||
sql = f"""
|
||||
INSERT INTO {sqlite_table_name} SELECT * FROM {table}
|
||||
WHERE {constraint.make_check_clause(table)}
|
||||
"""
|
||||
|
||||
txn.execute(sql)
|
||||
|
||||
txn.execute(f"DROP TABLE {table}")
|
||||
txn.execute(f"ALTER TABLE {sqlite_table_name} RENAME TO {table}")
|
||||
|
||||
for extra in extras:
|
||||
txn.execute(extra)
|
||||
|
||||
@@ -98,6 +98,8 @@ UNIQUE_INDEX_BACKGROUND_UPDATES = {
|
||||
"event_push_summary": "event_push_summary_unique_index2",
|
||||
"receipts_linearized": "receipts_linearized_unique_index",
|
||||
"receipts_graph": "receipts_graph_unique_index",
|
||||
"profiles": "profiles_full_user_id_key_idx",
|
||||
"user_filters": "full_users_filters_unique_idx",
|
||||
}
|
||||
|
||||
|
||||
@@ -2313,6 +2315,43 @@ class DatabasePool:
|
||||
|
||||
return txn.rowcount
|
||||
|
||||
@staticmethod
|
||||
def simple_delete_many_batch_txn(
|
||||
txn: LoggingTransaction,
|
||||
table: str,
|
||||
keys: Collection[str],
|
||||
values: Iterable[Iterable[Any]],
|
||||
) -> None:
|
||||
"""Executes a DELETE query on the named table.
|
||||
|
||||
The input is given as a list of rows, where each row is a list of values.
|
||||
(Actually any iterable is fine.)
|
||||
|
||||
Args:
|
||||
txn: The transaction to use.
|
||||
table: string giving the table name
|
||||
keys: list of column names
|
||||
values: for each row, a list of values in the same order as `keys`
|
||||
"""
|
||||
|
||||
if isinstance(txn.database_engine, PostgresEngine):
|
||||
# We use `execute_values` as it can be a lot faster than `execute_batch`,
|
||||
# but it's only available on postgres.
|
||||
sql = "DELETE FROM %s WHERE (%s) IN (VALUES ?)" % (
|
||||
table,
|
||||
", ".join(k for k in keys),
|
||||
)
|
||||
|
||||
txn.execute_values(sql, values, fetch=False)
|
||||
else:
|
||||
sql = "DELETE FROM %s WHERE (%s) = (%s)" % (
|
||||
table,
|
||||
", ".join(k for k in keys),
|
||||
", ".join("?" for _ in keys),
|
||||
)
|
||||
|
||||
txn.execute_batch(sql, values)
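A short, hypothetical call site (the table and column names are made up for illustration) showing how a store method could use this helper from inside an existing transaction:

def _delete_stale_rows_txn(txn: LoggingTransaction) -> None:
    # Deletes the listed (user_id, device_id) pairs; on Postgres this becomes a
    # single DELETE ... WHERE (...) IN (VALUES ...), on SQLite a batch of DELETEs.
    DatabasePool.simple_delete_many_batch_txn(
        txn,
        table="example_table",
        keys=("user_id", "device_id"),
        values=[
            ("@alice:example.com", "DEVICEAAA"),
            ("@bob:example.com", "DEVICEBBB"),
        ],
    )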
|
||||
|
||||
def get_cache_dict(
|
||||
self,
|
||||
db_conn: LoggingDatabaseConnection,
|
||||
|
||||
@@ -15,10 +15,11 @@
|
||||
# limitations under the License.
|
||||
|
||||
import logging
|
||||
from typing import TYPE_CHECKING, List, Optional, Tuple, cast
|
||||
from typing import TYPE_CHECKING, List, Optional, Tuple, Union, cast
|
||||
|
||||
from synapse.api.constants import Direction
|
||||
from synapse.config.homeserver import HomeServerConfig
|
||||
from synapse.storage._base import make_in_list_sql_clause
|
||||
from synapse.storage.database import (
|
||||
DatabasePool,
|
||||
LoggingDatabaseConnection,
|
||||
@@ -170,6 +171,7 @@ class DataStore(
|
||||
order_by: str = UserSortOrder.NAME.value,
|
||||
direction: Direction = Direction.FORWARDS,
|
||||
approved: bool = True,
|
||||
not_user_types: Optional[List[str]] = None,
|
||||
) -> Tuple[List[JsonDict], int]:
|
||||
"""Function to retrieve a paginated list of users from
|
||||
users list. This will return a json list of users and the
|
||||
@@ -185,6 +187,7 @@ class DataStore(
|
||||
order_by: the sort order of the returned list
|
||||
direction: sort ascending or descending
|
||||
approved: whether to include approved users
|
||||
not_user_types: list of user types to exclude
|
||||
Returns:
|
||||
A tuple of a list of mappings from user to information and a count of total users.
|
||||
"""
|
||||
@@ -193,7 +196,7 @@ class DataStore(
|
||||
txn: LoggingTransaction,
|
||||
) -> Tuple[List[JsonDict], int]:
|
||||
filters = []
|
||||
args = [self.hs.config.server.server_name]
|
||||
args: List[Union[str, int]] = []
|
||||
|
||||
# Set ordering
|
||||
order_by_column = UserSortOrder(order_by).value
|
||||
@@ -222,11 +225,45 @@ class DataStore(
|
||||
# be already existing users that we consider as already approved.
|
||||
filters.append("approved IS FALSE")
|
||||
|
||||
if not_user_types:
|
||||
if len(not_user_types) == 1 and not_user_types[0] == "":
|
||||
# Only exclude NULL type users
|
||||
filters.append("user_type IS NOT NULL")
|
||||
else:
|
||||
not_user_types_has_empty = False
|
||||
not_user_types_without_empty = []
|
||||
|
||||
for not_user_type in not_user_types:
|
||||
if not_user_type == "":
|
||||
not_user_types_has_empty = True
|
||||
else:
|
||||
not_user_types_without_empty.append(not_user_type)
|
||||
|
||||
not_user_type_clause, not_user_type_args = make_in_list_sql_clause(
|
||||
self.database_engine,
|
||||
"u.user_type",
|
||||
not_user_types_without_empty,
|
||||
)
|
||||
|
||||
if not_user_types_has_empty:
|
||||
# NULL values should be excluded.
|
||||
# The NOT-IN clause evaluates to NULL for them, which the WHERE treats as false, so nothing more to do here.
|
||||
filters.append("NOT %s" % (not_user_type_clause))
|
||||
else:
|
||||
# NULL values should *not* be excluded.
|
||||
# Add a special predicate to the query.
|
||||
filters.append(
|
||||
"(NOT %s OR %s IS NULL)"
|
||||
% (not_user_type_clause, "u.user_type")
|
||||
)
|
||||
|
||||
args.extend(not_user_type_args)
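For context, `make_in_list_sql_clause` returns a SQL fragment plus the arguments to bind to it; roughly (illustrative values, and the exact fragment differs by database engine):

clause, clause_args = make_in_list_sql_clause(
    self.database_engine, "u.user_type", ["bot", "support"]
)
# SQLite-style engines: clause == "u.user_type IN (?,?)", clause_args == ["bot", "support"]
# Postgres:             clause == "u.user_type = ANY(?)", clause_args == [["bot", "support"]]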
|
||||
|
||||
where_clause = "WHERE " + " AND ".join(filters) if len(filters) > 0 else ""
|
||||
|
||||
sql_base = f"""
|
||||
FROM users as u
|
||||
LEFT JOIN profiles AS p ON u.name = '@' || p.user_id || ':' || ?
|
||||
LEFT JOIN profiles AS p ON u.name = p.full_user_id
|
||||
LEFT JOIN erased_users AS eu ON u.name = eu.user_id
|
||||
{where_clause}
|
||||
"""
|
||||
|
||||
@@ -1950,12 +1950,16 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
|
||||
|
||||
# Delete older entries in the table, as we really only care about
|
||||
# when the latest change happened.
|
||||
txn.execute_batch(
|
||||
"""
|
||||
cleanup_obsolete_stmt = """
|
||||
DELETE FROM device_lists_stream
|
||||
WHERE user_id = ? AND device_id = ? AND stream_id < ?
|
||||
""",
|
||||
[(user_id, device_id, min_stream_id) for device_id in device_ids],
|
||||
WHERE user_id = ? AND stream_id < ? AND %s
|
||||
"""
|
||||
device_ids_clause, device_ids_args = make_in_list_sql_clause(
|
||||
txn.database_engine, "device_id", device_ids
|
||||
)
|
||||
txn.execute(
|
||||
cleanup_obsolete_stmt % (device_ids_clause,),
|
||||
[user_id, min_stream_id] + device_ids_args,
|
||||
)
|
||||
|
||||
self.db_pool.simple_insert_many_txn(
|
||||
|
||||
@@ -38,6 +38,7 @@ from synapse.events import EventBase, make_event_from_dict
|
||||
from synapse.logging.opentracing import tag_args, trace
|
||||
from synapse.metrics.background_process_metrics import wrap_as_background_process
|
||||
from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
|
||||
from synapse.storage.background_updates import ForeignKeyConstraint
|
||||
from synapse.storage.database import (
|
||||
DatabasePool,
|
||||
LoggingDatabaseConnection,
|
||||
@@ -140,6 +141,17 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
|
||||
|
||||
self._clock.looping_call(self._get_stats_for_federation_staging, 30 * 1000)
|
||||
|
||||
if isinstance(self.database_engine, PostgresEngine):
|
||||
self.db_pool.updates.register_background_validate_constraint_and_delete_rows(
|
||||
update_name="event_forward_extremities_event_id_foreign_key_constraint_update",
|
||||
table="event_forward_extremities",
|
||||
constraint_name="event_forward_extremities_event_id",
|
||||
constraint=ForeignKeyConstraint(
|
||||
"events", [("event_id", "event_id")], deferred=True
|
||||
),
|
||||
unique_columns=("event_id", "room_id"),
|
||||
)
|
||||
|
||||
async def get_auth_chain(
|
||||
self, room_id: str, event_ids: Collection[str], include_given: bool = False
|
||||
) -> List[EventBase]:
|
||||
|
||||
@@ -415,12 +415,6 @@ class PersistEventsStore:
|
||||
backfilled=False,
|
||||
)
|
||||
|
||||
self._update_forward_extremities_txn(
|
||||
txn,
|
||||
new_forward_extremities=new_forward_extremities,
|
||||
max_stream_order=max_stream_order,
|
||||
)
|
||||
|
||||
# Ensure that we don't have the same event twice.
|
||||
events_and_contexts = self._filter_events_and_contexts_for_duplicates(
|
||||
events_and_contexts
|
||||
@@ -439,6 +433,12 @@ class PersistEventsStore:
|
||||
|
||||
self._store_event_txn(txn, events_and_contexts=events_and_contexts)
|
||||
|
||||
self._update_forward_extremities_txn(
|
||||
txn,
|
||||
new_forward_extremities=new_forward_extremities,
|
||||
max_stream_order=max_stream_order,
|
||||
)
|
||||
|
||||
self._persist_transaction_ids_txn(txn, events_and_contexts)
|
||||
|
||||
# Insert into event_to_state_groups.
|
||||
|
||||
@@ -188,14 +188,13 @@ class FilteringWorkerStore(SQLBaseStore):
|
||||
filter_id = max_id + 1
|
||||
|
||||
sql = (
|
||||
"INSERT INTO user_filters (full_user_id, user_id, filter_id, filter_json)"
|
||||
"VALUES(?, ?, ?, ?)"
|
||||
"INSERT INTO user_filters (full_user_id, filter_id, filter_json)"
|
||||
"VALUES(?, ?, ?)"
|
||||
)
|
||||
txn.execute(
|
||||
sql,
|
||||
(
|
||||
user_id.to_string(),
|
||||
user_id.localpart,
|
||||
filter_id,
|
||||
bytearray(def_json),
|
||||
),
|
||||
|
||||
@@ -25,6 +25,7 @@ from synapse.storage.database import (
|
||||
LoggingDatabaseConnection,
|
||||
LoggingTransaction,
|
||||
)
|
||||
from synapse.storage.engines import PostgresEngine
|
||||
from synapse.util import Clock
|
||||
from synapse.util.stringutils import random_string
|
||||
|
||||
@@ -68,12 +69,20 @@ class LockStore(SQLBaseStore):
|
||||
self._reactor = hs.get_reactor()
|
||||
self._instance_name = hs.get_instance_id()
|
||||
|
||||
# A map from `(lock_name, lock_key)` to the token of any locks that we
|
||||
# think we currently hold.
|
||||
self._live_tokens: WeakValueDictionary[
|
||||
# A map from `(lock_name, lock_key)` to lock that we think we
|
||||
# currently hold.
|
||||
self._live_lock_tokens: WeakValueDictionary[
|
||||
Tuple[str, str], Lock
|
||||
] = WeakValueDictionary()
|
||||
|
||||
# A map from `(lock_name, lock_key, token)` to read/write lock that we
|
||||
# think we currently hold. For a given lock_name/lock_key, there can be
|
||||
# multiple read locks at a time but only one write lock (no mixing read
|
||||
# and write locks at the same time).
|
||||
self._live_read_write_lock_tokens: WeakValueDictionary[
|
||||
Tuple[str, str, str], Lock
|
||||
] = WeakValueDictionary()
|
||||
|
||||
# When we shut down we want to remove the locks. Technically this can
|
||||
# lead to a race, as we may drop the lock while we are still processing.
|
||||
# However, a) it should be a small window, b) the lock is best effort
|
||||
@@ -91,11 +100,13 @@ class LockStore(SQLBaseStore):
|
||||
"""Called when the server is shutting down"""
|
||||
logger.info("Dropping held locks due to shutdown")
|
||||
|
||||
# We need to take a copy of the tokens dict as dropping the locks will
|
||||
# cause the dictionary to change.
|
||||
locks = dict(self._live_tokens)
|
||||
# We need to take a copy of the locks as dropping the locks will cause
|
||||
# the dictionary to change.
|
||||
locks = list(self._live_lock_tokens.values()) + list(
|
||||
self._live_read_write_lock_tokens.values()
|
||||
)
|
||||
|
||||
for lock in locks.values():
|
||||
for lock in locks:
|
||||
await lock.release()
|
||||
|
||||
logger.info("Dropped locks due to shutdown")
|
||||
@@ -122,7 +133,7 @@ class LockStore(SQLBaseStore):
|
||||
"""
|
||||
|
||||
# Check if this process has taken out a lock and if it's still valid.
|
||||
lock = self._live_tokens.get((lock_name, lock_key))
|
||||
lock = self._live_lock_tokens.get((lock_name, lock_key))
|
||||
if lock and await lock.is_still_valid():
|
||||
return None
|
||||
|
||||
@@ -176,61 +187,111 @@ class LockStore(SQLBaseStore):
|
||||
self._reactor,
|
||||
self._clock,
|
||||
self,
|
||||
read_write=False,
|
||||
lock_name=lock_name,
|
||||
lock_key=lock_key,
|
||||
token=token,
|
||||
)
|
||||
|
||||
self._live_tokens[(lock_name, lock_key)] = lock
|
||||
self._live_lock_tokens[(lock_name, lock_key)] = lock
|
||||
|
||||
return lock
|
||||
|
||||
async def _is_lock_still_valid(
|
||||
self, lock_name: str, lock_key: str, token: str
|
||||
) -> bool:
|
||||
"""Checks whether this instance still holds the lock."""
|
||||
last_renewed_ts = await self.db_pool.simple_select_one_onecol(
|
||||
table="worker_locks",
|
||||
keyvalues={
|
||||
"lock_name": lock_name,
|
||||
"lock_key": lock_key,
|
||||
"token": token,
|
||||
},
|
||||
retcol="last_renewed_ts",
|
||||
allow_none=True,
|
||||
desc="is_lock_still_valid",
|
||||
)
|
||||
return (
|
||||
last_renewed_ts is not None
|
||||
and self._clock.time_msec() - _LOCK_TIMEOUT_MS < last_renewed_ts
|
||||
async def try_acquire_read_write_lock(
|
||||
self,
|
||||
lock_name: str,
|
||||
lock_key: str,
|
||||
write: bool,
|
||||
) -> Optional["Lock"]:
|
||||
"""Try to acquire a lock for the given name/key. Will return an async
|
||||
context manager if the lock is successfully acquired, which *must* be
|
||||
used (otherwise the lock will leak).
|
||||
"""
|
||||
|
||||
now = self._clock.time_msec()
|
||||
token = random_string(6)
|
||||
|
||||
def _try_acquire_read_write_lock_txn(txn: LoggingTransaction) -> None:
|
||||
# We attempt to acquire the lock by inserting into
|
||||
# `worker_read_write_locks` and seeing if that fails any
|
||||
# constraints. If it doesn't then we have acquired the lock,
|
||||
# otherwise we haven't.
|
||||
#
|
||||
# Before that though we clear the table of any stale locks.
|
||||
|
||||
delete_sql = """
|
||||
DELETE FROM worker_read_write_locks
|
||||
WHERE last_renewed_ts < ? AND lock_name = ? AND lock_key = ?;
|
||||
"""
|
||||
|
||||
insert_sql = """
|
||||
INSERT INTO worker_read_write_locks (lock_name, lock_key, write_lock, instance_name, token, last_renewed_ts)
|
||||
VALUES (?, ?, ?, ?, ?, ?)
|
||||
"""
|
||||
|
||||
if isinstance(self.database_engine, PostgresEngine):
|
||||
# For Postgres we can send these queries at the same time.
|
||||
txn.execute(
|
||||
delete_sql + ";" + insert_sql,
|
||||
(
|
||||
# DELETE args
|
||||
now - _LOCK_TIMEOUT_MS,
|
||||
lock_name,
|
||||
lock_key,
|
||||
# UPSERT args
|
||||
lock_name,
|
||||
lock_key,
|
||||
write,
|
||||
self._instance_name,
|
||||
token,
|
||||
now,
|
||||
),
|
||||
)
|
||||
else:
|
||||
# For SQLite these need to be two queries.
|
||||
txn.execute(
|
||||
delete_sql,
|
||||
(
|
||||
now - _LOCK_TIMEOUT_MS,
|
||||
lock_name,
|
||||
lock_key,
|
||||
),
|
||||
)
|
||||
txn.execute(
|
||||
insert_sql,
|
||||
(
|
||||
lock_name,
|
||||
lock_key,
|
||||
write,
|
||||
self._instance_name,
|
||||
token,
|
||||
now,
|
||||
),
|
||||
)
|
||||
|
||||
return
|
||||
|
||||
try:
|
||||
await self.db_pool.runInteraction(
|
||||
"try_acquire_read_write_lock",
|
||||
_try_acquire_read_write_lock_txn,
|
||||
)
|
||||
except self.database_engine.module.IntegrityError:
|
||||
return None
|
||||
|
||||
lock = Lock(
|
||||
self._reactor,
|
||||
self._clock,
|
||||
self,
|
||||
read_write=True,
|
||||
lock_name=lock_name,
|
||||
lock_key=lock_key,
|
||||
token=token,
|
||||
)
|
||||
|
||||
async def _renew_lock(self, lock_name: str, lock_key: str, token: str) -> None:
|
||||
"""Attempt to renew the lock if we still hold it."""
|
||||
await self.db_pool.simple_update(
|
||||
table="worker_locks",
|
||||
keyvalues={
|
||||
"lock_name": lock_name,
|
||||
"lock_key": lock_key,
|
||||
"token": token,
|
||||
},
|
||||
updatevalues={"last_renewed_ts": self._clock.time_msec()},
|
||||
desc="renew_lock",
|
||||
)
|
||||
self._live_read_write_lock_tokens[(lock_name, lock_key, token)] = lock
|
||||
|
||||
async def _drop_lock(self, lock_name: str, lock_key: str, token: str) -> None:
|
||||
"""Attempt to drop the lock, if we still hold it"""
|
||||
await self.db_pool.simple_delete(
|
||||
table="worker_locks",
|
||||
keyvalues={
|
||||
"lock_name": lock_name,
|
||||
"lock_key": lock_key,
|
||||
"token": token,
|
||||
},
|
||||
desc="drop_lock",
|
||||
)
|
||||
|
||||
self._live_tokens.pop((lock_name, lock_key), None)
|
||||
return lock
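Since the returned lock must be used as an async context manager (otherwise it leaks, per the docstring above), a hedged sketch of a caller follows; the lock name and key are illustrative:

async def run_with_write_lock(store: LockStore) -> None:
    lock = await store.try_acquire_read_write_lock(
        "example_lock_name", "example_lock_key", write=True
    )
    if lock is None:
        # Someone else holds a conflicting read or write lock; try again later.
        return
    async with lock:
        ...  # do the work guarded by the write lock; it is released on exit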
|
||||
|
||||
|
||||
class Lock:
|
||||
@@ -259,6 +320,7 @@ class Lock:
|
||||
reactor: IReactorCore,
|
||||
clock: Clock,
|
||||
store: LockStore,
|
||||
read_write: bool,
|
||||
lock_name: str,
|
||||
lock_key: str,
|
||||
token: str,
|
||||
@@ -266,13 +328,23 @@ class Lock:
|
||||
self._reactor = reactor
|
||||
self._clock = clock
|
||||
self._store = store
|
||||
self._read_write = read_write
|
||||
self._lock_name = lock_name
|
||||
self._lock_key = lock_key
|
||||
|
||||
self._token = token
|
||||
|
||||
self._table = "worker_read_write_locks" if read_write else "worker_locks"
|
||||
|
||||
self._looping_call = clock.looping_call(
|
||||
self._renew, _RENEWAL_INTERVAL_MS, store, lock_name, lock_key, token
|
||||
self._renew,
|
||||
_RENEWAL_INTERVAL_MS,
|
||||
store,
|
||||
clock,
|
||||
read_write,
|
||||
lock_name,
|
||||
lock_key,
|
||||
token,
|
||||
)
|
||||
|
||||
self._dropped = False
|
||||
@@ -281,6 +353,8 @@ class Lock:
|
||||
@wrap_as_background_process("Lock._renew")
|
||||
async def _renew(
|
||||
store: LockStore,
|
||||
clock: Clock,
|
||||
read_write: bool,
|
||||
lock_name: str,
|
||||
lock_key: str,
|
||||
token: str,
|
||||
@@ -291,12 +365,34 @@ class Lock:
|
||||
don't end up with a reference to `self` in the reactor, which would stop
|
||||
this from being cleaned up if we dropped the context manager.
|
||||
"""
|
||||
await store._renew_lock(lock_name, lock_key, token)
|
||||
table = "worker_read_write_locks" if read_write else "worker_locks"
|
||||
await store.db_pool.simple_update(
|
||||
table=table,
|
||||
keyvalues={
|
||||
"lock_name": lock_name,
|
||||
"lock_key": lock_key,
|
||||
"token": token,
|
||||
},
|
||||
updatevalues={"last_renewed_ts": clock.time_msec()},
|
||||
desc="renew_lock",
|
||||
)
|
||||
|
||||
async def is_still_valid(self) -> bool:
|
||||
"""Check if the lock is still held by us"""
|
||||
return await self._store._is_lock_still_valid(
|
||||
self._lock_name, self._lock_key, self._token
|
||||
last_renewed_ts = await self._store.db_pool.simple_select_one_onecol(
|
||||
table=self._table,
|
||||
keyvalues={
|
||||
"lock_name": self._lock_name,
|
||||
"lock_key": self._lock_key,
|
||||
"token": self._token,
|
||||
},
|
||||
retcol="last_renewed_ts",
|
||||
allow_none=True,
|
||||
desc="is_lock_still_valid",
|
||||
)
|
||||
return (
|
||||
last_renewed_ts is not None
|
||||
and self._clock.time_msec() - _LOCK_TIMEOUT_MS < last_renewed_ts
|
||||
)
|
||||
|
||||
async def __aenter__(self) -> None:
|
||||
@@ -325,7 +421,23 @@ class Lock:
|
||||
if self._looping_call.running:
|
||||
self._looping_call.stop()
|
||||
|
||||
await self._store._drop_lock(self._lock_name, self._lock_key, self._token)
|
||||
await self._store.db_pool.simple_delete(
|
||||
table=self._table,
|
||||
keyvalues={
|
||||
"lock_name": self._lock_name,
|
||||
"lock_key": self._lock_key,
|
||||
"token": self._token,
|
||||
},
|
||||
desc="drop_lock",
|
||||
)
|
||||
|
||||
if self._read_write:
|
||||
self._store._live_read_write_lock_tokens.pop(
|
||||
(self._lock_name, self._lock_key, self._token), None
|
||||
)
|
||||
else:
|
||||
self._store._live_lock_tokens.pop((self._lock_name, self._lock_key), None)
|
||||
|
||||
self._dropped = True
|
||||
|
||||
def __del__(self) -> None:
|
||||
|
||||
@@ -27,6 +27,7 @@ from typing import (
|
||||
)
|
||||
|
||||
from synapse.api.constants import Direction
|
||||
from synapse.logging.opentracing import trace
|
||||
from synapse.storage._base import SQLBaseStore
|
||||
from synapse.storage.database import (
|
||||
DatabasePool,
|
||||
@@ -328,6 +329,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
|
||||
"get_local_media_ids", _get_local_media_ids_txn
|
||||
)
|
||||
|
||||
@trace
|
||||
async def store_local_media(
|
||||
self,
|
||||
media_id: str,
|
||||
@@ -447,6 +449,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
|
||||
desc="get_local_media_thumbnails",
|
||||
)
|
||||
|
||||
@trace
|
||||
async def store_local_thumbnail(
|
||||
self,
|
||||
media_id: str,
|
||||
@@ -568,6 +571,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
|
||||
desc="get_remote_media_thumbnails",
|
||||
)
|
||||
|
||||
@trace
|
||||
async def get_remote_media_thumbnail(
|
||||
self,
|
||||
origin: str,
|
||||
@@ -599,6 +603,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
|
||||
desc="get_remote_media_thumbnail",
|
||||
)
|
||||
|
||||
@trace
|
||||
async def store_remote_media_thumbnail(
|
||||
self,
|
||||
origin: str,
|
||||
|
||||
@@ -11,7 +11,6 @@
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple, cast
|
||||
|
||||
from synapse.api.presence import PresenceState, UserPresenceState
|
||||
@@ -24,6 +23,7 @@ from synapse.storage.database import (
|
||||
)
|
||||
from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore
|
||||
from synapse.storage.engines import PostgresEngine
|
||||
from synapse.storage.engines._base import IsolationLevel
|
||||
from synapse.storage.types import Connection
|
||||
from synapse.storage.util.id_generators import (
|
||||
AbstractStreamIdGenerator,
|
||||
@@ -115,11 +115,16 @@ class PresenceStore(PresenceBackgroundUpdateStore, CacheInvalidationWorkerStore)
|
||||
)
|
||||
|
||||
async with stream_ordering_manager as stream_orderings:
|
||||
# Run the interaction with an isolation level of READ_COMMITTED to avoid
|
||||
# serialization errors (and rollbacks) in the database. This way it will
|
||||
# ignore new rows during the DELETE, but will pick them up the next time
|
||||
# this is run. Currently, that is between 5-60 seconds.
|
||||
await self.db_pool.runInteraction(
|
||||
"update_presence",
|
||||
self._update_presence_txn,
|
||||
stream_orderings,
|
||||
presence_states,
|
||||
isolation_level=IsolationLevel.READ_COMMITTED,
|
||||
)
|
||||
|
||||
return stream_orderings[-1], self._presence_id_gen.get_current_token()
|
||||
|
||||
@@ -173,10 +173,9 @@ class ProfileWorkerStore(SQLBaseStore):
|
||||
)
|
||||
|
||||
async def create_profile(self, user_id: UserID) -> None:
|
||||
user_localpart = user_id.localpart
|
||||
await self.db_pool.simple_insert(
|
||||
table="profiles",
|
||||
values={"user_id": user_localpart, "full_user_id": user_id.to_string()},
|
||||
values={"full_user_id": user_id.to_string()},
|
||||
desc="create_profile",
|
||||
)
|
||||
|
||||
@@ -191,13 +190,11 @@ class ProfileWorkerStore(SQLBaseStore):
|
||||
new_displayname: The new display name. If this is None, the user's display
|
||||
name is removed.
|
||||
"""
|
||||
user_localpart = user_id.localpart
|
||||
await self.db_pool.simple_upsert(
|
||||
table="profiles",
|
||||
keyvalues={"user_id": user_localpart},
|
||||
keyvalues={"full_user_id": user_id.to_string()},
|
||||
values={
|
||||
"displayname": new_displayname,
|
||||
"full_user_id": user_id.to_string(),
|
||||
},
|
||||
desc="set_profile_displayname",
|
||||
)
|
||||
@@ -213,11 +210,10 @@ class ProfileWorkerStore(SQLBaseStore):
|
||||
new_avatar_url: The new avatar URL. If this is None, the user's avatar is
|
||||
removed.
|
||||
"""
|
||||
user_localpart = user_id.localpart
|
||||
await self.db_pool.simple_upsert(
|
||||
table="profiles",
|
||||
keyvalues={"user_id": user_localpart},
|
||||
values={"avatar_url": new_avatar_url, "full_user_id": user_id.to_string()},
|
||||
keyvalues={"full_user_id": user_id.to_string()},
|
||||
values={"avatar_url": new_avatar_url},
|
||||
desc="set_profile_avatar_url",
|
||||
)
|
||||
|
||||
|
||||
@@ -62,7 +62,6 @@ from synapse.types import (
|
||||
get_domain_from_id,
|
||||
get_localpart_from_id,
|
||||
)
|
||||
from synapse.util.caches.descriptors import cached
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -771,9 +770,6 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
|
||||
# This should be unreachable.
|
||||
raise Exception("Unrecognized database engine")
|
||||
|
||||
for p in profiles:
|
||||
txn.call_after(self.get_user_in_directory.invalidate, (p.user_id,))
|
||||
|
||||
async def add_users_who_share_private_room(
|
||||
self, room_id: str, user_id_tuples: Iterable[Tuple[str, str]]
|
||||
) -> None:
|
||||
@@ -831,14 +827,12 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
|
||||
txn.execute(f"{truncate} user_directory_search")
|
||||
txn.execute(f"{truncate} users_in_public_rooms")
|
||||
txn.execute(f"{truncate} users_who_share_private_rooms")
|
||||
txn.call_after(self.get_user_in_directory.invalidate_all)
|
||||
|
||||
await self.db_pool.runInteraction(
|
||||
"delete_all_from_user_dir", _delete_all_from_user_dir_txn
|
||||
)
|
||||
|
||||
@cached()
|
||||
async def get_user_in_directory(self, user_id: str) -> Optional[Mapping[str, str]]:
|
||||
async def _get_user_in_directory(self, user_id: str) -> Optional[Mapping[str, str]]:
|
||||
return await self.db_pool.simple_select_one(
|
||||
table="user_directory",
|
||||
keyvalues={"user_id": user_id},
|
||||
@@ -900,7 +894,6 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore):
|
||||
table="users_who_share_private_rooms",
|
||||
keyvalues={"other_user_id": user_id},
|
||||
)
|
||||
txn.call_after(self.get_user_in_directory.invalidate, (user_id,))
|
||||
|
||||
await self.db_pool.runInteraction(
|
||||
"remove_from_user_dir", _remove_from_user_dir_txn
|
||||
|
||||
@@ -45,6 +45,15 @@ class PostgresEngine(
|
||||
|
||||
psycopg2.extensions.register_adapter(bytes, _disable_bytes_adapter)
|
||||
self.synchronous_commit: bool = database_config.get("synchronous_commit", True)
|
||||
# Set the statement timeout to 1 hour by default.
|
||||
# Any query taking more than 1 hour should probably be considered a bug;
|
||||
# most of the time this is a sign that work needs to be split up or that
|
||||
# some degenerate query plan has been created and the client has probably
|
||||
# timed out/walked off anyway.
|
||||
# This is in milliseconds.
|
||||
self.statement_timeout: Optional[int] = database_config.get(
|
||||
"statement_timeout", 60 * 60 * 1000
|
||||
)
|
||||
self._version: Optional[int] = None # unknown as yet
|
||||
|
||||
self.isolation_level_map: Mapping[int, int] = {
|
||||
@@ -157,6 +166,10 @@ class PostgresEngine(
|
||||
if not self.synchronous_commit:
|
||||
cursor.execute("SET synchronous_commit TO OFF")
|
||||
|
||||
# Abort really long-running statements and turn them into errors.
|
||||
if self.statement_timeout is not None:
|
||||
cursor.execute("SET statement_timeout TO ?", (self.statement_timeout,))
|
||||
|
||||
cursor.close()
|
||||
db_conn.commit()
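For context, the two knobs this hunk reads can be sketched as follows; the key names match the database_config.get() calls above, and the values shown are just the defaults (a minimal illustration, not configuration taken from this PR).

# Illustrative only: these keys live in the database config block passed to
# PostgresEngine.
database_config = {
    "synchronous_commit": True,           # keep PostgreSQL's normal durability
    "statement_timeout": 60 * 60 * 1000,  # 1 hour, expressed in milliseconds
}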
|
||||
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
SCHEMA_VERSION = 78 # remember to update the list below when updating
|
||||
SCHEMA_VERSION = 79 # remember to update the list below when updating
|
||||
"""Represents the expectations made by the codebase about the database schema
|
||||
|
||||
This should be incremented whenever the codebase changes its requirements on the
|
||||
@@ -106,6 +106,9 @@ Changes in SCHEMA_VERSION = 77
|
||||
|
||||
Changes in SCHEMA_VERSION = 78
|
||||
- Validate check (full_user_id IS NOT NULL) on tables profiles and user_filters
|
||||
|
||||
Changes in SCHEMA_VERSION = 79
|
||||
- We no longer write to column user_id of tables profiles and user_filters
|
||||
"""
|
||||
|
||||
|
||||
@@ -118,7 +121,9 @@ SCHEMA_COMPAT_VERSION = (
|
||||
#
|
||||
# insertions to the column `full_user_id` of tables profiles and user_filters can no
|
||||
# longer be null
|
||||
76
|
||||
#
|
||||
# we no longer write to column `user_id` of tables profiles and user_filters
|
||||
78
|
||||
)
|
||||
"""Limit on how far the synapse codebase can be rolled back without breaking db compat
|
||||
|
||||
|
||||
@@ -0,0 +1,57 @@
|
||||
# Copyright 2023 The Matrix.org Foundation C.I.C.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
"""
|
||||
This migration adds foreign key constraint to `event_forward_extremities` table.
|
||||
"""
|
||||
from synapse.storage.background_updates import (
|
||||
ForeignKeyConstraint,
|
||||
run_validate_constraint_and_delete_rows_schema_delta,
|
||||
)
|
||||
from synapse.storage.database import LoggingTransaction
|
||||
from synapse.storage.engines import BaseDatabaseEngine
|
||||
|
||||
FORWARD_EXTREMITIES_TABLE_SCHEMA = """
|
||||
CREATE TABLE event_forward_extremities2(
|
||||
event_id TEXT NOT NULL,
|
||||
room_id TEXT NOT NULL,
|
||||
UNIQUE (event_id, room_id),
|
||||
CONSTRAINT event_forward_extremities_event_id FOREIGN KEY (event_id) REFERENCES events (event_id) DEFERRABLE INITIALLY DEFERRED
|
||||
)
|
||||
"""
|
||||
|
||||
|
||||
def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None:
|
||||
# We mark this as a deferred constraint, as the previous version of Synapse
|
||||
# inserted the event into the forward extremities *before* the events table.
|
||||
# By marking as deferred we ensure that downgrading to the previous version
|
||||
# will continue to work.
|
||||
run_validate_constraint_and_delete_rows_schema_delta(
|
||||
cur,
|
||||
ordering=7803,
|
||||
update_name="event_forward_extremities_event_id_foreign_key_constraint_update",
|
||||
table="event_forward_extremities",
|
||||
constraint_name="event_forward_extremities_event_id",
|
||||
constraint=ForeignKeyConstraint(
|
||||
"events", [("event_id", "event_id")], deferred=True
|
||||
),
|
||||
sqlite_table_name="event_forward_extremities2",
|
||||
sqlite_table_schema=FORWARD_EXTREMITIES_TABLE_SCHEMA,
|
||||
)
|
||||
|
||||
# We can't add a similar constraint to `event_backward_extremities` as the
|
||||
# events in there don't exist in the `events` table and `event_edges`
|
||||
# doesn't have a unique constraint on `prev_event_id` (so we can't make a
|
||||
# foreign key point to it).
|
||||
@@ -0,0 +1,152 @@
|
||||
/* Copyright 2023 The Matrix.org Foundation C.I.C
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
|
||||
-- We implement read/write style locks by using two tables with mutual foreign
|
||||
-- key constraints. Note that this implementation is vulnerable to starving
|
||||
-- writers if read locks repeatedly get acquired.
|
||||
--
|
||||
-- The first table (`worker_read_write_locks_mode`) indicates that a given lock
|
||||
-- has either been acquired in read mode *or* write mode, but not both. This is
|
||||
-- enforced by the unique constraint. Each instance of a lock being acquired is
|
||||
-- associated with a random `token`.
|
||||
--
|
||||
-- The second table (`worker_read_write_locks`) tracks who has currently
|
||||
-- acquired a given lock. For a given lock_name/lock_key, there can be multiple
|
||||
-- read locks at a time but only one write lock (no mixing read and write locks
|
||||
-- at the same time).
|
||||
--
|
||||
-- The foreign key from the second to first table enforces that for any given
|
||||
-- lock, the second table cannot have a mix of read and write rows.
|
||||
--
|
||||
-- The foreign key from the first to second table enforces that we don't have a
|
||||
-- row for a lock in the first table unless there is one in the second table.
|
||||
--
|
||||
--
|
||||
-- Furthermore, we add some triggers to automatically keep the first table up to
|
||||
-- date when inserting/deleting from the second table. This reduces the number
|
||||
-- of round trips needed to acquire and release locks, as those operations
|
||||
-- simply become an INSERT or DELETE. These triggers are added in a separate
|
||||
-- delta due to database specific syntax.
|
||||
|
||||
|
||||
-- A table to track whether a lock is currently acquired, and if so whether it's
|
||||
-- in read or write mode.
|
||||
CREATE TABLE worker_read_write_locks_mode (
|
||||
lock_name TEXT NOT NULL,
|
||||
lock_key TEXT NOT NULL,
|
||||
-- Whether this lock is in read (false) or write (true) mode
|
||||
write_lock BOOLEAN NOT NULL,
|
||||
-- A token that has currently acquired the lock. We need this so that we can
|
||||
-- add a foreign key constraint from this table to `worker_read_write_locks`.
|
||||
token TEXT NOT NULL
|
||||
);
|
||||
|
||||
-- Ensure that we can only have one row per lock
|
||||
CREATE UNIQUE INDEX worker_read_write_locks_mode_key ON worker_read_write_locks_mode (lock_name, lock_key);
|
||||
-- We need this (redundant) constraint so that we can have a foreign key
|
||||
-- constraint against this table.
|
||||
CREATE UNIQUE INDEX worker_read_write_locks_mode_type ON worker_read_write_locks_mode (lock_name, lock_key, write_lock);
|
||||
|
||||
|
||||
-- A table to track who has currently acquired a given lock.
|
||||
CREATE TABLE worker_read_write_locks (
|
||||
lock_name TEXT NOT NULL,
|
||||
lock_key TEXT NOT NULL,
|
||||
-- We write the instance name to ease manual debugging; we don't ever read
|
||||
-- from it.
|
||||
-- Note: instance names aren't guaranteed to be unique.
|
||||
instance_name TEXT NOT NULL,
|
||||
-- Whether the process has taken out a "read" or a "write" lock.
|
||||
write_lock BOOLEAN NOT NULL,
|
||||
-- A random string generated each time an instance takes out a lock. Used by
|
||||
-- the instance to tell whether the lock is still held by it (e.g. in the
|
||||
-- case where the process stalls for a long time the lock may time out and
|
||||
-- be taken out by another instance, at which point the original instance
|
||||
-- can tell it no longer holds the lock as the tokens no longer match).
|
||||
token TEXT NOT NULL,
|
||||
last_renewed_ts BIGINT NOT NULL,
|
||||
|
||||
-- This constraint ensures that a given lock has only been acquired in read
|
||||
-- xor write mode, but not both.
|
||||
FOREIGN KEY (lock_name, lock_key, write_lock) REFERENCES worker_read_write_locks_mode (lock_name, lock_key, write_lock)
|
||||
);
|
||||
|
||||
CREATE UNIQUE INDEX worker_read_write_locks_key ON worker_read_write_locks (lock_name, lock_key, token);
|
||||
-- Ensures that only one instance can acquire a lock in write mode at a time.
|
||||
CREATE UNIQUE INDEX worker_read_write_locks_write ON worker_read_write_locks (lock_name, lock_key) WHERE write_lock;
|
||||
|
||||
|
||||
-- Add a foreign key constraint to ensure that if a lock is in
|
||||
-- `worker_read_write_locks_mode` then there must be a corresponding row in
|
||||
-- `worker_read_write_locks` (i.e. we don't accidentally end up with a row in
|
||||
-- `worker_read_write_locks_mode` when the lock is not currently acquired).
|
||||
--
|
||||
-- We only add to PostgreSQL as SQLite does not support adding constraints
|
||||
-- after table creation, and so doesn't support "circular" foreign key
|
||||
-- constraints.
|
||||
ALTER TABLE worker_read_write_locks_mode ADD CONSTRAINT worker_read_write_locks_mode_foreign
|
||||
FOREIGN KEY (lock_name, lock_key, token) REFERENCES worker_read_write_locks(lock_name, lock_key, token) DEFERRABLE INITIALLY DEFERRED;
|
||||
|
||||
|
||||
-- Add a trigger to UPSERT into `worker_read_write_locks_mode` whenever we try
|
||||
-- and acquire a lock, i.e. insert into `worker_read_write_locks`,
|
||||
CREATE OR REPLACE FUNCTION upsert_read_write_lock_parent() RETURNS trigger AS $$
|
||||
BEGIN
|
||||
INSERT INTO worker_read_write_locks_mode (lock_name, lock_key, write_lock, token)
|
||||
VALUES (NEW.lock_name, NEW.lock_key, NEW.write_lock, NEW.token)
|
||||
ON CONFLICT (lock_name, lock_key)
|
||||
DO NOTHING;
|
||||
RETURN NEW;
|
||||
END
|
||||
$$
|
||||
LANGUAGE plpgsql;
|
||||
|
||||
CREATE TRIGGER upsert_read_write_lock_parent_trigger BEFORE INSERT ON worker_read_write_locks
|
||||
FOR EACH ROW
|
||||
EXECUTE PROCEDURE upsert_read_write_lock_parent();
|
||||
|
||||
|
||||
-- Ensure that we keep `worker_read_write_locks_mode` up to date whenever a lock
|
||||
-- is released (i.e. a row deleted from `worker_read_write_locks`). Either we
|
||||
-- update the `worker_read_write_locks_mode.token` to match another instance
|
||||
-- that has currently acquired the lock, or we delete the row if nobody has
|
||||
-- currently acquired a lock.
|
||||
CREATE OR REPLACE FUNCTION delete_read_write_lock_parent() RETURNS trigger AS $$
|
||||
DECLARE
|
||||
new_token TEXT;
|
||||
BEGIN
|
||||
SELECT token INTO new_token FROM worker_read_write_locks
|
||||
WHERE
|
||||
lock_name = OLD.lock_name
|
||||
AND lock_key = OLD.lock_key;
|
||||
|
||||
IF NOT FOUND THEN
|
||||
DELETE FROM worker_read_write_locks_mode
|
||||
WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key;
|
||||
ELSE
|
||||
UPDATE worker_read_write_locks_mode
|
||||
SET token = new_token
|
||||
WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key;
|
||||
END IF;
|
||||
|
||||
RETURN NEW;
|
||||
END
|
||||
$$
|
||||
LANGUAGE plpgsql;
|
||||
|
||||
CREATE TRIGGER delete_read_write_lock_parent_trigger AFTER DELETE ON worker_read_write_locks
|
||||
FOR EACH ROW
|
||||
EXECUTE PROCEDURE delete_read_write_lock_parent();
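To make the intended usage concrete, here is a rough sketch of how a worker might take one of these locks through the storage layer. The method name and its behaviour (returning None on contention, acting as an async context manager) are taken from the tests further down; the lock name, key and the work inside are made up for illustration.

# Rough usage sketch, not actual Synapse code. try_acquire_read_write_lock
# returns None if the lock is already held in a conflicting mode.
lock = await store.try_acquire_read_write_lock("purge_room", room_id, write=True)
if lock is None:
    return  # someone else holds the write lock (or readers are active)
async with lock:
    # The INSERT trigger above has upserted the matching row into
    # worker_read_write_locks_mode; the DELETE trigger cleans up (or hands the
    # token to another holder) when the lock is released.
    await do_exclusive_work()  # placeholder for the protected work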
|
||||
@@ -0,0 +1,119 @@
|
||||
/* Copyright 2023 The Matrix.org Foundation C.I.C
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
|
||||
-- c.f. the postgres version for context. The tables and constraints are the
|
||||
-- same, however they need to be defined slightly differently to work around how
|
||||
-- each database handles circular foreign key references.
|
||||
|
||||
|
||||
|
||||
-- A table to track whether a lock is currently acquired, and if so whether it's
|
||||
-- in read or write mode.
|
||||
CREATE TABLE worker_read_write_locks_mode (
|
||||
lock_name TEXT NOT NULL,
|
||||
lock_key TEXT NOT NULL,
|
||||
-- Whether this lock is in read (false) or write (true) mode
|
||||
write_lock BOOLEAN NOT NULL,
|
||||
-- A token that has currently acquired the lock. We need this so that we can
|
||||
-- add a foreign key constraint from this table to `worker_read_write_locks`.
|
||||
token TEXT NOT NULL,
|
||||
-- Add a foreign key constraint to ensure that if a lock is in
|
||||
-- `worker_read_write_locks_mode` then there must be a corresponding row in
|
||||
-- `worker_read_write_locks` (i.e. we don't accidentally end up with a row in
|
||||
-- `worker_read_write_locks_mode` when the lock is not currently acquired).
|
||||
FOREIGN KEY (lock_name, lock_key, token) REFERENCES worker_read_write_locks(lock_name, lock_key, token) DEFERRABLE INITIALLY DEFERRED
|
||||
);
|
||||
|
||||
-- Ensure that we can only have one row per lock
|
||||
CREATE UNIQUE INDEX worker_read_write_locks_mode_key ON worker_read_write_locks_mode (lock_name, lock_key);
|
||||
-- We need this (redundant) constraint so that we can have a foreign key
|
||||
-- constraint against this table.
|
||||
CREATE UNIQUE INDEX worker_read_write_locks_mode_type ON worker_read_write_locks_mode (lock_name, lock_key, write_lock);
|
||||
|
||||
|
||||
-- A table to track who has currently acquired a given lock.
|
||||
CREATE TABLE worker_read_write_locks (
|
||||
lock_name TEXT NOT NULL,
|
||||
lock_key TEXT NOT NULL,
|
||||
-- We write the instance name to ease manual debugging; we don't ever read
|
||||
-- from it.
|
||||
-- Note: instance names aren't guarenteed to be unique.
|
||||
instance_name TEXT NOT NULL,
|
||||
-- Whether the process has taken out a "read" or a "write" lock.
|
||||
write_lock BOOLEAN NOT NULL,
|
||||
-- A random string generated each time an instance takes out a lock. Used by
|
||||
-- the instance to tell whether the lock is still held by it (e.g. in the
|
||||
-- case where the process stalls for a long time the lock may time out and
|
||||
-- be taken out by another instance, at which point the original instance
|
||||
-- can tell it no longer holds the lock as the tokens no longer match).
|
||||
token TEXT NOT NULL,
|
||||
last_renewed_ts BIGINT NOT NULL,
|
||||
|
||||
-- This constraint ensures that a given lock has only been acquired in read
|
||||
-- xor write mode, but not both.
|
||||
FOREIGN KEY (lock_name, lock_key, write_lock) REFERENCES worker_read_write_locks_mode (lock_name, lock_key, write_lock)
|
||||
);
|
||||
|
||||
CREATE UNIQUE INDEX worker_read_write_locks_key ON worker_read_write_locks (lock_name, lock_key, token);
|
||||
-- Ensures that only one instance can acquire a lock in write mode at a time.
|
||||
CREATE UNIQUE INDEX worker_read_write_locks_write ON worker_read_write_locks (lock_name, lock_key) WHERE write_lock;
|
||||
|
||||
|
||||
-- Add a trigger to UPSERT into `worker_read_write_locks_mode` whenever we try
|
||||
-- and acquire a lock, i.e. insert into `worker_read_write_locks`,
|
||||
CREATE TRIGGER IF NOT EXISTS upsert_read_write_lock_parent_trigger
|
||||
BEFORE INSERT ON worker_read_write_locks
|
||||
FOR EACH ROW
|
||||
BEGIN
|
||||
-- First ensure that `worker_read_write_locks_mode` doesn't have stale
|
||||
-- entries in it, as on SQLite we don't have the foreign key constraint to
|
||||
-- enforce this.
|
||||
DELETE FROM worker_read_write_locks_mode
|
||||
WHERE lock_name = NEW.lock_name AND lock_key = NEW.lock_key
|
||||
AND NOT EXISTS (
|
||||
SELECT 1 FROM worker_read_write_locks
|
||||
WHERE lock_name = NEW.lock_name AND lock_key = NEW.lock_key
|
||||
);
|
||||
|
||||
INSERT INTO worker_read_write_locks_mode (lock_name, lock_key, write_lock, token)
|
||||
VALUES (NEW.lock_name, NEW.lock_key, NEW.write_lock, NEW.token)
|
||||
ON CONFLICT (lock_name, lock_key)
|
||||
DO NOTHING;
|
||||
END;
|
||||
|
||||
-- Ensure that we keep `worker_read_write_locks_mode` up to date whenever a lock
|
||||
-- is released (i.e. a row deleted from `worker_read_write_locks`). Either we
|
||||
-- update the `worker_read_write_locks_mode.token` to match another instance
|
||||
-- that has currently acquired the lock, or we delete the row if nobody has
|
||||
-- currently acquired a lock.
|
||||
CREATE TRIGGER IF NOT EXISTS delete_read_write_lock_parent_trigger
|
||||
AFTER DELETE ON worker_read_write_locks
|
||||
FOR EACH ROW
|
||||
BEGIN
|
||||
DELETE FROM worker_read_write_locks_mode
|
||||
WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key
|
||||
AND NOT EXISTS (
|
||||
SELECT 1 FROM worker_read_write_locks
|
||||
WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key
|
||||
);
|
||||
|
||||
UPDATE worker_read_write_locks_mode
|
||||
SET token = (
|
||||
SELECT token FROM worker_read_write_locks
|
||||
WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key
|
||||
)
|
||||
WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key;
|
||||
END;
|
||||
@@ -0,0 +1,50 @@
|
||||
from synapse.storage.database import LoggingTransaction
|
||||
from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine
|
||||
|
||||
|
||||
def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None:
|
||||
"""
|
||||
Update to drop the NOT NULL constraint on column user_id so that we can cease to
|
||||
write to it without inserts to other columns triggering the constraint
|
||||
"""
|
||||
|
||||
if isinstance(database_engine, PostgresEngine):
|
||||
drop_sql = """
|
||||
ALTER TABLE profiles ALTER COLUMN user_id DROP NOT NULL
|
||||
"""
|
||||
cur.execute(drop_sql)
|
||||
else:
|
||||
# Irritatingly, in SQLite we need to rewrite the table to drop the constraint.
|
||||
cur.execute("DROP TABLE IF EXISTS temp_profiles")
|
||||
|
||||
create_sql = """
|
||||
CREATE TABLE temp_profiles (
|
||||
full_user_id text NOT NULL,
|
||||
user_id text,
|
||||
displayname text,
|
||||
avatar_url text,
|
||||
UNIQUE (full_user_id),
|
||||
UNIQUE (user_id)
|
||||
)
|
||||
"""
|
||||
cur.execute(create_sql)
|
||||
|
||||
copy_sql = """
|
||||
INSERT INTO temp_profiles (
|
||||
user_id,
|
||||
displayname,
|
||||
avatar_url,
|
||||
full_user_id)
|
||||
SELECT user_id, displayname, avatar_url, full_user_id FROM profiles
|
||||
"""
|
||||
cur.execute(copy_sql)
|
||||
|
||||
drop_sql = """
|
||||
DROP TABLE profiles
|
||||
"""
|
||||
cur.execute(drop_sql)
|
||||
|
||||
rename_sql = """
|
||||
ALTER TABLE temp_profiles RENAME to profiles
|
||||
"""
|
||||
cur.execute(rename_sql)
|
||||
@@ -0,0 +1,54 @@
|
||||
from synapse.storage.database import LoggingTransaction
|
||||
from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine
|
||||
|
||||
|
||||
def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None:
|
||||
"""
|
||||
Update to drop the NOT NULL constraint on column user_id so that we can cease to
|
||||
write to it without inserts to other columns triggering the constraint
|
||||
"""
|
||||
if isinstance(database_engine, PostgresEngine):
|
||||
drop_sql = """
|
||||
ALTER TABLE user_filters ALTER COLUMN user_id DROP NOT NULL
|
||||
"""
|
||||
cur.execute(drop_sql)
|
||||
|
||||
else:
|
||||
# Irritatingly, in SQLite we need to rewrite the table to drop the constraint.
|
||||
cur.execute("DROP TABLE IF EXISTS temp_user_filters")
|
||||
|
||||
create_sql = """
|
||||
CREATE TABLE temp_user_filters (
|
||||
full_user_id text NOT NULL,
|
||||
user_id text,
|
||||
filter_id bigint NOT NULL,
|
||||
filter_json bytea NOT NULL
|
||||
)
|
||||
"""
|
||||
cur.execute(create_sql)
|
||||
|
||||
index_sql = """
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS user_filters_full_user_id_unique ON
|
||||
temp_user_filters (full_user_id, filter_id)
|
||||
"""
|
||||
cur.execute(index_sql)
|
||||
|
||||
copy_sql = """
|
||||
INSERT INTO temp_user_filters (
|
||||
user_id,
|
||||
filter_id,
|
||||
filter_json,
|
||||
full_user_id)
|
||||
SELECT user_id, filter_id, filter_json, full_user_id FROM user_filters
|
||||
"""
|
||||
cur.execute(copy_sql)
|
||||
|
||||
drop_sql = """
|
||||
DROP TABLE user_filters
|
||||
"""
|
||||
cur.execute(drop_sql)
|
||||
|
||||
rename_sql = """
|
||||
ALTER TABLE temp_user_filters RENAME to user_filters
|
||||
"""
|
||||
cur.execute(rename_sql)
|
||||
@@ -348,22 +348,15 @@ class EventID(DomainSpecificString):
|
||||
SIGIL = "$"
|
||||
|
||||
|
||||
mxid_localpart_allowed_characters = set(
|
||||
"_-./=" + string.ascii_lowercase + string.digits
|
||||
MXID_LOCALPART_ALLOWED_CHARACTERS = set(
|
||||
"_-./=+" + string.ascii_lowercase + string.digits
|
||||
)
|
||||
# MSC4007 adds the + to the allowed characters.
|
||||
#
|
||||
# TODO If this was accepted, update the SSO code to support this, see the callers
|
||||
# of map_username_to_mxid_localpart.
|
||||
extended_mxid_localpart_allowed_characters = mxid_localpart_allowed_characters | {"+"}
|
||||
|
||||
# Guest user IDs are purely numeric.
|
||||
GUEST_USER_ID_PATTERN = re.compile(r"^\d+$")
|
||||
|
||||
|
||||
def contains_invalid_mxid_characters(
|
||||
localpart: str, use_extended_character_set: bool
|
||||
) -> bool:
|
||||
def contains_invalid_mxid_characters(localpart: str) -> bool:
|
||||
"""Check for characters not allowed in an mxid or groupid localpart
|
||||
|
||||
Args:
|
||||
@@ -374,12 +367,7 @@ def contains_invalid_mxid_characters(
|
||||
Returns:
|
||||
True if there are any naughty characters
|
||||
"""
|
||||
allowed_characters = (
|
||||
extended_mxid_localpart_allowed_characters
|
||||
if use_extended_character_set
|
||||
else mxid_localpart_allowed_characters
|
||||
)
|
||||
return any(c not in allowed_characters for c in localpart)
|
||||
return any(c not in MXID_LOCALPART_ALLOWED_CHARACTERS for c in localpart)
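As a quick illustration of the simplified check (the example localparts are arbitrary): the "+" is now always permitted, so callers no longer pass a flag.

# Illustrative calls only.
contains_invalid_mxid_characters("+441234567890")  # False: '+' is now allowed
contains_invalid_mxid_characters("Alice")          # True: upper case is still rejected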
|
||||
|
||||
|
||||
UPPER_CASE_PATTERN = re.compile(b"[A-Z_]")
|
||||
@@ -396,7 +384,7 @@ UPPER_CASE_PATTERN = re.compile(b"[A-Z_]")
|
||||
# bytes rather than strings
|
||||
#
|
||||
NON_MXID_CHARACTER_PATTERN = re.compile(
|
||||
("[^%s]" % (re.escape("".join(mxid_localpart_allowed_characters - {"="})),)).encode(
|
||||
("[^%s]" % (re.escape("".join(MXID_LOCALPART_ALLOWED_CHARACTERS - {"="})),)).encode(
|
||||
"ascii"
|
||||
)
|
||||
)
|
||||
|
||||
@@ -21,16 +21,13 @@ require. But this is probably just symptomatic of Python's package management.
|
||||
"""
|
||||
|
||||
import logging
|
||||
from importlib import metadata
|
||||
from typing import Iterable, NamedTuple, Optional
|
||||
|
||||
from packaging.requirements import Requirement
|
||||
|
||||
DISTRIBUTION_NAME = "matrix-synapse"
|
||||
|
||||
try:
|
||||
from importlib import metadata
|
||||
except ImportError:
|
||||
import importlib_metadata as metadata # type: ignore[no-redef]
|
||||
|
||||
__all__ = ["check_requirements"]
|
||||
|
||||
|
||||
@@ -25,9 +25,9 @@ class HomeserverAppStartTestCase(ConfigFileTestCase):
|
||||
# Add a blank line as otherwise the next addition ends up on a line with a comment
|
||||
self.add_lines_to_config([" "])
|
||||
self.add_lines_to_config(["worker_app: test_worker_app"])
|
||||
self.add_lines_to_config(["worker_replication_host: 127.0.0.1"])
|
||||
self.add_lines_to_config(["worker_replication_http_port: 0"])
|
||||
|
||||
self.add_lines_to_config(["worker_log_config: /data/logconfig.config"])
|
||||
self.add_lines_to_config(["instance_map:"])
|
||||
self.add_lines_to_config([" main:", " host: 127.0.0.1", " port: 1234"])
|
||||
# Ensure that starting master process with worker config raises an exception
|
||||
with self.assertRaises(ConfigError):
|
||||
synapse.app.homeserver.setup(["-c", self.config_file])
|
||||
|
||||
@@ -17,7 +17,7 @@ from unittest.mock import Mock
|
||||
from immutabledict import immutabledict
|
||||
|
||||
from synapse.config import ConfigError
|
||||
from synapse.config.workers import InstanceLocationConfig, WorkerConfig
|
||||
from synapse.config.workers import WorkerConfig
|
||||
|
||||
from tests.unittest import TestCase
|
||||
|
||||
@@ -323,28 +323,3 @@ class WorkerDutyConfigTestCase(TestCase):
|
||||
)
|
||||
self.assertTrue(worker2_config.should_notify_appservices)
|
||||
self.assertFalse(worker2_config.should_update_user_directory)
|
||||
|
||||
def test_worker_instance_map_compat(self) -> None:
|
||||
"""
|
||||
Test that `worker_replication_*` settings are compatibly handled by
|
||||
adding them to the instance map as a `main` entry.
|
||||
"""
|
||||
|
||||
worker1_config = self._make_worker_config(
|
||||
worker_app="synapse.app.generic_worker",
|
||||
worker_name="worker1",
|
||||
extras={
|
||||
"notify_appservices_from_worker": "worker2",
|
||||
"update_user_directory_from_worker": "worker1",
|
||||
"worker_replication_host": "127.0.0.42",
|
||||
"worker_replication_http_port": 1979,
|
||||
},
|
||||
)
|
||||
self.assertEqual(
|
||||
worker1_config.instance_map,
|
||||
{
|
||||
"master": InstanceLocationConfig(
|
||||
host="127.0.0.42", port=1979, tls=False
|
||||
),
|
||||
},
|
||||
)
|
||||
|
||||
@@ -587,17 +587,16 @@ class RegistrationTestCase(unittest.HomeserverTestCase):
|
||||
self.assertFalse(self.get_success(d))
|
||||
|
||||
def test_invalid_user_id(self) -> None:
|
||||
invalid_user_id = "+abcd"
|
||||
invalid_user_id = "^abcd"
|
||||
self.get_failure(
|
||||
self.handler.register_user(localpart=invalid_user_id), SynapseError
|
||||
)
|
||||
|
||||
@override_config({"experimental_features": {"msc4009_e164_mxids": True}})
|
||||
def text_extended_user_ids(self) -> None:
|
||||
"""+ should be allowed according to MSC4009."""
|
||||
valid_user_id = "+1234"
|
||||
def test_special_chars(self) -> None:
|
||||
"""Ensure that characters which are allowed in Matrix IDs work."""
|
||||
valid_user_id = "a1234_-./=+"
|
||||
user_id = self.get_success(self.handler.register_user(localpart=valid_user_id))
|
||||
self.assertEqual(user_id, valid_user_id)
|
||||
self.assertEqual(user_id, f"@{valid_user_id}:test")
|
||||
|
||||
def test_invalid_user_id_length(self) -> None:
|
||||
invalid_user_id = "x" * 256
|
||||
|
||||
@@ -356,7 +356,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
|
||||
support_user_id, ProfileInfo("I love support me", None)
|
||||
)
|
||||
)
|
||||
profile = self.get_success(self.store.get_user_in_directory(support_user_id))
|
||||
profile = self.get_success(self.store._get_user_in_directory(support_user_id))
|
||||
self.assertIsNone(profile)
|
||||
display_name = "display_name"
|
||||
|
||||
@@ -364,7 +364,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
|
||||
self.get_success(
|
||||
self.handler.handle_local_profile_change(regular_user_id, profile_info)
|
||||
)
|
||||
profile = self.get_success(self.store.get_user_in_directory(regular_user_id))
|
||||
profile = self.get_success(self.store._get_user_in_directory(regular_user_id))
|
||||
assert profile is not None
|
||||
self.assertTrue(profile["display_name"] == display_name)
|
||||
|
||||
@@ -383,7 +383,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
|
||||
)
|
||||
|
||||
# profile is in directory
|
||||
profile = self.get_success(self.store.get_user_in_directory(r_user_id))
|
||||
profile = self.get_success(self.store._get_user_in_directory(r_user_id))
|
||||
assert profile is not None
|
||||
self.assertTrue(profile["display_name"] == display_name)
|
||||
|
||||
@@ -392,7 +392,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
|
||||
self.get_success(self.handler.handle_local_user_deactivated(r_user_id))
|
||||
|
||||
# profile is not in directory
|
||||
profile = self.get_success(self.store.get_user_in_directory(r_user_id))
|
||||
profile = self.get_success(self.store._get_user_in_directory(r_user_id))
|
||||
self.assertIsNone(profile)
|
||||
|
||||
# update profile after deactivation
|
||||
@@ -401,7 +401,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
|
||||
)
|
||||
|
||||
# profile is furthermore not in directory
|
||||
profile = self.get_success(self.store.get_user_in_directory(r_user_id))
|
||||
profile = self.get_success(self.store._get_user_in_directory(r_user_id))
|
||||
self.assertIsNone(profile)
|
||||
|
||||
def test_handle_local_profile_change_with_appservice_user(self) -> None:
|
||||
@@ -411,7 +411,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
|
||||
)
|
||||
|
||||
# profile is not in directory
|
||||
profile = self.get_success(self.store.get_user_in_directory(as_user_id))
|
||||
profile = self.get_success(self.store._get_user_in_directory(as_user_id))
|
||||
self.assertIsNone(profile)
|
||||
|
||||
# update profile
|
||||
@@ -421,13 +421,13 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
|
||||
)
|
||||
|
||||
# profile is still not in directory
|
||||
profile = self.get_success(self.store.get_user_in_directory(as_user_id))
|
||||
profile = self.get_success(self.store._get_user_in_directory(as_user_id))
|
||||
self.assertIsNone(profile)
|
||||
|
||||
def test_handle_local_profile_change_with_appservice_sender(self) -> None:
|
||||
# profile is not in directory
|
||||
profile = self.get_success(
|
||||
self.store.get_user_in_directory(self.appservice.sender)
|
||||
self.store._get_user_in_directory(self.appservice.sender)
|
||||
)
|
||||
self.assertIsNone(profile)
|
||||
|
||||
@@ -441,7 +441,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
|
||||
|
||||
# profile is still not in directory
|
||||
profile = self.get_success(
|
||||
self.store.get_user_in_directory(self.appservice.sender)
|
||||
self.store._get_user_in_directory(self.appservice.sender)
|
||||
)
|
||||
self.assertIsNone(profile)
|
||||
|
||||
|
||||
@@ -12,19 +12,13 @@
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from importlib import metadata
|
||||
from typing import Dict, Tuple
|
||||
|
||||
from typing_extensions import Protocol
|
||||
|
||||
try:
|
||||
from importlib import metadata
|
||||
except ImportError:
|
||||
import importlib_metadata as metadata # type: ignore[no-redef]
|
||||
|
||||
from unittest.mock import patch
|
||||
|
||||
from pkg_resources import parse_version
|
||||
from prometheus_client.core import Sample
|
||||
from typing_extensions import Protocol
|
||||
|
||||
from synapse.app._base import _set_prometheus_client_use_created_metrics
|
||||
from synapse.metrics import REGISTRY, InFlightGauge, generate_latest
|
||||
|
||||
@@ -22,6 +22,7 @@ from twisted.test.proto_helpers import MemoryReactor
|
||||
from twisted.web.resource import Resource
|
||||
|
||||
from synapse.app.generic_worker import GenericWorkerServer
|
||||
from synapse.config.workers import InstanceTcpLocationConfig, InstanceUnixLocationConfig
|
||||
from synapse.http.site import SynapseRequest, SynapseSite
|
||||
from synapse.replication.http import ReplicationRestResource
|
||||
from synapse.replication.tcp.client import ReplicationDataHandler
|
||||
@@ -339,7 +340,7 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase):
|
||||
# `_handle_http_replication_attempt` like we do with the master HS.
|
||||
instance_name = worker_hs.get_instance_name()
|
||||
instance_loc = worker_hs.config.worker.instance_map.get(instance_name)
|
||||
if instance_loc:
|
||||
if instance_loc and isinstance(instance_loc, InstanceTcpLocationConfig):
|
||||
# Ensure the host is one that has a fake DNS entry.
|
||||
if instance_loc.host not in self.reactor.lookups:
|
||||
raise Exception(
|
||||
@@ -360,6 +361,10 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase):
|
||||
instance_loc.port,
|
||||
lambda: self._handle_http_replication_attempt(worker_hs, port),
|
||||
)
|
||||
elif instance_loc and isinstance(instance_loc, InstanceUnixLocationConfig):
|
||||
raise Exception(
|
||||
"Unix sockets are not supported for unit tests at this time."
|
||||
)
|
||||
|
||||
store = worker_hs.get_datastores().main
|
||||
store.db_pool._db_pool = self.database_pool._db_pool
|
||||
|
||||
@@ -933,6 +933,84 @@ class UsersListTestCase(unittest.HomeserverTestCase):
|
||||
self.assertEqual(1, len(non_admin_user_ids), non_admin_user_ids)
|
||||
self.assertEqual(not_approved_user, non_admin_user_ids[0])
|
||||
|
||||
def test_filter_not_user_types(self) -> None:
|
||||
"""Tests that the endpoint handles the not_user_types param"""
|
||||
|
||||
regular_user_id = self.register_user("normalo", "secret")
|
||||
|
||||
bot_user_id = self.register_user("robo", "secret")
|
||||
self.make_request(
|
||||
"PUT",
|
||||
"/_synapse/admin/v2/users/" + urllib.parse.quote(bot_user_id),
|
||||
{"user_type": UserTypes.BOT},
|
||||
access_token=self.admin_user_tok,
|
||||
)
|
||||
|
||||
support_user_id = self.register_user("foo", "secret")
|
||||
self.make_request(
|
||||
"PUT",
|
||||
"/_synapse/admin/v2/users/" + urllib.parse.quote(support_user_id),
|
||||
{"user_type": UserTypes.SUPPORT},
|
||||
access_token=self.admin_user_tok,
|
||||
)
|
||||
|
||||
def test_user_type(
|
||||
expected_user_ids: List[str], not_user_types: Optional[List[str]] = None
|
||||
) -> None:
|
||||
"""Runs a test for the not_user_types param
|
||||
Args:
|
||||
expected_user_ids: Ids of the users that are expected to be returned
|
||||
not_user_types: List of values for the not_user_types param
|
||||
"""
|
||||
|
||||
user_type_query = ""
|
||||
|
||||
if not_user_types is not None:
|
||||
user_type_query = "&".join(
|
||||
[f"not_user_type={u}" for u in not_user_types]
|
||||
)
|
||||
|
||||
test_url = f"{self.url}?{user_type_query}"
|
||||
channel = self.make_request(
|
||||
"GET",
|
||||
test_url,
|
||||
access_token=self.admin_user_tok,
|
||||
)
|
||||
|
||||
self.assertEqual(200, channel.code)
|
||||
self.assertEqual(channel.json_body["total"], len(expected_user_ids))
|
||||
self.assertEqual(
|
||||
expected_user_ids,
|
||||
[u["name"] for u in channel.json_body["users"]],
|
||||
)
|
||||
|
||||
# Request without user_types → all users expected
|
||||
test_user_type([self.admin_user, support_user_id, regular_user_id, bot_user_id])
|
||||
|
||||
# Request and exclude bot users
|
||||
test_user_type(
|
||||
[self.admin_user, support_user_id, regular_user_id],
|
||||
not_user_types=[UserTypes.BOT],
|
||||
)
|
||||
|
||||
# Request and exclude bot and support users
|
||||
test_user_type(
|
||||
[self.admin_user, regular_user_id],
|
||||
not_user_types=[UserTypes.BOT, UserTypes.SUPPORT],
|
||||
)
|
||||
|
||||
# Request and exclude empty user types → only the bot and support users are expected
|
||||
test_user_type([support_user_id, bot_user_id], not_user_types=[""])
|
||||
|
||||
# Request and exclude empty user types and bots → only the support user is expected
|
||||
test_user_type([support_user_id], not_user_types=["", UserTypes.BOT])
|
||||
|
||||
# Request and exclude a custom type (neither support nor bot) → expect all users
|
||||
test_user_type(
|
||||
[self.admin_user, support_user_id, regular_user_id, bot_user_id],
|
||||
not_user_types=["custom"],
|
||||
)
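For reference, one of the helper calls above spelled out as a direct request; this is illustrative only and simply expands the query string that test_user_type builds.

# Exclude bot and support accounts from the admin users list.
channel = self.make_request(
    "GET",
    f"{self.url}?not_user_type={UserTypes.BOT}&not_user_type={UserTypes.SUPPORT}",
    access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code)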
|
||||
|
||||
def test_erasure_status(self) -> None:
|
||||
# Create a new user.
|
||||
user_id = self.register_user("eraseme", "eraseme")
|
||||
@@ -2394,7 +2472,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
|
||||
"""
|
||||
|
||||
# is in user directory
|
||||
profile = self.get_success(self.store.get_user_in_directory(self.other_user))
|
||||
profile = self.get_success(self.store._get_user_in_directory(self.other_user))
|
||||
assert profile is not None
|
||||
self.assertTrue(profile["display_name"] == "User")
|
||||
|
||||
@@ -2411,7 +2489,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
|
||||
self.assertTrue(channel.json_body["deactivated"])
|
||||
|
||||
# is not in user directory
|
||||
profile = self.get_success(self.store.get_user_in_directory(self.other_user))
|
||||
profile = self.get_success(self.store._get_user_in_directory(self.other_user))
|
||||
self.assertIsNone(profile)
|
||||
|
||||
# Set new displayname user
|
||||
@@ -2428,7 +2506,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
|
||||
self.assertEqual("Foobar", channel.json_body["displayname"])
|
||||
|
||||
# is not in user directory
|
||||
profile = self.get_success(self.store.get_user_in_directory(self.other_user))
|
||||
profile = self.get_success(self.store._get_user_in_directory(self.other_user))
|
||||
self.assertIsNone(profile)
|
||||
|
||||
def test_reactivate_user(self) -> None:
|
||||
|
||||
@@ -53,6 +53,7 @@ from twisted.internet.interfaces import (
|
||||
IConnector,
|
||||
IConsumer,
|
||||
IHostnameResolver,
|
||||
IListeningPort,
|
||||
IProducer,
|
||||
IProtocol,
|
||||
IPullProducer,
|
||||
@@ -62,7 +63,7 @@ from twisted.internet.interfaces import (
|
||||
IResolverSimple,
|
||||
ITransport,
|
||||
)
|
||||
from twisted.internet.protocol import ClientFactory, DatagramProtocol
|
||||
from twisted.internet.protocol import ClientFactory, DatagramProtocol, Factory
|
||||
from twisted.python import threadpool
|
||||
from twisted.python.failure import Failure
|
||||
from twisted.test.proto_helpers import AccumulatingProtocol, MemoryReactorClock
|
||||
@@ -523,6 +524,35 @@ class ThreadedMemoryReactorClock(MemoryReactorClock):
|
||||
"""
|
||||
self._tcp_callbacks[(host, port)] = callback
|
||||
|
||||
def connectUNIX(
|
||||
self,
|
||||
address: str,
|
||||
factory: ClientFactory,
|
||||
timeout: float = 30,
|
||||
checkPID: int = 0,
|
||||
) -> IConnector:
|
||||
"""
|
||||
Unix sockets aren't supported for unit tests yet. Make it obvious to any
|
||||
developer trying it out that they will need to do some work before being able
|
||||
to use it in tests.
|
||||
"""
|
||||
raise Exception("Unix sockets are not implemented for tests yet, sorry.")
|
||||
|
||||
def listenUNIX(
|
||||
self,
|
||||
address: str,
|
||||
factory: Factory,
|
||||
backlog: int = 50,
|
||||
mode: int = 0o666,
|
||||
wantPID: int = 0,
|
||||
) -> IListeningPort:
|
||||
"""
|
||||
Unix sockets aren't supported for unit tests yet. Make it obvious to any
|
||||
developer trying it out that they will need to do some work before being able
|
||||
to use it in tests.
|
||||
"""
|
||||
raise Exception("Unix sockets are not implemented for tests, sorry")
|
||||
|
||||
def connectTCP(
|
||||
self,
|
||||
host: str,
|
||||
|
||||
@@ -166,4 +166,285 @@ class LockTestCase(unittest.HomeserverTestCase):
|
||||
# Now call the shutdown code
|
||||
self.get_success(self.store._on_shutdown())
|
||||
|
||||
self.assertEqual(self.store._live_tokens, {})
|
||||
self.assertEqual(self.store._live_lock_tokens, {})
|
||||
|
||||
|
||||
class ReadWriteLockTestCase(unittest.HomeserverTestCase):
|
||||
"""Test the read/write lock implementation."""
|
||||
|
||||
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
|
||||
self.store = hs.get_datastores().main
|
||||
|
||||
def test_acquire_write_contention(self) -> None:
|
||||
"""Test that we can only acquire one write lock at a time"""
|
||||
# Track the number of tasks holding the lock.
|
||||
# Should be at most 1.
|
||||
in_lock = 0
|
||||
max_in_lock = 0
|
||||
|
||||
release_lock: "Deferred[None]" = Deferred()
|
||||
|
||||
async def task() -> None:
|
||||
nonlocal in_lock
|
||||
nonlocal max_in_lock
|
||||
|
||||
lock = await self.store.try_acquire_read_write_lock(
|
||||
"name", "key", write=True
|
||||
)
|
||||
if not lock:
|
||||
return
|
||||
|
||||
async with lock:
|
||||
in_lock += 1
|
||||
max_in_lock = max(max_in_lock, in_lock)
|
||||
|
||||
# Block to allow other tasks to attempt to take the lock.
|
||||
await release_lock
|
||||
|
||||
in_lock -= 1
|
||||
|
||||
# Start 3 tasks.
|
||||
task1 = defer.ensureDeferred(task())
|
||||
task2 = defer.ensureDeferred(task())
|
||||
task3 = defer.ensureDeferred(task())
|
||||
|
||||
# Give the reactor a kick so that the database transaction returns.
|
||||
self.pump()
|
||||
|
||||
release_lock.callback(None)
|
||||
|
||||
# Run the tasks to completion.
|
||||
# To work around `Linearizer`s using a different reactor to sleep when
|
||||
# contended (#12841), we call `runUntilCurrent` on
|
||||
# `twisted.internet.reactor`, which is a different reactor to that used
|
||||
# by the homeserver.
|
||||
assert isinstance(reactor, ReactorBase)
|
||||
self.get_success(task1)
|
||||
reactor.runUntilCurrent()
|
||||
self.get_success(task2)
|
||||
reactor.runUntilCurrent()
|
||||
self.get_success(task3)
|
||||
|
||||
# At most one task should have held the lock at a time.
|
||||
self.assertEqual(max_in_lock, 1)
|
||||
|
||||
def test_acquire_multiple_reads(self) -> None:
|
||||
"""Test that we can acquire multiple read locks at a time"""
|
||||
# Track the number of tasks holding the lock.
|
||||
in_lock = 0
|
||||
max_in_lock = 0
|
||||
|
||||
release_lock: "Deferred[None]" = Deferred()
|
||||
|
||||
async def task() -> None:
|
||||
nonlocal in_lock
|
||||
nonlocal max_in_lock
|
||||
|
||||
lock = await self.store.try_acquire_read_write_lock(
|
||||
"name", "key", write=False
|
||||
)
|
||||
if not lock:
|
||||
return
|
||||
|
||||
async with lock:
|
||||
in_lock += 1
|
||||
max_in_lock = max(max_in_lock, in_lock)
|
||||
|
||||
# Block to allow other tasks to attempt to take the lock.
|
||||
await release_lock
|
||||
|
||||
in_lock -= 1
|
||||
|
||||
# Start 3 tasks.
|
||||
task1 = defer.ensureDeferred(task())
|
||||
task2 = defer.ensureDeferred(task())
|
||||
task3 = defer.ensureDeferred(task())
|
||||
|
||||
# Give the reactor a kick so that the database transaction returns.
|
||||
self.pump()
|
||||
|
||||
release_lock.callback(None)
|
||||
|
||||
# Run the tasks to completion.
|
||||
# To work around `Linearizer`s using a different reactor to sleep when
|
||||
# contended (#12841), we call `runUntilCurrent` on
|
||||
# `twisted.internet.reactor`, which is a different reactor to that used
|
||||
# by the homeserver.
|
||||
assert isinstance(reactor, ReactorBase)
|
||||
self.get_success(task1)
|
||||
reactor.runUntilCurrent()
|
||||
self.get_success(task2)
|
||||
reactor.runUntilCurrent()
|
||||
self.get_success(task3)
|
||||
|
||||
# All three tasks should have been able to hold the lock at the same time.
|
||||
self.assertEqual(max_in_lock, 3)
|
||||
|
||||
def test_write_lock_acquired(self) -> None:
|
||||
"""Test that we can take out a write lock and that while we hold it
|
||||
nobody else can take it out.
|
||||
"""
|
||||
# First to acquire this lock, so it should complete
|
||||
lock = self.get_success(
|
||||
self.store.try_acquire_read_write_lock("name", "key", write=True)
|
||||
)
|
||||
assert lock is not None
|
||||
|
||||
# Enter the context manager
|
||||
self.get_success(lock.__aenter__())
|
||||
|
||||
# Attempting to acquire the lock again fails, as both read and write.
|
||||
lock2 = self.get_success(
|
||||
self.store.try_acquire_read_write_lock("name", "key", write=True)
|
||||
)
|
||||
self.assertIsNone(lock2)
|
||||
|
||||
lock3 = self.get_success(
|
||||
self.store.try_acquire_read_write_lock("name", "key", write=False)
|
||||
)
|
||||
self.assertIsNone(lock3)
|
||||
|
||||
# Calling `is_still_valid` reports true.
|
||||
self.assertTrue(self.get_success(lock.is_still_valid()))
|
||||
|
||||
# Drop the lock
|
||||
self.get_success(lock.__aexit__(None, None, None))
|
||||
|
||||
# We can now acquire the lock again.
|
||||
lock4 = self.get_success(
|
||||
self.store.try_acquire_read_write_lock("name", "key", write=True)
|
||||
)
|
||||
assert lock4 is not None
|
||||
self.get_success(lock4.__aenter__())
|
||||
self.get_success(lock4.__aexit__(None, None, None))
|
||||
|
||||
def test_read_lock_acquired(self) -> None:
|
||||
"""Test that we can take out a read lock and that while we hold it
|
||||
only other reads can use it.
|
||||
"""
|
||||
# First to acquire this lock, so it should complete
|
||||
lock = self.get_success(
|
||||
self.store.try_acquire_read_write_lock("name", "key", write=False)
|
||||
)
|
||||
assert lock is not None
|
||||
|
||||
# Enter the context manager
|
||||
self.get_success(lock.__aenter__())
|
||||
|
||||
# Attempting to acquire the write lock fails
|
||||
lock2 = self.get_success(
|
||||
self.store.try_acquire_read_write_lock("name", "key", write=True)
|
||||
)
|
||||
self.assertIsNone(lock2)
|
||||
|
||||
# Attempting to acquire a read lock succeeds
|
||||
lock3 = self.get_success(
|
||||
self.store.try_acquire_read_write_lock("name", "key", write=False)
|
||||
)
|
||||
assert lock3 is not None
|
||||
self.get_success(lock3.__aenter__())
|
||||
|
||||
# Calling `is_still_valid` reports true.
|
||||
self.assertTrue(self.get_success(lock.is_still_valid()))
|
||||
|
||||
# Drop the first lock
|
||||
self.get_success(lock.__aexit__(None, None, None))
|
||||
|
||||
# Attempting to acquire the write lock still fails, as lock3 is still
|
||||
# active.
|
||||
lock4 = self.get_success(
|
||||
self.store.try_acquire_read_write_lock("name", "key", write=True)
|
||||
)
|
||||
self.assertIsNone(lock4)
|
||||
|
||||
# Drop the still open third lock
|
||||
self.get_success(lock3.__aexit__(None, None, None))
|
||||
|
||||
# We can now acquire the lock again.
|
||||
lock5 = self.get_success(
|
||||
self.store.try_acquire_read_write_lock("name", "key", write=True)
|
||||
)
|
||||
assert lock5 is not None
|
||||
self.get_success(lock5.__aenter__())
|
||||
self.get_success(lock5.__aexit__(None, None, None))
|
||||
|
||||
def test_maintain_lock(self) -> None:
|
||||
"""Test that we don't time out locks while they're still active (lock is
|
||||
renewed in the background if the process is still alive)"""
|
||||
|
||||
lock = self.get_success(
|
||||
self.store.try_acquire_read_write_lock("name", "key", write=True)
|
||||
)
|
||||
assert lock is not None
|
||||
|
||||
self.get_success(lock.__aenter__())
|
||||
|
||||
# Wait for ages with the lock, we should not be able to get the lock.
|
||||
self.reactor.advance(5 * _LOCK_TIMEOUT_MS / 1000)
|
||||
self.pump()
|
||||
|
||||
lock2 = self.get_success(
|
||||
self.store.try_acquire_read_write_lock("name", "key", write=True)
|
||||
)
|
||||
self.assertIsNone(lock2)
|
||||
|
||||
self.get_success(lock.__aexit__(None, None, None))
|
||||
|
||||
def test_timeout_lock(self) -> None:
|
||||
"""Test that we time out locks if they're not updated for ages"""
|
||||
|
||||
lock = self.get_success(
|
||||
self.store.try_acquire_read_write_lock("name", "key", write=True)
|
||||
)
|
||||
assert lock is not None
|
||||
|
||||
self.get_success(lock.__aenter__())
|
||||
|
||||
# We simulate the process getting stuck by cancelling the looping call
|
||||
# that keeps the lock active.
|
||||
lock._looping_call.stop()
|
||||
|
||||
# Wait for the lock to timeout.
|
||||
self.reactor.advance(2 * _LOCK_TIMEOUT_MS / 1000)
|
||||
|
||||
lock2 = self.get_success(
|
||||
self.store.try_acquire_read_write_lock("name", "key", write=True)
|
||||
)
|
||||
self.assertIsNotNone(lock2)
|
||||
|
||||
self.assertFalse(self.get_success(lock.is_still_valid()))
|
||||
|
||||
def test_drop(self) -> None:
|
||||
"""Test that dropping the context manager means we stop renewing the lock"""
|
||||
|
||||
lock = self.get_success(
|
||||
self.store.try_acquire_read_write_lock("name", "key", write=True)
|
||||
)
|
||||
self.assertIsNotNone(lock)
|
||||
|
||||
del lock
|
||||
|
||||
# Wait for the lock to timeout.
|
||||
self.reactor.advance(2 * _LOCK_TIMEOUT_MS / 1000)
|
||||
|
||||
lock2 = self.get_success(
|
||||
self.store.try_acquire_read_write_lock("name", "key", write=True)
|
||||
)
|
||||
self.assertIsNotNone(lock2)
|
||||
|
||||
def test_shutdown(self) -> None:
|
||||
"""Test that shutting down Synapse releases the locks"""
|
||||
# Acquire two locks
|
||||
lock = self.get_success(
|
||||
self.store.try_acquire_read_write_lock("name", "key", write=True)
|
||||
)
|
||||
self.assertIsNotNone(lock)
|
||||
lock2 = self.get_success(
|
||||
self.store.try_acquire_read_write_lock("name", "key2", write=True)
|
||||
)
|
||||
self.assertIsNotNone(lock2)
|
||||
|
||||
# Now call the shutdown code
|
||||
self.get_success(self.store._on_shutdown())
|
||||
|
||||
self.assertEqual(self.store._live_read_write_lock_tokens, {})
|
||||
|
||||
@@ -20,7 +20,14 @@ from twisted.internet.defer import Deferred, ensureDeferred
|
||||
from twisted.test.proto_helpers import MemoryReactor
|
||||
|
||||
from synapse.server import HomeServer
|
||||
from synapse.storage.background_updates import BackgroundUpdater
|
||||
from synapse.storage.background_updates import (
|
||||
BackgroundUpdater,
|
||||
ForeignKeyConstraint,
|
||||
NotNullConstraint,
|
||||
run_validate_constraint_and_delete_rows_schema_delta,
|
||||
)
|
||||
from synapse.storage.database import LoggingTransaction
|
||||
from synapse.storage.engines import PostgresEngine, Sqlite3Engine
|
||||
from synapse.types import JsonDict
|
||||
from synapse.util import Clock
|
||||
|
||||
@@ -404,3 +411,225 @@ class BackgroundUpdateControllerTestCase(unittest.HomeserverTestCase):
|
||||
self.pump()
|
||||
self._update_ctx_manager.__aexit__.assert_called()
|
||||
self.get_success(do_update_d)
|
||||
|
||||
|
||||
class BackgroundUpdateValidateConstraintTestCase(unittest.HomeserverTestCase):
|
||||
"""Tests the validate contraint and delete background handlers."""
|
||||
|
||||
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
|
||||
self.updates: BackgroundUpdater = self.hs.get_datastores().main.db_pool.updates
|
||||
# the base test class should have run the real bg updates for us
|
||||
self.assertTrue(
|
||||
self.get_success(self.updates.has_completed_background_updates())
|
||||
)
|
||||
|
||||
self.store = self.hs.get_datastores().main
|
||||
|
||||
def test_not_null_constraint(self) -> None:
|
||||
# Create the initial tables, where we have some invalid data.
|
||||
"""Tests adding a not null constraint."""
|
||||
table_sql = """
|
||||
CREATE TABLE test_constraint(
|
||||
a INT PRIMARY KEY,
|
||||
b INT
|
||||
);
|
||||
"""
|
||||
self.get_success(
|
||||
self.store.db_pool.execute(
|
||||
"test_not_null_constraint", lambda _: None, table_sql
|
||||
)
|
||||
)
|
||||
|
||||
# We add an index so that we can check that it's correctly recreated when
|
||||
# using SQLite.
|
||||
index_sql = "CREATE INDEX test_index ON test_constraint(a)"
|
||||
self.get_success(
|
||||
self.store.db_pool.execute(
|
||||
"test_not_null_constraint", lambda _: None, index_sql
|
||||
)
|
||||
)
|
||||
|
||||
self.get_success(
|
||||
self.store.db_pool.simple_insert("test_constraint", {"a": 1, "b": 1})
|
||||
)
|
||||
self.get_success(
|
||||
self.store.db_pool.simple_insert("test_constraint", {"a": 2, "b": None})
|
||||
)
|
||||
self.get_success(
|
||||
self.store.db_pool.simple_insert("test_constraint", {"a": 3, "b": 3})
|
||||
)
|
||||
|
||||
# Now lets do the migration
|
||||
|
||||
table2_sqlite = """
|
||||
CREATE TABLE test_constraint2(
|
||||
a INT PRIMARY KEY,
|
||||
b INT,
|
||||
CONSTRAINT test_constraint_name CHECK (b is NOT NULL)
|
||||
);
|
||||
"""
|
||||
|
||||
def delta(txn: LoggingTransaction) -> None:
|
||||
run_validate_constraint_and_delete_rows_schema_delta(
|
||||
txn,
|
||||
ordering=1000,
|
||||
update_name="test_bg_update",
|
||||
table="test_constraint",
|
||||
constraint_name="test_constraint_name",
|
||||
constraint=NotNullConstraint("b"),
|
||||
sqlite_table_name="test_constraint2",
|
||||
sqlite_table_schema=table2_sqlite,
|
||||
)
|
||||
|
||||
self.get_success(
|
||||
self.store.db_pool.runInteraction(
|
||||
"test_not_null_constraint",
|
||||
delta,
|
||||
)
|
||||
)
|
||||
|
||||
if isinstance(self.store.database_engine, PostgresEngine):
|
||||
# Postgres uses a background update
|
||||
self.updates.register_background_validate_constraint_and_delete_rows(
|
||||
"test_bg_update",
|
||||
table="test_constraint",
|
||||
constraint_name="test_constraint_name",
|
||||
constraint=NotNullConstraint("b"),
|
||||
unique_columns=["a"],
|
||||
)
|
||||
|
||||
# Tell the DataStore that it hasn't finished all updates yet
|
||||
self.store.db_pool.updates._all_done = False
|
||||
|
||||
# Now let's actually drive the updates to completion
|
||||
self.wait_for_background_updates()
|
||||
|
||||
# Check the correct values are in the new table.
|
||||
rows = self.get_success(
|
||||
self.store.db_pool.simple_select_list(
|
||||
table="test_constraint",
|
||||
keyvalues={},
|
||||
retcols=("a", "b"),
|
||||
)
|
||||
)
|
||||
|
||||
self.assertCountEqual(rows, [{"a": 1, "b": 1}, {"a": 3, "b": 3}])
|
||||
|
||||
# And check that invalid rows get correctly rejected.
|
||||
self.get_failure(
|
||||
self.store.db_pool.simple_insert("test_constraint", {"a": 2, "b": None}),
|
||||
exc=self.store.database_engine.module.IntegrityError,
|
||||
)
|
||||
|
||||
# Check the index is still there for SQLite.
|
||||
if isinstance(self.store.database_engine, Sqlite3Engine):
|
||||
# Ensure the index exists in the schema.
|
||||
self.get_success(
|
||||
self.store.db_pool.simple_select_one_onecol(
|
||||
table="sqlite_master",
|
||||
keyvalues={"tbl_name": "test_constraint"},
|
||||
retcol="name",
|
||||
)
|
||||
)
|
||||
|
||||
def test_foreign_constraint(self) -> None:
|
||||
"""Tests adding a not foreign key constraint."""
|
||||
|
||||
# Create the initial tables, where we have some invalid data.
|
||||
base_sql = """
|
||||
CREATE TABLE base_table(
|
||||
b INT PRIMARY KEY
|
||||
);
|
||||
"""
|
||||
|
||||
table_sql = """
|
||||
CREATE TABLE test_constraint(
|
||||
a INT PRIMARY KEY,
|
||||
b INT NOT NULL
|
||||
);
|
||||
"""
|
||||
self.get_success(
|
||||
self.store.db_pool.execute(
|
||||
"test_foreign_key_constraint", lambda _: None, base_sql
|
||||
)
|
||||
)
|
||||
self.get_success(
|
||||
self.store.db_pool.execute(
|
||||
"test_foreign_key_constraint", lambda _: None, table_sql
|
||||
)
|
||||
)
|
||||
|
||||
self.get_success(self.store.db_pool.simple_insert("base_table", {"b": 1}))
|
||||
self.get_success(
|
||||
self.store.db_pool.simple_insert("test_constraint", {"a": 1, "b": 1})
|
||||
)
|
||||
self.get_success(
|
||||
self.store.db_pool.simple_insert("test_constraint", {"a": 2, "b": 2})
|
||||
)
|
||||
self.get_success(self.store.db_pool.simple_insert("base_table", {"b": 3}))
|
||||
self.get_success(
|
||||
self.store.db_pool.simple_insert("test_constraint", {"a": 3, "b": 3})
|
||||
)
|
||||
|
||||
table2_sqlite = """
|
||||
CREATE TABLE test_constraint2(
|
||||
a INT PRIMARY KEY,
|
||||
b INT NOT NULL,
|
||||
CONSTRAINT test_constraint_name FOREIGN KEY (b) REFERENCES base_table (b)
|
||||
);
|
||||
"""
|
||||
|
||||
def delta(txn: LoggingTransaction) -> None:
|
||||
run_validate_constraint_and_delete_rows_schema_delta(
|
||||
txn,
|
||||
ordering=1000,
|
||||
update_name="test_bg_update",
|
||||
table="test_constraint",
|
||||
constraint_name="test_constraint_name",
|
||||
constraint=ForeignKeyConstraint(
|
||||
"base_table", [("b", "b")], deferred=False
|
||||
),
|
||||
sqlite_table_name="test_constraint2",
|
||||
sqlite_table_schema=table2_sqlite,
|
||||
)
|
||||
|
||||
self.get_success(
|
||||
self.store.db_pool.runInteraction(
|
||||
"test_foreign_key_constraint",
|
||||
delta,
|
||||
)
|
||||
)
|
||||
|
||||
if isinstance(self.store.database_engine, PostgresEngine):
|
||||
# Postgres uses a background update
|
||||
self.updates.register_background_validate_constraint_and_delete_rows(
|
||||
"test_bg_update",
|
||||
table="test_constraint",
|
||||
constraint_name="test_constraint_name",
|
||||
constraint=ForeignKeyConstraint(
|
||||
"base_table", [("b", "b")], deferred=False
|
||||
),
|
||||
unique_columns=["a"],
|
||||
)
|
||||
|
||||
# Tell the DataStore that it hasn't finished all updates yet
|
||||
self.store.db_pool.updates._all_done = False
|
||||
|
||||
# Now let's actually drive the updates to completion
|
||||
self.wait_for_background_updates()
|
||||
|
||||
# Check the correct values are in the new table.
|
||||
rows = self.get_success(
|
||||
self.store.db_pool.simple_select_list(
|
||||
table="test_constraint",
|
||||
keyvalues={},
|
||||
retcols=("a", "b"),
|
||||
)
|
||||
)
|
||||
self.assertCountEqual(rows, [{"a": 1, "b": 1}, {"a": 3, "b": 3}])
|
||||
|
||||
# And check that invalid rows get correctly rejected.
|
||||
self.get_failure(
|
||||
self.store.db_pool.simple_insert("test_constraint", {"a": 2, "b": 2}),
|
||||
exc=self.store.database_engine.module.IntegrityError,
|
||||
)
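On Postgres the test above drives register_background_validate_constraint_and_delete_rows rather than a table rewrite. As a rough sketch of the net effect for this schema (assuming the helper ultimately issues ordinary NOT VALID / VALIDATE CONSTRAINT DDL; the function below is illustrative only, not Synapse's actual implementation, and reuses the LoggingTransaction type from the test above):

def sketch_postgres_constraint_update(txn: LoggingTransaction) -> None:
    # Add the constraint without checking existing rows (cheap, brief lock).
    txn.execute(
        "ALTER TABLE test_constraint ADD CONSTRAINT test_constraint_name "
        "FOREIGN KEY (b) REFERENCES base_table (b) NOT VALID"
    )
    # The background update then deletes rows that would fail validation
    # (here the {"a": 2, "b": 2} row, whose b has no match in base_table) ...
    txn.execute(
        "DELETE FROM test_constraint WHERE b NOT IN (SELECT b FROM base_table)"
    )
    # ... and finally validates the surviving rows so the constraint is enforced.
    txn.execute(
        "ALTER TABLE test_constraint VALIDATE CONSTRAINT test_constraint_name"
    )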

@@ -20,6 +20,7 @@ from parameterized import parameterized

from twisted.test.proto_helpers import MemoryReactor

from synapse.api.constants import EventTypes
from synapse.api.room_versions import (
    KNOWN_ROOM_VERSIONS,
    EventFormatVersions,
@@ -98,8 +99,32 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
        room2 = "#room2"
        room3 = "#room3"

-        def insert_event(txn: Cursor, i: int, room_id: str) -> None:
+        def insert_event(txn: LoggingTransaction, i: int, room_id: str) -> None:
            event_id = "$event_%i:local" % i

            # We need to insert into events table to get around the foreign key constraint.
            self.store.db_pool.simple_insert_txn(
                txn,
                table="events",
                values={
                    "instance_name": "master",
                    "stream_ordering": self.store._stream_id_gen.get_next_txn(txn),
                    "topological_ordering": 1,
                    "depth": 1,
                    "event_id": event_id,
                    "room_id": room_id,
                    "type": EventTypes.Message,
                    "processed": True,
                    "outlier": False,
                    "origin_server_ts": 0,
                    "received_ts": 0,
                    "sender": "@user:local",
                    "contains_url": False,
                    "state_key": None,
                    "rejection_reason": None,
                },
            )

            txn.execute(
                (
                    "INSERT INTO event_forward_extremities (room_id, event_id) "
@@ -113,10 +138,14 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
                self.store.db_pool.runInteraction("insert", insert_event, i, room1)
            )
            self.get_success(
-                self.store.db_pool.runInteraction("insert", insert_event, i, room2)
+                self.store.db_pool.runInteraction(
+                    "insert", insert_event, i + 100, room2
+                )
            )
            self.get_success(
-                self.store.db_pool.runInteraction("insert", insert_event, i, room3)
+                self.store.db_pool.runInteraction(
+                    "insert", insert_event, i + 200, room3
+                )
            )

        # Test simple case

@@ -15,8 +15,6 @@
from twisted.test.proto_helpers import MemoryReactor

from synapse.server import HomeServer
-from synapse.storage.database import LoggingTransaction
-from synapse.storage.engines import PostgresEngine
from synapse.types import UserID
from synapse.util import Clock

@@ -64,64 +62,3 @@ class ProfileStoreTestCase(unittest.HomeserverTestCase):
        self.assertIsNone(
            self.get_success(self.store.get_profile_avatar_url(self.u_frank))
        )

    def test_profiles_bg_migration(self) -> None:
        """
        Test background job that copies entries from column user_id to full_user_id, adding
        the hostname in the process.
        """
        updater = self.hs.get_datastores().main.db_pool.updates

        # drop the constraint so we can insert nulls in full_user_id to populate the test
        if isinstance(self.store.database_engine, PostgresEngine):

            def f(txn: LoggingTransaction) -> None:
                txn.execute(
                    "ALTER TABLE profiles DROP CONSTRAINT full_user_id_not_null"
                )

            self.get_success(self.store.db_pool.runInteraction("", f))

        for i in range(0, 70):
            self.get_success(
                self.store.db_pool.simple_insert(
                    "profiles",
                    {"user_id": f"hello{i:02}"},
                )
            )

        # re-add the constraint so that when it's validated it actually exists
        if isinstance(self.store.database_engine, PostgresEngine):

            def f(txn: LoggingTransaction) -> None:
                txn.execute(
                    "ALTER TABLE profiles ADD CONSTRAINT full_user_id_not_null CHECK (full_user_id IS NOT NULL) NOT VALID"
                )

            self.get_success(self.store.db_pool.runInteraction("", f))

        self.get_success(
            self.store.db_pool.simple_insert(
                "background_updates",
                values={
                    "update_name": "populate_full_user_id_profiles",
                    "progress_json": "{}",
                },
            )
        )

        self.get_success(
            updater.run_background_updates(False),
        )

        expected_values = []
        for i in range(0, 70):
            expected_values.append((f"@hello{i:02}:{self.hs.hostname}",))

        res = self.get_success(
            self.store.db_pool.execute(
                "", None, "SELECT full_user_id from profiles ORDER BY full_user_id"
            )
        )
        self.assertEqual(len(res), len(expected_values))
        self.assertEqual(res, expected_values)
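For context, the background update exercised here rewrites each bare localpart in user_id into a fully qualified Matrix ID in full_user_id, which is what the expected_values list above encodes. A minimal sketch of that transformation, using the standard UserID helper from synapse.types (the server name below is just a placeholder):

from synapse.types import UserID

# "hello00" stored in profiles.user_id becomes "@hello00:<server name>"
full_user_id = UserID("hello00", "example.com").to_string()
assert full_user_id == "@hello00:example.com"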

@@ -1,94 +0,0 @@
# Copyright 2023 The Matrix.org Foundation C.I.C
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from twisted.test.proto_helpers import MemoryReactor

from synapse.server import HomeServer
from synapse.storage.database import LoggingTransaction
from synapse.storage.engines import PostgresEngine
from synapse.util import Clock

from tests import unittest


class UserFiltersStoreTestCase(unittest.HomeserverTestCase):
    """
    Test background migration that copies entries from column user_id to full_user_id, adding
    the hostname in the process.
    """

    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
        self.store = hs.get_datastores().main

    def test_bg_migration(self) -> None:
        updater = self.hs.get_datastores().main.db_pool.updates

        # drop the constraint so we can insert nulls in full_user_id to populate the test
        if isinstance(self.store.database_engine, PostgresEngine):

            def f(txn: LoggingTransaction) -> None:
                txn.execute(
                    "ALTER TABLE user_filters DROP CONSTRAINT full_user_id_not_null"
                )

            self.get_success(self.store.db_pool.runInteraction("", f))

        for i in range(0, 70):
            self.get_success(
                self.store.db_pool.simple_insert(
                    "user_filters",
                    {
                        "user_id": f"hello{i:02}",
                        "filter_id": i,
                        "filter_json": bytearray(i),
                    },
                )
            )

        # re-add the constraint so that when it's validated it actually exists
        if isinstance(self.store.database_engine, PostgresEngine):

            def f(txn: LoggingTransaction) -> None:
                txn.execute(
                    "ALTER TABLE user_filters ADD CONSTRAINT full_user_id_not_null CHECK (full_user_id IS NOT NULL) NOT VALID"
                )

            self.get_success(self.store.db_pool.runInteraction("", f))

        self.get_success(
            self.store.db_pool.simple_insert(
                "background_updates",
                values={
                    "update_name": "populate_full_user_id_user_filters",
                    "progress_json": "{}",
                },
            )
        )

        self.get_success(
            updater.run_background_updates(False),
        )

        expected_values = []
        for i in range(0, 70):
            expected_values.append((f"@hello{i:02}:{self.hs.hostname}",))

        res = self.get_success(
            self.store.db_pool.execute(
                "", None, "SELECT full_user_id from user_filters ORDER BY full_user_id"
            )
        )
        self.assertEqual(len(res), len(expected_values))
        self.assertEqual(res, expected_values)

@@ -268,7 +268,7 @@ class OptionsResourceTests(unittest.TestCase):
        )
        self.assertEqual(
            channel.headers.getRawHeaders(b"Access-Control-Expose-Headers"),
-            [b"Synapse-Trace-Id"],
+            [b"Synapse-Trace-Id, Server"],
        )

    def _check_cors_msc3886_headers(self, channel: FakeChannel) -> None: