Compare commits
14 Commits
rei/synwor ... rei/synwor
| Author | SHA1 | Date |
|---|---|---|
|  | c747b4b37d |  |
|  | 7094bcf251 |  |
|  | 5af3ce8bf8 |  |
|  | 7fd218ad8b |  |
|  | 30627554d4 |  |
|  | 3c41609a9e |  |
|  | 18695c7631 |  |
|  | c76da0d948 |  |
|  | 3f203e1b10 |  |
|  | ae7f8cccc2 |  |
|  | 37112f76f7 |  |
|  | f61931fb8a |  |
|  | ae1712dd71 |  |
|  | c1229e0218 |  |
`.github/workflows/tests.yml` (4 changes; vendored)
@@ -35,8 +35,8 @@ jobs:
          set -o pipefail
          synapse/scripts-dev/complement.sh --build-only
          while :; do
            POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} SYNAPSE_TEST_LOG_LEVEL=DEBUG COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -f -run TestSendJoinPartialStateResponse -json 2>&1 | gotestfmt | tee /tmp/xxx
            if grep ❌ /tmp/xxx; then
            POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} SYNAPSE_TEST_LOG_LEVEL=DEBUG COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -f -run TestMembersLocal -json 2>&1 | gotestfmt | tee /tmp/xxx
            if grep FAIL /tmp/xxx; then
              break
            fi
          done
`CHANGES.md` (18 changes)
@@ -1,21 +1,3 @@
Synapse 1.62.0 (2022-07-05)
===========================

No significant changes since 1.62.0rc3.

Authors of spam-checker plugins should consult the [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.62/docs/upgrade.md#upgrading-to-v1620) to learn about the enriched signatures for spam checker callbacks, which are supported with this release of Synapse.


Synapse 1.62.0rc3 (2022-07-04)
==============================

Bugfixes
--------

- Update the version of the [ldap3 plugin](https://github.com/matrix-org/matrix-synapse-ldap3/) included in the `matrixdotorg/synapse` DockerHub images and the Debian packages hosted on `packages.matrix.org` to 0.2.1. This fixes [a bug](https://github.com/matrix-org/matrix-synapse-ldap3/pull/163) with usernames containing uppercase characters. ([\#13156](https://github.com/matrix-org/synapse/issues/13156))
- Fix a bug introduced in Synapse 1.62.0rc1 affecting unread counts for users on small servers. ([\#13168](https://github.com/matrix-org/synapse/issues/13168))


Synapse 1.62.0rc2 (2022-07-01)
==============================

@@ -1 +0,0 @@
Add type annotations to `tests.utils`.

@@ -1 +0,0 @@
Faster room joins: Handle race between persisting an event and un-partial stating a room.

@@ -1 +0,0 @@
Fix application service not being able to join remote federated room without a profile set.

@@ -1 +0,0 @@
Document how the Synapse team does reviews.

@@ -1 +0,0 @@
Add type annotations to `tests.server`.

@@ -1 +0,0 @@
Faster room joins: fix race in recalculation of current room state.

@@ -1 +0,0 @@
Add the ability to set the log level using the `SYNAPSE_TEST_LOG_LEVEL` environment variable when using `complement.sh`.

@@ -1 +0,0 @@
Reduce DB usage of `/sync` when a large number of unread messages have recently been sent in a room.

@@ -1 +0,0 @@
Improve and fix type hints.

@@ -1 +0,0 @@
Add missing links to config options.

@@ -1 +0,0 @@
Update config used by Complement to allow device name lookup over federation.

@@ -1 +0,0 @@
Make use of the more robust `get_current_state` in `_get_state_map_for_room` to avoid breakages.

@@ -1 +0,0 @@
Fix bug where rows were not deleted from `event_push_actions` table on large servers. Introduced in v1.62.0.

@@ -1 +0,0 @@
Check that `auto_vacuum` is disabled when porting a SQLite database to Postgres, as `VACUUM`s must not be performed between runs of the script.

@@ -1 +0,0 @@
Remove obsolete and for 8 years unused `RoomEventsStoreTestCase`. Contributed by @arkamar.

@@ -1 +0,0 @@
Bump the version of `lxml` in matrix.org Docker images Debian packages from 4.8.0 to 4.9.1.

@@ -1 +0,0 @@
Reduce number of queries used to get profile information. Contributed by Nick @ Beeper (@fizzadar).
`debian/changelog` (12 changes; vendored)
@@ -1,15 +1,3 @@
matrix-synapse-py3 (1.62.0) stable; urgency=medium

  * New Synapse release 1.62.0.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 05 Jul 2022 11:14:15 +0100

matrix-synapse-py3 (1.62.0~rc3) stable; urgency=medium

  * New Synapse release 1.62.0rc3.

 -- Synapse Packaging team <packages@matrix.org>  Mon, 04 Jul 2022 16:07:01 +0100

matrix-synapse-py3 (1.62.0~rc2) stable; urgency=medium

  * New Synapse release 1.62.0rc2.
@@ -67,13 +67,6 @@ The following environment variables are supported in `generate` mode:
* `UID`, `GID`: the user id and group id to use for creating the data
  directories. If unset, and no user is set via `docker run --user`, defaults
  to `991`, `991`.
* `SYNAPSE_LOG_LEVEL`: the log level to use (one of `DEBUG`, `INFO`, `WARNING` or `ERROR`).
  Defaults to `INFO`.
* `SYNAPSE_LOG_SENSITIVE`: if set and the log level is set to `DEBUG`, Synapse
  will log sensitive information such as access tokens.
  This should not be needed unless you are a developer attempting to debug something
  particularly tricky.
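As a rough illustration of how these two variables combine with the documented `generate` invocation (the volume path and server name below are placeholders, not values taken from this diff):

```sh
# Hypothetical example: substitute your own volume path and server name.
docker run -it --rm \
    -v /path/to/data:/data \
    -e SYNAPSE_SERVER_NAME=example.com \
    -e SYNAPSE_REPORT_STATS=yes \
    -e SYNAPSE_LOG_LEVEL=DEBUG \
    -e SYNAPSE_LOG_SENSITIVE=1 \
    matrixdotorg/synapse:latest generate
```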
## Postgres
@@ -81,8 +81,6 @@ rc_invites:

federation_rr_transactions_per_room_per_second: 9999

allow_device_name_lookup_over_federation: true

## Experimental Features ##

experimental_features:
@@ -49,17 +49,11 @@ handlers:
        class: logging.StreamHandler
        formatter: precise

{% if not SYNAPSE_LOG_SENSITIVE %}
{#
  If SYNAPSE_LOG_SENSITIVE is unset, then override synapse.storage.SQL to INFO
  so that DEBUG entries (containing sensitive information) are not emitted.
#}
loggers:
    synapse.storage.SQL:
        # beware: increasing this to DEBUG will make synapse log sensitive
        # information such as access tokens.
        level: INFO
{% endif %}

root:
    level: {{ SYNAPSE_LOG_LEVEL or "INFO" }}
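For intuition, with `SYNAPSE_LOG_LEVEL=DEBUG` and `SYNAPSE_LOG_SENSITIVE` unset, the template above would render roughly to the following (a sketch that elides the handler sections outside this hunk):

```yaml
loggers:
    synapse.storage.SQL:
        # beware: increasing this to DEBUG will make synapse log sensitive
        # information such as access tokens.
        level: INFO

root:
    level: DEBUG
```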
@@ -29,10 +29,6 @@
# * SYNAPSE_USE_EXPERIMENTAL_FORKING_LAUNCHER: Whether to use the forking launcher,
#   only intended for usage in Complement at the moment.
#   No stability guarantees are provided.
# * SYNAPSE_LOG_LEVEL: Set this to DEBUG, INFO, WARNING or ERROR to change the
#   log level. INFO is the default.
# * SYNAPSE_LOG_SENSITIVE: If unset, SQL and SQL values won't be logged,
#   regardless of the SYNAPSE_LOG_LEVEL setting.
#
# NOTE: According to Complement's ENTRYPOINT expectations for a homeserver image (as defined
# in the project's README), this script may be run multiple times, and functionality should
@@ -42,7 +38,7 @@ import os
import subprocess
import sys
from pathlib import Path
from typing import Any, Dict, List, Mapping, MutableMapping, NoReturn, Optional, Set
from typing import Any, Dict, List, Mapping, MutableMapping, NoReturn, Set

import yaml
from jinja2 import Environment, FileSystemLoader
@@ -556,17 +552,13 @@ def generate_worker_log_config(
    Returns: the path to the generated file
    """
    # Check whether we should write worker logs to disk, in addition to the console
    extra_log_template_args: Dict[str, Optional[str]] = {}
    extra_log_template_args = {}
    if environ.get("SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK"):
        extra_log_template_args["LOG_FILE_PATH"] = f"{data_dir}/logs/{worker_name}.log"

    extra_log_template_args["SYNAPSE_LOG_LEVEL"] = environ.get("SYNAPSE_LOG_LEVEL")
    extra_log_template_args["SYNAPSE_LOG_SENSITIVE"] = environ.get(
        "SYNAPSE_LOG_SENSITIVE"
    )
        extra_log_template_args["LOG_FILE_PATH"] = "{dir}/logs/{name}.log".format(
            dir=data_dir, name=worker_name
        )
    # Render and write the file
    log_config_filepath = f"/conf/workers/{worker_name}.log.config"
    log_config_filepath = "/conf/workers/{name}.log.config".format(name=worker_name)
    convert(
        "/conf/log.config",
        log_config_filepath,
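The variant of this hunk that forwards the environment variables condenses to roughly the following standalone sketch (`data_dir` and `worker_name` are placeholder values for illustration, not code from the repository):

```python
from os import environ
from typing import Dict, Optional

data_dir = "/data"              # placeholder
worker_name = "generic_worker1"  # placeholder

# Forward the two logging variables into the Jinja context unconditionally;
# the on-disk log path stays opt-in behind SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK.
extra_log_template_args: Dict[str, Optional[str]] = {
    "SYNAPSE_LOG_LEVEL": environ.get("SYNAPSE_LOG_LEVEL"),
    "SYNAPSE_LOG_SENSITIVE": environ.get("SYNAPSE_LOG_SENSITIVE"),
}
if environ.get("SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK"):
    extra_log_template_args["LOG_FILE_PATH"] = f"{data_dir}/logs/{worker_name}.log"
```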
@@ -81,7 +81,6 @@
# Development
- [Contributing Guide](development/contributing_guide.md)
- [Code Style](code_style.md)
- [Reviewing Code](development/reviews.md)
- [Release Cycle](development/releases.md)
- [Git Usage](development/git.md)
- [Testing]()
@@ -309,10 +309,6 @@ The above will run a monolithic (single-process) Synapse with SQLite as the data
- Passing `POSTGRES=1` as an environment variable to use the Postgres database instead.
- Passing `WORKERS=1` as an environment variable to use a workerised setup instead. This option implies the use of Postgres.

To increase the log level for the tests, set `SYNAPSE_TEST_LOG_LEVEL`, e.g:
```sh
SYNAPSE_TEST_LOG_LEVEL=DEBUG COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh -run TestImportHistoricalMessages
```
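These options compose; for instance (an illustrative combination reusing the test name above, not something this diff prescribes), a workerised Postgres run at DEBUG level could look like:

```sh
POSTGRES=1 WORKERS=1 SYNAPSE_TEST_LOG_LEVEL=DEBUG \
    COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh -run TestImportHistoricalMessages
```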
### Prettier formatting with `gotestfmt`

@@ -351,7 +347,7 @@ To prepare a Pull Request, please:
3. `git push` your commit to your fork of Synapse;
4. on GitHub, [create the Pull Request](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request);
5. add a [changelog entry](#changelog) and push it to your Pull Request;
6. that's it for now, a non-draft pull request will automatically request review from the team;
6. for most contributors, that's all - however, if you are a member of the organization `matrix-org`, on GitHub, please request a review from `matrix.org / Synapse Core`.
7. if you need to update your PR, please avoid rebasing and just add new commits to your branch.
@@ -527,13 +523,10 @@ From this point, you should:
1. Look at the results of the CI pipeline.
    - If there is any error, fix the error.
2. If a developer has requested changes, make these changes and let us know if it is ready for a developer to review again.
    - A pull request is a conversation; if you disagree with the suggestions, please respond and discuss it.
3. Create a new commit with the changes.
    - Please do NOT overwrite the history. New commits make the reviewer's life easier.
    - Push these commits to your Pull Request.
4. Back to 1.
5. Once the pull request is ready for review again, please re-request review from whichever developer did your initial
   review (or leave a comment in the pull request that you believe all required changes have been done).

Once both the CI and the developers are happy, the patch will be merged into Synapse and released shortly!
@@ -1,41 +0,0 @@
Some notes on how we do reviews
===============================

The Synapse team works off a shared review queue -- any new pull requests for
Synapse (or related projects) have a review requested from the entire team. Team
members should process this queue using the following rules:

* Any high urgency pull requests (e.g. fixes for broken continuous integration
  or fixes for release blockers);
* Follow-up reviews for pull requests which have previously received reviews;
* Any remaining pull requests.

For the latter two categories above, older pull requests should be prioritised.

It is explicit that there is no priority given to pull requests from the team
(vs from the community). If a pull request requires a quick turnaround, please
explicitly communicate this via [#synapse-dev:matrix.org](https://matrix.to/#/#synapse-dev:matrix.org)
or as a comment on the pull request.

Once an initial review has been completed and the author has made additional changes,
follow-up reviews should go back to the same reviewer. This helps build a shared
context and conversation between author and reviewer.

As a team we aim to keep the number of inflight pull requests to a minimum to ensure
that ongoing work is finished before starting new work.

Performing a review
-------------------

To communicate to the rest of the team the status of each pull request, team
members should do the following:

* Assign themselves to the pull request (they should be left assigned to the
  pull request until it is merged, closed, or they are no longer the reviewer);
* Review the pull request by leaving comments, questions, and suggestions;
* Mark the pull request appropriately (as needing changes or accepted).

If you are unsure about a particular part of the pull request (or are not confident
in your understanding of part of the code) then ask questions or request review
from the team again. When requesting review from the team be sure to leave a comment
with the rationale on why you're putting it back in the queue.
@@ -143,14 +143,6 @@ to do step 2.

It is safe to kill the port script at any time and restart it.

However, under no circumstances should the SQLite database be `VACUUM`ed between
multiple runs of the script. Doing so can lead to an inconsistent copy of your database
in Postgres.
To avoid accidental error, the script will check that SQLite's `auto_vacuum` mechanism
is disabled, but the script is not able to protect against a manual `VACUUM` operation
performed either by the administrator or by any automated task that the administrator
may have configured.
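A sketch of the check and the fix (the database filename is a placeholder; the disabling SQL mirrors the hint the port script itself prints further down in this comparison):

```sh
# Check the setting: 0 means auto_vacuum is disabled.
sqlite3 homeserver.db 'PRAGMA auto_vacuum;'

# Disable it. Changing the pragma requires a one-off VACUUM to take effect;
# run this before porting, never between runs of the port script.
sqlite3 homeserver.db 'PRAGMA auto_vacuum = off; VACUUM;'
```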
Note that the database may take up significantly more (25% - 100% more)
space on disk after porting to Postgres.
@@ -591,7 +591,7 @@ Example configuration:
```yaml
dummy_events_threshold: 5
```
---
### `delete_stale_devices_after`
Config option `delete_stale_devices_after`

An optional duration. If set, Synapse will run a daily background task to log out and
delete any device that hasn't been accessed for more than the specified amount of time.
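An illustrative value (an assumption on our part, not text from this diff; the duration syntax follows the same convention as other duration options in this file):

```yaml
delete_stale_devices_after: 1y
```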
@@ -1843,7 +1843,7 @@ Example configuration:
```yaml
turn_shared_secret: "YOUR_SHARED_SECRET"
```
----
### `turn_username` and `turn_password`
Config options: `turn_username` and `turn_password`

The username and password, if the TURN server needs them and does not use a token.
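A hedged example with placeholder credentials, mirroring the `YOUR_SHARED_SECRET` style above (not text from this diff):

```yaml
turn_username: "TURNSERVER_USERNAME"
turn_password: "TURNSERVER_PASSWORD"
```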
@@ -3373,7 +3373,7 @@ alias_creation_rules:
```yaml
    action: deny
```
---
### `room_list_publication_rules`
Config options: `room_list_publication_rules`

The `room_list_publication_rules` option controls who can publish and
which rooms can be published in the public room list.
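A hedged sketch of the shape this option takes (the field names are an assumption inferred from the matching `alias_creation_rules` option above; treat the exact schema as illustrative, not something this diff specifies):

```yaml
room_list_publication_rules:
  - user_id: "*"
    alias: "*"
    room_id: "*"
    action: allow
```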
`mypy.ini` (4 changes)
@@ -73,6 +73,7 @@ exclude = (?x)
  |tests/util/test_lrucache.py
  |tests/util/test_rwlock.py
  |tests/util/test_wheel_timer.py
  |tests/utils.py
  )$

[mypy-synapse.federation.transport.client]

@@ -126,9 +127,6 @@ disallow_untyped_defs = True
[mypy-tests.federation.transport.test_client]
disallow_untyped_defs = True

[mypy-tests.utils]
disallow_untyped_defs = True


;; Dependencies without annotations
;; Before ignoring a module, check to see if type stubs are available.
`poetry.lock` (141 changes; generated)
@@ -502,7 +502,7 @@ pyasn1 = ">=0.4.6"

[[package]]
name = "lxml"
version = "4.9.1"
version = "4.8.0"
description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API."
category = "main"
optional = true

@@ -540,7 +540,7 @@ test = ["tox", "twisted", "aiounittest"]

[[package]]
name = "matrix-synapse-ldap3"
version = "0.2.1"
version = "0.2.0"
description = "An LDAP3 auth provider for Synapse"
category = "main"
optional = true

@@ -552,7 +552,7 @@ service-identity = "*"
Twisted = ">=15.1.0"

[package.extras]
dev = ["matrix-synapse", "tox", "ldaptor", "mypy (==0.910)", "types-setuptools", "black (==22.3.0)", "flake8 (==4.0.1)", "isort (==5.9.3)"]
dev = ["matrix-synapse", "tox", "ldaptor", "mypy (==0.910)", "types-setuptools", "black (==21.9b0)", "flake8 (==4.0.1)", "isort (==5.9.3)"]

[[package]]
name = "mccabe"

@@ -1937,76 +1937,67 @@ ldap3 = [
    {file = "ldap3-2.9.1.tar.gz", hash = "sha256:f3e7fc4718e3f09dda568b57100095e0ce58633bcabbed8667ce3f8fbaa4229f"},
]
lxml = [
    {file = "lxml-4.9.1-cp27-cp27m-macosx_10_15_x86_64.whl", hash = "sha256:98cafc618614d72b02185ac583c6f7796202062c41d2eeecdf07820bad3295ed"},
    {file = "lxml-4.9.1-cp27-cp27m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c62e8dd9754b7debda0c5ba59d34509c4688f853588d75b53c3791983faa96fc"},
    {file = "lxml-4.9.1-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:21fb3d24ab430fc538a96e9fbb9b150029914805d551deeac7d7822f64631dfc"},
    {file = "lxml-4.9.1-cp27-cp27m-win32.whl", hash = "sha256:86e92728ef3fc842c50a5cb1d5ba2bc66db7da08a7af53fb3da79e202d1b2cd3"},
    {file = "lxml-4.9.1-cp27-cp27m-win_amd64.whl", hash = "sha256:4cfbe42c686f33944e12f45a27d25a492cc0e43e1dc1da5d6a87cbcaf2e95627"},
    {file = "lxml-4.9.1-cp27-cp27mu-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:dad7b164905d3e534883281c050180afcf1e230c3d4a54e8038aa5cfcf312b84"},
    {file = "lxml-4.9.1-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a614e4afed58c14254e67862456d212c4dcceebab2eaa44d627c2ca04bf86837"},
    {file = "lxml-4.9.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:f9ced82717c7ec65a67667bb05865ffe38af0e835cdd78728f1209c8fffe0cad"},
    {file = "lxml-4.9.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:d9fc0bf3ff86c17348dfc5d322f627d78273eba545db865c3cd14b3f19e57fa5"},
    {file = "lxml-4.9.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:e5f66bdf0976ec667fc4594d2812a00b07ed14d1b44259d19a41ae3fff99f2b8"},
    {file = "lxml-4.9.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:fe17d10b97fdf58155f858606bddb4e037b805a60ae023c009f760d8361a4eb8"},
    {file = "lxml-4.9.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8caf4d16b31961e964c62194ea3e26a0e9561cdf72eecb1781458b67ec83423d"},
    {file = "lxml-4.9.1-cp310-cp310-win32.whl", hash = "sha256:4780677767dd52b99f0af1f123bc2c22873d30b474aa0e2fc3fe5e02217687c7"},
    {file = "lxml-4.9.1-cp310-cp310-win_amd64.whl", hash = "sha256:b122a188cd292c4d2fcd78d04f863b789ef43aa129b233d7c9004de08693728b"},
    {file = "lxml-4.9.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:be9eb06489bc975c38706902cbc6888f39e946b81383abc2838d186f0e8b6a9d"},
    {file = "lxml-4.9.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:f1be258c4d3dc609e654a1dc59d37b17d7fef05df912c01fc2e15eb43a9735f3"},
    {file = "lxml-4.9.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:927a9dd016d6033bc12e0bf5dee1dde140235fc8d0d51099353c76081c03dc29"},
    {file = "lxml-4.9.1-cp35-cp35m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9232b09f5efee6a495a99ae6824881940d6447debe272ea400c02e3b68aad85d"},
    {file = "lxml-4.9.1-cp35-cp35m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:04da965dfebb5dac2619cb90fcf93efdb35b3c6994fea58a157a834f2f94b318"},
    {file = "lxml-4.9.1-cp35-cp35m-win32.whl", hash = "sha256:4d5bae0a37af799207140652a700f21a85946f107a199bcb06720b13a4f1f0b7"},
    {file = "lxml-4.9.1-cp35-cp35m-win_amd64.whl", hash = "sha256:4878e667ebabe9b65e785ac8da4d48886fe81193a84bbe49f12acff8f7a383a4"},
    {file = "lxml-4.9.1-cp36-cp36m-macosx_10_15_x86_64.whl", hash = "sha256:1355755b62c28950f9ce123c7a41460ed9743c699905cbe664a5bcc5c9c7c7fb"},
    {file = "lxml-4.9.1-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:bcaa1c495ce623966d9fc8a187da80082334236a2a1c7e141763ffaf7a405067"},
    {file = "lxml-4.9.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6eafc048ea3f1b3c136c71a86db393be36b5b3d9c87b1c25204e7d397cee9536"},
    {file = "lxml-4.9.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:13c90064b224e10c14dcdf8086688d3f0e612db53766e7478d7754703295c7c8"},
    {file = "lxml-4.9.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:206a51077773c6c5d2ce1991327cda719063a47adc02bd703c56a662cdb6c58b"},
    {file = "lxml-4.9.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:e8f0c9d65da595cfe91713bc1222af9ecabd37971762cb830dea2fc3b3bb2acf"},
    {file = "lxml-4.9.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:8f0a4d179c9a941eb80c3a63cdb495e539e064f8054230844dcf2fcb812b71d3"},
    {file = "lxml-4.9.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:830c88747dce8a3e7525defa68afd742b4580df6aa2fdd6f0855481e3994d391"},
    {file = "lxml-4.9.1-cp36-cp36m-win32.whl", hash = "sha256:1e1cf47774373777936c5aabad489fef7b1c087dcd1f426b621fda9dcc12994e"},
    {file = "lxml-4.9.1-cp36-cp36m-win_amd64.whl", hash = "sha256:5974895115737a74a00b321e339b9c3f45c20275d226398ae79ac008d908bff7"},
    {file = "lxml-4.9.1-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:1423631e3d51008871299525b541413c9b6c6423593e89f9c4cfbe8460afc0a2"},
    {file = "lxml-4.9.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:2aaf6a0a6465d39b5ca69688fce82d20088c1838534982996ec46633dc7ad6cc"},
    {file = "lxml-4.9.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:9f36de4cd0c262dd9927886cc2305aa3f2210db437aa4fed3fb4940b8bf4592c"},
    {file = "lxml-4.9.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:ae06c1e4bc60ee076292e582a7512f304abdf6c70db59b56745cca1684f875a4"},
    {file = "lxml-4.9.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:57e4d637258703d14171b54203fd6822fda218c6c2658a7d30816b10995f29f3"},
    {file = "lxml-4.9.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6d279033bf614953c3fc4a0aa9ac33a21e8044ca72d4fa8b9273fe75359d5cca"},
    {file = "lxml-4.9.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:a60f90bba4c37962cbf210f0188ecca87daafdf60271f4c6948606e4dabf8785"},
    {file = "lxml-4.9.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:6ca2264f341dd81e41f3fffecec6e446aa2121e0b8d026fb5130e02de1402785"},
    {file = "lxml-4.9.1-cp37-cp37m-win32.whl", hash = "sha256:27e590352c76156f50f538dbcebd1925317a0f70540f7dc8c97d2931c595783a"},
    {file = "lxml-4.9.1-cp37-cp37m-win_amd64.whl", hash = "sha256:eea5d6443b093e1545ad0210e6cf27f920482bfcf5c77cdc8596aec73523bb7e"},
    {file = "lxml-4.9.1-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:f05251bbc2145349b8d0b77c0d4e5f3b228418807b1ee27cefb11f69ed3d233b"},
    {file = "lxml-4.9.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:487c8e61d7acc50b8be82bda8c8d21d20e133c3cbf41bd8ad7eb1aaeb3f07c97"},
    {file = "lxml-4.9.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:8d1a92d8e90b286d491e5626af53afef2ba04da33e82e30744795c71880eaa21"},
    {file = "lxml-4.9.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:b570da8cd0012f4af9fa76a5635cd31f707473e65a5a335b186069d5c7121ff2"},
    {file = "lxml-4.9.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5ef87fca280fb15342726bd5f980f6faf8b84a5287fcc2d4962ea8af88b35130"},
    {file = "lxml-4.9.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:93e414e3206779ef41e5ff2448067213febf260ba747fc65389a3ddaa3fb8715"},
    {file = "lxml-4.9.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6653071f4f9bac46fbc30f3c7838b0e9063ee335908c5d61fb7a4a86c8fd2036"},
    {file = "lxml-4.9.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:32a73c53783becdb7eaf75a2a1525ea8e49379fb7248c3eeefb9412123536387"},
    {file = "lxml-4.9.1-cp38-cp38-win32.whl", hash = "sha256:1a7c59c6ffd6ef5db362b798f350e24ab2cfa5700d53ac6681918f314a4d3b94"},
    {file = "lxml-4.9.1-cp38-cp38-win_amd64.whl", hash = "sha256:1436cf0063bba7888e43f1ba8d58824f085410ea2025befe81150aceb123e345"},
    {file = "lxml-4.9.1-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:4beea0f31491bc086991b97517b9683e5cfb369205dac0148ef685ac12a20a67"},
    {file = "lxml-4.9.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:41fb58868b816c202e8881fd0f179a4644ce6e7cbbb248ef0283a34b73ec73bb"},
    {file = "lxml-4.9.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:bd34f6d1810d9354dc7e35158aa6cc33456be7706df4420819af6ed966e85448"},
    {file = "lxml-4.9.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:edffbe3c510d8f4bf8640e02ca019e48a9b72357318383ca60e3330c23aaffc7"},
    {file = "lxml-4.9.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6d949f53ad4fc7cf02c44d6678e7ff05ec5f5552b235b9e136bd52e9bf730b91"},
    {file = "lxml-4.9.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:079b68f197c796e42aa80b1f739f058dcee796dc725cc9a1be0cdb08fc45b000"},
    {file = "lxml-4.9.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9c3a88d20e4fe4a2a4a84bf439a5ac9c9aba400b85244c63a1ab7088f85d9d25"},
    {file = "lxml-4.9.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4e285b5f2bf321fc0857b491b5028c5f276ec0c873b985d58d7748ece1d770dd"},
    {file = "lxml-4.9.1-cp39-cp39-win32.whl", hash = "sha256:ef72013e20dd5ba86a8ae1aed7f56f31d3374189aa8b433e7b12ad182c0d2dfb"},
    {file = "lxml-4.9.1-cp39-cp39-win_amd64.whl", hash = "sha256:10d2017f9150248563bb579cd0d07c61c58da85c922b780060dcc9a3aa9f432d"},
    {file = "lxml-4.9.1-pp37-pypy37_pp73-macosx_10_15_x86_64.whl", hash = "sha256:0538747a9d7827ce3e16a8fdd201a99e661c7dee3c96c885d8ecba3c35d1032c"},
    {file = "lxml-4.9.1-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:0645e934e940107e2fdbe7c5b6fb8ec6232444260752598bc4d09511bd056c0b"},
    {file = "lxml-4.9.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:6daa662aba22ef3258934105be2dd9afa5bb45748f4f702a3b39a5bf53a1f4dc"},
    {file = "lxml-4.9.1-pp38-pypy38_pp73-macosx_10_15_x86_64.whl", hash = "sha256:603a464c2e67d8a546ddaa206d98e3246e5db05594b97db844c2f0a1af37cf5b"},
    {file = "lxml-4.9.1-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:c4b2e0559b68455c085fb0f6178e9752c4be3bba104d6e881eb5573b399d1eb2"},
    {file = "lxml-4.9.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:0f3f0059891d3254c7b5fb935330d6db38d6519ecd238ca4fce93c234b4a0f73"},
    {file = "lxml-4.9.1-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:c852b1530083a620cb0de5f3cd6826f19862bafeaf77586f1aef326e49d95f0c"},
    {file = "lxml-4.9.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:287605bede6bd36e930577c5925fcea17cb30453d96a7b4c63c14a257118dbb9"},
    {file = "lxml-4.9.1.tar.gz", hash = "sha256:fe749b052bb7233fe5d072fcb549221a8cb1a16725c47c37e42b0b9cb3ff2c3f"},
    {file = "lxml-4.8.0-cp27-cp27m-macosx_10_14_x86_64.whl", hash = "sha256:e1ab2fac607842ac36864e358c42feb0960ae62c34aa4caaf12ada0a1fb5d99b"},
    {file = "lxml-4.8.0-cp27-cp27m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:28d1af847786f68bec57961f31221125c29d6f52d9187c01cd34dc14e2b29430"},
    {file = "lxml-4.8.0-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:b92d40121dcbd74831b690a75533da703750f7041b4bf951befc657c37e5695a"},
    {file = "lxml-4.8.0-cp27-cp27m-win32.whl", hash = "sha256:e01f9531ba5420838c801c21c1b0f45dbc9607cb22ea2cf132844453bec863a5"},
    {file = "lxml-4.8.0-cp27-cp27m-win_amd64.whl", hash = "sha256:6259b511b0f2527e6d55ad87acc1c07b3cbffc3d5e050d7e7bcfa151b8202df9"},
    {file = "lxml-4.8.0-cp27-cp27mu-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1010042bfcac2b2dc6098260a2ed022968dbdfaf285fc65a3acf8e4eb1ffd1bc"},
    {file = "lxml-4.8.0-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:fa56bb08b3dd8eac3a8c5b7d075c94e74f755fd9d8a04543ae8d37b1612dd170"},
    {file = "lxml-4.8.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:31ba2cbc64516dcdd6c24418daa7abff989ddf3ba6d3ea6f6ce6f2ed6e754ec9"},
    {file = "lxml-4.8.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:31499847fc5f73ee17dbe1b8e24c6dafc4e8d5b48803d17d22988976b0171f03"},
    {file = "lxml-4.8.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:5f7d7d9afc7b293147e2d506a4596641d60181a35279ef3aa5778d0d9d9123fe"},
    {file = "lxml-4.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:a3c5f1a719aa11866ffc530d54ad965063a8cbbecae6515acbd5f0fae8f48eaa"},
    {file = "lxml-4.8.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6268e27873a3d191849204d00d03f65c0e343b3bcb518a6eaae05677c95621d1"},
    {file = "lxml-4.8.0-cp310-cp310-win32.whl", hash = "sha256:330bff92c26d4aee79c5bc4d9967858bdbe73fdbdbacb5daf623a03a914fe05b"},
    {file = "lxml-4.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:b2582b238e1658c4061ebe1b4df53c435190d22457642377fd0cb30685cdfb76"},
    {file = "lxml-4.8.0-cp35-cp35m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a2bfc7e2a0601b475477c954bf167dee6d0f55cb167e3f3e7cefad906e7759f6"},
    {file = "lxml-4.8.0-cp35-cp35m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a1547ff4b8a833511eeaceacbcd17b043214fcdb385148f9c1bc5556ca9623e2"},
    {file = "lxml-4.8.0-cp35-cp35m-win32.whl", hash = "sha256:a9f1c3489736ff8e1c7652e9dc39f80cff820f23624f23d9eab6e122ac99b150"},
    {file = "lxml-4.8.0-cp35-cp35m-win_amd64.whl", hash = "sha256:530f278849031b0eb12f46cca0e5db01cfe5177ab13bd6878c6e739319bae654"},
    {file = "lxml-4.8.0-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:078306d19a33920004addeb5f4630781aaeabb6a8d01398045fcde085091a169"},
    {file = "lxml-4.8.0-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:86545e351e879d0b72b620db6a3b96346921fa87b3d366d6c074e5a9a0b8dadb"},
    {file = "lxml-4.8.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:24f5c5ae618395ed871b3d8ebfcbb36e3f1091fd847bf54c4de623f9107942f3"},
    {file = "lxml-4.8.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:bbab6faf6568484707acc052f4dfc3802bdb0cafe079383fbaa23f1cdae9ecd4"},
    {file = "lxml-4.8.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7993232bd4044392c47779a3c7e8889fea6883be46281d45a81451acfd704d7e"},
    {file = "lxml-4.8.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6d6483b1229470e1d8835e52e0ff3c6973b9b97b24cd1c116dca90b57a2cc613"},
    {file = "lxml-4.8.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:ad4332a532e2d5acb231a2e5d33f943750091ee435daffca3fec0a53224e7e33"},
    {file = "lxml-4.8.0-cp36-cp36m-win32.whl", hash = "sha256:db3535733f59e5605a88a706824dfcb9bd06725e709ecb017e165fc1d6e7d429"},
    {file = "lxml-4.8.0-cp36-cp36m-win_amd64.whl", hash = "sha256:5f148b0c6133fb928503cfcdfdba395010f997aa44bcf6474fcdd0c5398d9b63"},
    {file = "lxml-4.8.0-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:8a31f24e2a0b6317f33aafbb2f0895c0bce772980ae60c2c640d82caac49628a"},
    {file = "lxml-4.8.0-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:719544565c2937c21a6f76d520e6e52b726d132815adb3447ccffbe9f44203c4"},
    {file = "lxml-4.8.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:c0b88ed1ae66777a798dc54f627e32d3b81c8009967c63993c450ee4cbcbec15"},
    {file = "lxml-4.8.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:fa9b7c450be85bfc6cd39f6df8c5b8cbd76b5d6fc1f69efec80203f9894b885f"},
    {file = "lxml-4.8.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e9f84ed9f4d50b74fbc77298ee5c870f67cb7e91dcdc1a6915cb1ff6a317476c"},
    {file = "lxml-4.8.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:1d650812b52d98679ed6c6b3b55cbb8fe5a5460a0aef29aeb08dc0b44577df85"},
    {file = "lxml-4.8.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:80bbaddf2baab7e6de4bc47405e34948e694a9efe0861c61cdc23aa774fcb141"},
    {file = "lxml-4.8.0-cp37-cp37m-win32.whl", hash = "sha256:6f7b82934c08e28a2d537d870293236b1000d94d0b4583825ab9649aef7ddf63"},
    {file = "lxml-4.8.0-cp37-cp37m-win_amd64.whl", hash = "sha256:e1fd7d2fe11f1cb63d3336d147c852f6d07de0d0020d704c6031b46a30b02ca8"},
    {file = "lxml-4.8.0-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:5045ee1ccd45a89c4daec1160217d363fcd23811e26734688007c26f28c9e9e7"},
    {file = "lxml-4.8.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:0c1978ff1fd81ed9dcbba4f91cf09faf1f8082c9d72eb122e92294716c605428"},
    {file = "lxml-4.8.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:52cbf2ff155b19dc4d4100f7442f6a697938bf4493f8d3b0c51d45568d5666b5"},
    {file = "lxml-4.8.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:ce13d6291a5f47c1c8dbd375baa78551053bc6b5e5c0e9bb8e39c0a8359fd52f"},
    {file = "lxml-4.8.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e11527dc23d5ef44d76fef11213215c34f36af1608074561fcc561d983aeb870"},
    {file = "lxml-4.8.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:60d2f60bd5a2a979df28ab309352cdcf8181bda0cca4529769a945f09aba06f9"},
    {file = "lxml-4.8.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:62f93eac69ec0f4be98d1b96f4d6b964855b8255c345c17ff12c20b93f247b68"},
    {file = "lxml-4.8.0-cp38-cp38-win32.whl", hash = "sha256:20b8a746a026017acf07da39fdb10aa80ad9877046c9182442bf80c84a1c4696"},
    {file = "lxml-4.8.0-cp38-cp38-win_amd64.whl", hash = "sha256:891dc8f522d7059ff0024cd3ae79fd224752676447f9c678f2a5c14b84d9a939"},
    {file = "lxml-4.8.0-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:b6fc2e2fb6f532cf48b5fed57567ef286addcef38c28874458a41b7837a57807"},
    {file = "lxml-4.8.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:74eb65ec61e3c7c019d7169387d1b6ffcfea1b9ec5894d116a9a903636e4a0b1"},
    {file = "lxml-4.8.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:627e79894770783c129cc5e89b947e52aa26e8e0557c7e205368a809da4b7939"},
    {file = "lxml-4.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:545bd39c9481f2e3f2727c78c169425efbfb3fbba6e7db4f46a80ebb249819ca"},
    {file = "lxml-4.8.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5a58d0b12f5053e270510bf12f753a76aaf3d74c453c00942ed7d2c804ca845c"},
    {file = "lxml-4.8.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:ec4b4e75fc68da9dc0ed73dcdb431c25c57775383fec325d23a770a64e7ebc87"},
    {file = "lxml-4.8.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5804e04feb4e61babf3911c2a974a5b86f66ee227cc5006230b00ac6d285b3a9"},
    {file = "lxml-4.8.0-cp39-cp39-win32.whl", hash = "sha256:aa0cf4922da7a3c905d000b35065df6184c0dc1d866dd3b86fd961905bbad2ea"},
    {file = "lxml-4.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:dd10383f1d6b7edf247d0960a3db274c07e96cf3a3fc7c41c8448f93eac3fb1c"},
    {file = "lxml-4.8.0-pp37-pypy37_pp73-macosx_10_14_x86_64.whl", hash = "sha256:2403a6d6fb61c285969b71f4a3527873fe93fd0abe0832d858a17fe68c8fa507"},
    {file = "lxml-4.8.0-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:986b7a96228c9b4942ec420eff37556c5777bfba6758edcb95421e4a614b57f9"},
    {file = "lxml-4.8.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:6fe4ef4402df0250b75ba876c3795510d782def5c1e63890bde02d622570d39e"},
    {file = "lxml-4.8.0-pp38-pypy38_pp73-macosx_10_14_x86_64.whl", hash = "sha256:f10ce66fcdeb3543df51d423ede7e238be98412232fca5daec3e54bcd16b8da0"},
    {file = "lxml-4.8.0-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:730766072fd5dcb219dd2b95c4c49752a54f00157f322bc6d71f7d2a31fecd79"},
    {file = "lxml-4.8.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:8b99ec73073b37f9ebe8caf399001848fced9c08064effdbfc4da2b5a8d07b93"},
    {file = "lxml-4.8.0.tar.gz", hash = "sha256:f63f62fc60e6228a4ca9abae28228f35e1bd3ce675013d1dfb828688d50c6e23"},
]
markupsafe = [
    {file = "MarkupSafe-2.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3028252424c72b2602a323f70fbf50aa80a5d3aa616ea6add4ba21ae9cc9da4c"},

@@ -2055,8 +2046,8 @@ matrix-common = [
    {file = "matrix_common-1.2.1.tar.gz", hash = "sha256:a99dcf02a6bd95b24a5a61b354888a2ac92bf2b4b839c727b8dd9da2cdfa3853"},
]
matrix-synapse-ldap3 = [
    {file = "matrix-synapse-ldap3-0.2.1.tar.gz", hash = "sha256:bfb4390f4a262ffb0d6f057ff3aeb1e46d4e52ff420a064d795fb4f555f00285"},
    {file = "matrix_synapse_ldap3-0.2.1-py3-none-any.whl", hash = "sha256:1b3310a60f1d06466f35905a269b6df95747fd1305f2b7fe638f373963b2aa2c"},
    {file = "matrix-synapse-ldap3-0.2.0.tar.gz", hash = "sha256:91a0715b43a41ec3033244174fca20846836da98fda711fb01687f7199eecd2e"},
    {file = "matrix_synapse_ldap3-0.2.0-py3-none-any.whl", hash = "sha256:0128ca7c3058987adc2e8a88463bb46879915bfd3d373309632813b353e30f9f"},
]
mccabe = [
    {file = "mccabe-0.6.1-py2.py3-none-any.whl", hash = "sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42"},
@@ -54,7 +54,7 @@ skip_gitignore = true

[tool.poetry]
name = "matrix-synapse"
version = "1.62.0"
version = "1.62.0rc2"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "Apache-2.0"
@@ -46,12 +46,10 @@ Run the complement test suite on Synapse.

  -f, --fast
    Skip rebuilding the docker images, and just use the most recent
    'complement-synapse:latest' image.
    Conflicts with --build-only.
    'complement-synapse:latest' image

  --build-only
    Only build the Docker images. Don't actually run Complement.
    Conflicts with -f/--fast.

For help on arguments to 'go test', run 'go help testflag'.
EOF
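The CI loop earlier in this comparison uses exactly this pairing, which suggests a build-once, run-many workflow (a sketch; the test name is borrowed from the workflow change above, not prescribed here):

```sh
./scripts-dev/complement.sh --build-only             # build the Docker images once
./scripts-dev/complement.sh -f -run TestMembersLocal # then reuse them across runs
```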
@@ -160,18 +158,6 @@ else
  test_tags="$test_tags,faster_joins"
fi

if [[ -n "$SYNAPSE_TEST_LOG_LEVEL" ]]; then
  # Set the log level to what is desired
  export PASS_SYNAPSE_LOG_LEVEL="$SYNAPSE_TEST_LOG_LEVEL"

  # Allow logging sensitive things (currently SQL queries & parameters).
  # (This won't have any effect if we're not logging at DEBUG level overall.)
  # Since this is just a test suite, this is fine and won't reveal anyone's
  # personal information
  export PASS_SYNAPSE_LOG_SENSITIVE=1
fi

# Run the tests!
echo "Images built; running complement"
cd "$COMPLEMENT_DIR"
@@ -621,25 +621,6 @@ class Porter:
            self.postgres_store.db_pool.updates.has_completed_background_updates()
        )

    @staticmethod
    def _is_sqlite_autovacuum_enabled(txn: LoggingTransaction) -> bool:
        """
        Returns true if auto_vacuum is enabled in SQLite.
        https://www.sqlite.org/pragma.html#pragma_auto_vacuum

        Vacuuming changes the rowids on rows in the database.
        Auto-vacuuming is therefore dangerous when used in conjunction with this script.

        Note that the auto_vacuum setting can't be changed without performing
        a VACUUM after trying to change the pragma.
        """
        txn.execute("PRAGMA auto_vacuum")
        row = txn.fetchone()
        assert row is not None, "`PRAGMA auto_vacuum` did not give a row."
        (autovacuum_setting,) = row
        # 0 means off. 1 means full. 2 means incremental.
        return autovacuum_setting != 0

    async def run(self) -> None:
        """Ports the SQLite database to a PostgreSQL database.

@@ -656,21 +637,6 @@ class Porter:
                allow_outdated_version=True,
            )

            # For safety, ensure auto_vacuums are disabled.
            if await self.sqlite_store.db_pool.runInteraction(
                "is_sqlite_autovacuum_enabled", self._is_sqlite_autovacuum_enabled
            ):
                end_error = (
                    "auto_vacuum is enabled in the SQLite database."
                    " (This is not the default configuration.)\n"
                    " This script relies on rowids being consistent and must not"
                    " be used if the database could be vacuumed between re-runs.\n"
                    " To disable auto_vacuum, you need to stop Synapse and run the following SQL:\n"
                    " PRAGMA auto_vacuum=off;\n"
                    " VACUUM;"
                )
                return

            # Check if all background updates are done, abort if not.
            updates_complete = (
                await self.sqlite_store.db_pool.updates.has_completed_background_updates()
@@ -464,7 +464,14 @@ class ThirdPartyEventRules:
        Returns:
            A dict mapping (event type, state key) to state event.
        """
        return await self._storage_controllers.state.get_current_state(room_id)
        state_ids = await self._storage_controllers.state.get_current_state_ids(room_id)
        room_state_events = await self.store.get_events(state_ids.values())

        state_events = {}
        for key, event_id in state_ids.items():
            state_events[key] = room_state_events[event_id]

        return state_events

    async def on_profile_update(
        self, user_id: str, new_profile: ProfileInfo, by_admin: bool, deactivation: bool
@@ -67,7 +67,6 @@ from synapse.replication.http.federation import (
    ReplicationFederationSendEduRestServlet,
    ReplicationGetQueryRestServlet,
)
from synapse.storage.databases.main.events import PartialStateConflictError
from synapse.storage.databases.main.lock import Lock
from synapse.types import JsonDict, StateMap, get_domain_from_id
from synapse.util import json_decoder, unwrapFirstError

@@ -883,20 +882,9 @@ class FederationServer(FederationBase):
            logger.warning("%s", errmsg)
            raise SynapseError(403, errmsg, Codes.FORBIDDEN)

        try:
            return await self._federation_event_handler.on_send_membership_event(
                origin, event
            )
        except PartialStateConflictError:
            # The room was un-partial stated while we were persisting the event.
            # Try once more, with full state this time.
            logger.info(
                "Room %s was un-partial stated during `on_send_membership_event`, trying again.",
                room_id,
            )
            return await self._federation_event_handler.on_send_membership_event(
                origin, event
            )
        return await self._federation_event_handler.on_send_membership_event(
            origin, event
        )

    async def on_event_auth(
        self, origin: str, room_id: str, event_id: str
@@ -45,7 +45,6 @@ from synapse.api.errors import (
    FederationDeniedError,
    FederationError,
    HttpResponseException,
    LimitExceededError,
    NotFoundError,
    RequestSendFailed,
    SynapseError,

@@ -65,7 +64,6 @@ from synapse.replication.http.federation import (
    ReplicationCleanRoomRestServlet,
    ReplicationStoreRoomOnOutlierMembershipRestServlet,
)
from synapse.storage.databases.main.events import PartialStateConflictError
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.storage.state import StateFilter
from synapse.types import JsonDict, StateMap, get_domain_from_id

@@ -551,29 +549,15 @@ class FederationHandler:
            # https://github.com/matrix-org/synapse/issues/12998
            await self.store.store_partial_state_room(room_id, ret.servers_in_room)

            try:
                max_stream_id = (
                    await self._federation_event_handler.process_remote_join(
                        origin,
                        room_id,
                        auth_chain,
                        state,
                        event,
                        room_version_obj,
                        partial_state=ret.partial_state,
                    )
                )
            except PartialStateConflictError as e:
                # The homeserver was already in the room and it is no longer partial
                # stated. We ought to be doing a local join instead. Turn the error into
                # a 429, as a hint to the client to try again.
                # TODO(faster_joins): `_should_perform_remote_join` suggests that we may
                #   do a remote join for restricted rooms even if we have full state.
                logger.error(
                    "Room %s was un-partial stated while processing remote join.",
                    room_id,
                )
                raise LimitExceededError(msg=e.msg, errcode=e.errcode, retry_after_ms=0)
            max_stream_id = await self._federation_event_handler.process_remote_join(
                origin,
                room_id,
                auth_chain,
                state,
                event,
                room_version_obj,
                partial_state=ret.partial_state,
            )

            if ret.partial_state:
                # Kick off the process of asynchronously fetching the state for this

@@ -1559,9 +1543,14 @@ class FederationHandler:
                # all the events are updated, so we can update current state and
                # clear the lazy-loading flag.
                logger.info("Updating current state for %s", room_id)
                # TODO(faster_joins): notify workers in notify_room_un_partial_stated
                # TODO(faster_joins): support workers
                # https://github.com/matrix-org/synapse/issues/12994
                await self.state_handler.update_current_state(room_id)
                assert (
                    self._storage_controllers.persistence is not None
                ), "worker-mode deployments not currently supported here"
                await self._storage_controllers.persistence.update_current_state(
                    room_id
                )

                logger.info("Clearing partial-state flag for %s", room_id)
                success = await self.store.clear_partial_state_room(room_id)

@@ -1578,6 +1567,11 @@ class FederationHandler:

                # we raced against more events arriving with partial state. Go round
                # the loop again. We've already logged a warning, so no need for more.
                # TODO(faster_joins): there is still a race here, whereby incoming events which raced
                #   with us will fail to be persisted after the call to `clear_partial_state_room` due to
                #   having partial state.
                #   https://github.com/matrix-org/synapse/issues/12988
                #
                continue

        events = await self.store.get_events_as_list(
@@ -64,7 +64,6 @@ from synapse.replication.http.federation import (
    ReplicationFederationSendEventsRestServlet,
)
from synapse.state import StateResolutionStore
from synapse.storage.databases.main.events import PartialStateConflictError
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.storage.state import StateFilter
from synapse.types import (

@@ -276,16 +275,7 @@ class FederationEventHandler:
                affected=pdu.event_id,
            )

        try:
            await self._process_received_pdu(origin, pdu, state_ids=None)
        except PartialStateConflictError:
            # The room was un-partial stated while we were processing the PDU.
            # Try once more, with full state this time.
            logger.info(
                "Room %s was un-partial stated while processing the PDU, trying again.",
                room_id,
            )
            await self._process_received_pdu(origin, pdu, state_ids=None)
        await self._process_received_pdu(origin, pdu, state_ids=None)

    async def on_send_membership_event(
        self, origin: str, event: EventBase

@@ -316,9 +306,6 @@ class FederationEventHandler:

        Raises:
            SynapseError if the event is not accepted into the room
            PartialStateConflictError if the room was un-partial stated in between
                computing the state at the event and persisting it. The caller should
                retry exactly once in this case.
        """
        logger.debug(
            "on_send_membership_event: Got event: %s, signatures: %s",

@@ -436,8 +423,6 @@ class FederationEventHandler:

        Raises:
            SynapseError if the response is in some way invalid.
            PartialStateConflictError if the homeserver is already in the room and it
                has been un-partial stated.
        """
        create_event = None
        for e in state:

@@ -1099,14 +1084,10 @@ class FederationEventHandler:

            state_ids: Normally None, but if we are handling a gap in the graph
                (ie, we are missing one or more prev_events), the resolved state at the
                event. Must not be partial state.
                event

            backfilled: True if this is part of a historical batch of events (inhibits
                notification to clients, and validation of device keys.)

            PartialStateConflictError: if the room was un-partial stated in between
                computing the state at the event and persisting it. The caller should retry
                exactly once in this case. Will never be raised if `state_ids` is provided.
        """
        logger.debug("Processing event: %s", event)
        assert not event.internal_metadata.outlier

@@ -1952,9 +1933,6 @@ class FederationEventHandler:
            event: The event itself.
            context: The event context.
            backfilled: True if the event was backfilled.

            PartialStateConflictError: if attempting to persist a partial state event in
                a room that has been un-partial stated.
        """
        # this method should not be called on outliers (those code paths call
        # persist_events_and_notify directly.)

@@ -2007,10 +1985,6 @@ class FederationEventHandler:

        Returns:
            The stream ID after which all events have been persisted.

        Raises:
            PartialStateConflictError: if attempting to persist a partial state event in
                a room that has been un-partial stated.
        """
        if not event_and_contexts:
            return self._store.get_room_max_stream_ordering()

@@ -2019,19 +1993,14 @@ class FederationEventHandler:
        if instance != self._instance_name:
            # Limit the number of events sent over replication. We choose 200
            # here as that is what we default to in `max_request_body_size(..)`
            try:
                for batch in batch_iter(event_and_contexts, 200):
                    result = await self._send_events(
                        instance_name=instance,
                        store=self._store,
                        room_id=room_id,
                        event_and_contexts=batch,
                        backfilled=backfilled,
                    )
            except SynapseError as e:
                if e.code == HTTPStatus.CONFLICT:
                    raise PartialStateConflictError()
                raise
            for batch in batch_iter(event_and_contexts, 200):
                result = await self._send_events(
                    instance_name=instance,
                    store=self._store,
                    room_id=room_id,
                    event_and_contexts=batch,
                    backfilled=backfilled,
                )
            return result["max_stream_id"]
        else:
            assert self._storage_controllers.persistence
@@ -37,7 +37,6 @@ from synapse.api.errors import (
|
||||
AuthError,
|
||||
Codes,
|
||||
ConsentNotGivenError,
|
||||
LimitExceededError,
|
||||
NotFoundError,
|
||||
ShadowBanError,
|
||||
SynapseError,
|
||||
@@ -54,7 +53,6 @@ from synapse.handlers.directory import DirectoryHandler
|
||||
from synapse.logging.context import make_deferred_yieldable, run_in_background
|
||||
from synapse.metrics.background_process_metrics import run_as_background_process
|
||||
from synapse.replication.http.send_event import ReplicationSendEventRestServlet
|
||||
from synapse.storage.databases.main.events import PartialStateConflictError
|
||||
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
|
||||
from synapse.storage.state import StateFilter
|
||||
from synapse.types import (
|
||||
@@ -1252,8 +1250,6 @@ class EventCreationHandler:
|
||||
|
||||
Raises:
|
||||
ShadowBanError if the requester has been shadow-banned.
|
||||
SynapseError(503) if attempting to persist a partial state event in
|
||||
a room that has been un-partial stated.
|
||||
"""
|
||||
extra_users = extra_users or []
|
||||
|
||||
@@ -1304,35 +1300,24 @@ class EventCreationHandler:
|
||||
|
||||
# We now persist the event (and update the cache in parallel, since we
|
||||
# don't want to block on it).
|
||||
try:
|
||||
result, _ = await make_deferred_yieldable(
|
||||
gather_results(
|
||||
(
|
||||
run_in_background(
|
||||
self._persist_event,
|
||||
requester=requester,
|
||||
event=event,
|
||||
context=context,
|
||||
ratelimit=ratelimit,
|
||||
extra_users=extra_users,
|
||||
),
|
||||
run_in_background(
|
||||
self.cache_joined_hosts_for_event, event, context
|
||||
).addErrback(
|
||||
log_failure, "cache_joined_hosts_for_event failed"
|
||||
),
|
||||
result, _ = await make_deferred_yieldable(
|
||||
gather_results(
|
||||
(
|
||||
run_in_background(
|
||||
self._persist_event,
|
||||
requester=requester,
|
||||
event=event,
|
||||
context=context,
|
||||
ratelimit=ratelimit,
|
||||
extra_users=extra_users,
|
||||
),
|
||||
consumeErrors=True,
|
||||
)
|
||||
).addErrback(unwrapFirstError)
|
||||
except PartialStateConflictError as e:
|
||||
# The event context needs to be recomputed.
|
||||
# Turn the error into a 429, as a hint to the client to try again.
|
||||
logger.info(
|
||||
"Room %s was un-partial stated while persisting client event.",
|
||||
event.room_id,
|
||||
run_in_background(
|
||||
self.cache_joined_hosts_for_event, event, context
|
||||
).addErrback(log_failure, "cache_joined_hosts_for_event failed"),
|
||||
),
|
||||
consumeErrors=True,
|
||||
)
|
||||
raise LimitExceededError(msg=e.msg, errcode=e.errcode, retry_after_ms=0)
|
||||
).addErrback(unwrapFirstError)
|
||||
|
||||
return result
|
||||
|
||||
@@ -1347,9 +1332,6 @@ class EventCreationHandler:
|
||||
"""Actually persists the event. Should only be called by
|
||||
`handle_new_client_event`, and see its docstring for documentation of
|
||||
the arguments.
|
||||
|
||||
PartialStateConflictError: if attempting to persist a partial state event in
|
||||
a room that has been un-partial stated.
|
||||
"""
|
||||
|
||||
# Skip push notification actions for historical messages
|
||||
@@ -1366,21 +1348,16 @@ class EventCreationHandler:
        # If we're a worker we need to hit out to the master.
        writer_instance = self._events_shard_config.get_instance(event.room_id)
        if writer_instance != self._instance_name:
            try:
                result = await self.send_event(
                    instance_name=writer_instance,
                    event_id=event.event_id,
                    store=self.store,
                    requester=requester,
                    event=event,
                    context=context,
                    ratelimit=ratelimit,
                    extra_users=extra_users,
                )
            except SynapseError as e:
                if e.code == HTTPStatus.CONFLICT:
                    raise PartialStateConflictError()
                raise
            result = await self.send_event(
                instance_name=writer_instance,
                event_id=event.event_id,
                store=self.store,
                requester=requester,
                event=event,
                context=context,
                ratelimit=ratelimit,
                extra_users=extra_users,
            )
            stream_id = result["stream_id"]
            event_id = result["event_id"]
            if event_id != event.event_id:
@@ -1508,10 +1485,6 @@ class EventCreationHandler:
            The persisted event. This may be different than the given event if
            it was de-duplicated (e.g. because we had already persisted an
            event with the same transaction ID.)

        Raises:
            PartialStateConflictError: if attempting to persist a partial state event in
                a room that has been un-partial stated.
        """
        extra_users = extra_users or []
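The try/except side of this hunk translates a transport-level failure back into the internal exception type: the replication endpoint signals a stale event context with an HTTP 409. A short sketch of that translation, assuming `send_event` is the replication client shown above:

    from http import HTTPStatus

    from synapse.api.errors import SynapseError
    from synapse.storage.databases.main.events import PartialStateConflictError

    async def send_to_event_writer(send_event, **kwargs):
        # A 409 from the writer means "event context needs recomputing";
        # convert it back so local and remote persistence raise identically.
        try:
            return await send_event(**kwargs)
        except SynapseError as e:
            if e.code == HTTPStatus.CONFLICT:
                raise PartialStateConflictError()
            raise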
@@ -67,14 +67,19 @@ class ProfileHandler:
        target_user = UserID.from_string(user_id)

        if self.hs.is_mine(target_user):
            profileinfo = await self.store.get_profileinfo(target_user.localpart)
            if profileinfo.display_name is None:
                raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND)
            try:
                displayname = await self.store.get_profile_displayname(
                    target_user.localpart
                )
                avatar_url = await self.store.get_profile_avatar_url(
                    target_user.localpart
                )
            except StoreError as e:
                if e.code == 404:
                    raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND)
                raise

            return {
                "displayname": profileinfo.display_name,
                "avatar_url": profileinfo.avatar_url,
            }
            return {"displayname": displayname, "avatar_url": avatar_url}
        else:
            try:
                result = await self.federation.make_query(
@@ -846,17 +846,10 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):

            content["membership"] = Membership.JOIN

            try:
                profile = self.profile_handler
                if not content_specified:
                    content["displayname"] = await profile.get_displayname(target)
                    content["avatar_url"] = await profile.get_avatar_url(target)
            except Exception as e:
                logger.info(
                    "Failed to get profile information while processing remote join for %r: %s",
                    target,
                    e,
                )
            profile = self.profile_handler
            if not content_specified:
                content["displayname"] = await profile.get_displayname(target)
                content["avatar_url"] = await profile.get_avatar_url(target)

            if requester.is_guest:
                content["kind"] = "guest"
@@ -933,18 +926,11 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):

            content["membership"] = Membership.KNOCK

            try:
                profile = self.profile_handler
                if "displayname" not in content:
                    content["displayname"] = await profile.get_displayname(target)
                if "avatar_url" not in content:
                    content["avatar_url"] = await profile.get_avatar_url(target)
            except Exception as e:
                logger.info(
                    "Failed to get profile information while processing remote knock for %r: %s",
                    target,
                    e,
                )
            profile = self.profile_handler
            if "displayname" not in content:
                content["displayname"] = await profile.get_displayname(target)
            if "avatar_url" not in content:
                content["avatar_url"] = await profile.get_avatar_url(target)

            return await self.remote_knock(
                remote_room_hosts, room_id, target, content
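The try/except variants of both membership paths above treat the profile lookup as best-effort: a failure is logged at INFO and the join or knock proceeds without a displayname or avatar. A condensed sketch of that pattern (the `profile` handler and `target` user are as in the hunks above):

    import logging

    logger = logging.getLogger(__name__)

    async def fill_profile_best_effort(profile, target, content):
        # A missing or unreachable profile must not fail the remote
        # join/knock, so lookup errors are swallowed after logging.
        try:
            if "displayname" not in content:
                content["displayname"] = await profile.get_displayname(target)
            if "avatar_url" not in content:
                content["avatar_url"] = await profile.get_avatar_url(target)
        except Exception as e:
            logger.info("Failed to get profile information for %r: %s", target, e)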
@@ -25,7 +25,6 @@ from synapse.replication.http import (
    push,
    register,
    send_event,
    state,
    streams,
)

@@ -49,7 +48,6 @@ class ReplicationRestResource(JsonResource):
        streams.register_servlets(hs, self)
        account_data.register_servlets(hs, self)
        push.register_servlets(hs, self)
        state.register_servlets(hs, self)

        # The following can't currently be instantiated on workers.
        if hs.config.worker.worker_app is None:
@@ -60,9 +60,6 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint):
        {
            "max_stream_id": 32443,
        }

    Responds with a 409 when a `PartialStateConflictError` is raised due to an event
    context that needs to be recomputed due to the un-partial stating of a room.
    """

    NAME = "fed_send_events"

@@ -59,9 +59,6 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint):

        { "stream_id": 12345, "event_id": "$abcdef..." }

    Responds with a 409 when a `PartialStateConflictError` is raised due to an event
    context that needs to be recomputed due to the un-partial stating of a room.

    The returned event ID may not match the sent event if it was deduplicated.
    """
@@ -1,75 +0,0 @@
# Copyright 2022 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
from typing import TYPE_CHECKING, Tuple

from twisted.web.server import Request

from synapse.api.errors import SynapseError
from synapse.http.server import HttpServer
from synapse.replication.http._base import ReplicationEndpoint
from synapse.types import JsonDict

if TYPE_CHECKING:
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)


class ReplicationUpdateCurrentStateRestServlet(ReplicationEndpoint):
    """Recalculates the current state for a room, and persists it.

    The API looks like:

        POST /_synapse/replication/update_current_state/:room_id

        {}

        200 OK

        {}
    """

    NAME = "update_current_state"
    PATH_ARGS = ("room_id",)

    def __init__(self, hs: "HomeServer"):
        super().__init__(hs)

        self._state_handler = hs.get_state_handler()
        self._events_shard_config = hs.config.worker.events_shard_config
        self._instance_name = hs.get_instance_name()

    @staticmethod
    async def _serialize_payload(room_id: str) -> JsonDict:  # type: ignore[override]
        return {}

    async def _handle_request(  # type: ignore[override]
        self, request: Request, room_id: str
    ) -> Tuple[int, JsonDict]:
        writer_instance = self._events_shard_config.get_instance(room_id)
        if writer_instance != self._instance_name:
            raise SynapseError(
                400, "/update_current_state request was routed to the wrong worker"
            )

        await self._state_handler.update_current_state(room_id)

        return 200, {}


def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
    if hs.get_instance_name() in hs.config.worker.writers.events:
        ReplicationUpdateCurrentStateRestServlet(hs).register(http_server)
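For reference, callers did not hit this servlet directly; they went through the client that `ReplicationEndpoint.make_client` generates, routing the request to whichever worker writes events for the room. A sketch of the calling side, mirroring the state-handler hunk below:

    # Built once at startup:
    update_current_state_client = (
        ReplicationUpdateCurrentStateRestServlet.make_client(hs)
    )

    # Then, to recalculate a room's state from any worker:
    writer_instance = hs.config.worker.events_shard_config.get_instance(room_id)
    if writer_instance != hs.get_instance_name():
        await update_current_state_client(
            instance_name=writer_instance,
            room_id=room_id,
        )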
@@ -43,7 +43,6 @@ from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, StateResolutionVersio
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.logging.context import ContextResourceUsage
from synapse.replication.http.state import ReplicationUpdateCurrentStateRestServlet
from synapse.state import v1, v2
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.storage.roommember import ProfileInfo
@@ -130,12 +129,6 @@ class StateHandler:
        self.hs = hs
        self._state_resolution_handler = hs.get_state_resolution_handler()
        self._storage_controllers = hs.get_storage_controllers()
        self._events_shard_config = hs.config.worker.events_shard_config
        self._instance_name = hs.get_instance_name()

        self._update_current_state_client = (
            ReplicationUpdateCurrentStateRestServlet.make_client(hs)
        )

    async def get_current_state_ids(
        self,
@@ -430,24 +423,6 @@ class StateHandler:

        return {key: state_map[ev_id] for key, ev_id in new_state.items()}

    async def update_current_state(self, room_id: str) -> None:
        """Recalculates the current state for a room, and persists it.

        Raises:
            SynapseError(502): if all attempts to connect to the event persister worker
                fail
        """
        writer_instance = self._events_shard_config.get_instance(room_id)
        if writer_instance != self._instance_name:
            await self._update_current_state_client(
                instance_name=writer_instance,
                room_id=room_id,
            )
            return

        assert self._storage_controllers.persistence is not None
        await self._storage_controllers.persistence.update_current_state(room_id)


@attr.s(slots=True, auto_attribs=True)
class _StateResMetrics:
@@ -22,7 +22,6 @@ from typing import (
    Any,
    Awaitable,
    Callable,
    ClassVar,
    Collection,
    Deque,
    Dict,
@@ -34,7 +33,6 @@ from typing import (
    Set,
    Tuple,
    TypeVar,
    Union,
)

import attr
@@ -113,43 +111,9 @@ times_pruned_extremities = Counter(


@attr.s(auto_attribs=True, slots=True)
class _PersistEventsTask:
    """A batch of events to persist."""

    name: ClassVar[str] = "persist_event_batch"  # used for opentracing

class _EventPersistQueueItem:
    events_and_contexts: List[Tuple[EventBase, EventContext]]
    backfilled: bool

    def try_merge(self, task: "_EventPersistQueueTask") -> bool:
        """Batches events with the same backfilled option together."""
        if (
            not isinstance(task, _PersistEventsTask)
            or self.backfilled != task.backfilled
        ):
            return False

        self.events_and_contexts.extend(task.events_and_contexts)
        return True


@attr.s(auto_attribs=True, slots=True)
class _UpdateCurrentStateTask:
    """A room whose current state needs recalculating."""

    name: ClassVar[str] = "update_current_state"  # used for opentracing

    def try_merge(self, task: "_EventPersistQueueTask") -> bool:
        """Deduplicates consecutive recalculations of current state."""
        return isinstance(task, _UpdateCurrentStateTask)


_EventPersistQueueTask = Union[_PersistEventsTask, _UpdateCurrentStateTask]
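The `try_merge` hooks let the persistence queue coalesce compatible work: event batches with matching `backfilled` flags are concatenated, and consecutive state recalculations collapse into a single task. A toy illustration of the merging rules, assuming `_PersistEventsTask` carries the `events_and_contexts` and `backfilled` fields used by `try_merge` above (strings stand in for real `(EventBase, EventContext)` pairs, which attrs does not validate at runtime):

    a = _PersistEventsTask(events_and_contexts=[("ev1", "ctx1")], backfilled=False)
    b = _PersistEventsTask(events_and_contexts=[("ev2", "ctx2")], backfilled=False)
    c = _PersistEventsTask(events_and_contexts=[("ev3", "ctx3")], backfilled=True)

    assert a.try_merge(b)      # same backfilled flag: batches are concatenated
    assert len(a.events_and_contexts) == 2
    assert not a.try_merge(c)  # different backfilled flag: kept separate
    assert _UpdateCurrentStateTask().try_merge(_UpdateCurrentStateTask())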
@attr.s(auto_attribs=True, slots=True)
class _EventPersistQueueItem:
    task: _EventPersistQueueTask
    deferred: ObservableDeferred

    parent_opentracing_span_contexts: List = attr.ib(factory=list)
@@ -163,16 +127,14 @@ _PersistResult = TypeVar("_PersistResult")
class _EventPeristenceQueue(Generic[_PersistResult]):
    """Queues up tasks so that they can be processed with only one concurrent
    transaction per room.

    Tasks can be bulk persistence of events or recalculation of a room's current state.
    """Queues up events so that they can be persisted in bulk with only one
    concurrent transaction per room.
    """

    def __init__(
        self,
        per_item_callback: Callable[
            [str, _EventPersistQueueTask],
            [List[Tuple[EventBase, EventContext]], bool],
            Awaitable[_PersistResult],
        ],
    ):
@@ -188,17 +150,18 @@ class _EventPeristenceQueue(Generic[_PersistResult]):
    async def add_to_queue(
        self,
        room_id: str,
        task: _EventPersistQueueTask,
        events_and_contexts: Iterable[Tuple[EventBase, EventContext]],
        backfilled: bool,
    ) -> _PersistResult:
        """Add a task to the queue.
        """Add events to the queue, with the given persist_event options.

        If we are not already processing tasks in this room, starts off a background
        If we are not already processing events in this room, starts off a background
        process to do so, calling the per_item_callback for each item.

        Args:
            room_id (str):
            task (_EventPersistQueueTask): A _PersistEventsTask or
                _UpdateCurrentStateTask to process.
            events_and_contexts (list[(EventBase, EventContext)]):
            backfilled (bool):

        Returns:
            the result returned by the `_per_item_callback` passed to
@@ -206,20 +169,26 @@ class _EventPeristenceQueue(Generic[_PersistResult]):
        """
        queue = self._event_persist_queues.setdefault(room_id, deque())

        if queue and queue[-1].task.try_merge(task):
            # the new task has been merged into the last task in the queue
        # if the last item in the queue has the same `backfilled` setting,
        # we can just add these new events to that item.
        if queue and queue[-1].backfilled == backfilled:
            end_item = queue[-1]
        else:
            # need to make a new queue item
            deferred: ObservableDeferred[_PersistResult] = ObservableDeferred(
                defer.Deferred(), consumeErrors=True
            )

            end_item = _EventPersistQueueItem(
                task=task,
                events_and_contexts=[],
                backfilled=backfilled,
                deferred=deferred,
            )
            queue.append(end_item)

        # add our events to the queue item
        end_item.events_and_contexts.extend(events_and_contexts)

        # also add our active opentracing span to the item so that we get a link back
        span = opentracing.active_span()
        if span:
@@ -233,7 +202,7 @@ class _EventPeristenceQueue(Generic[_PersistResult]):

            # add another opentracing span which links to the persist trace.
            with opentracing.start_active_span_follows_from(
                f"{task.name}_complete", (end_item.opentracing_span_context,)
                "persist_event_batch_complete", (end_item.opentracing_span_context,)
            ):
                pass

@@ -265,14 +234,16 @@ class _EventPeristenceQueue(Generic[_PersistResult]):
            for item in queue:
                try:
                    with opentracing.start_active_span_follows_from(
                        item.task.name,
                        "persist_event_batch",
                        item.parent_opentracing_span_contexts,
                        inherit_force_tracing=True,
                    ) as scope:
                        if scope:
                            item.opentracing_span_context = scope.span.context

                        ret = await self._per_item_callback(room_id, item.task)
                        ret = await self._per_item_callback(
                            item.events_and_contexts, item.backfilled
                        )
                except Exception:
                    with PreserveLoggingContext():
                        item.deferred.errback()
@@ -321,32 +292,9 @@ class EventsPersistenceStorageController:
        self._clock = hs.get_clock()
        self._instance_name = hs.get_instance_name()
        self.is_mine_id = hs.is_mine_id
        self._event_persist_queue = _EventPeristenceQueue(
            self._process_event_persist_queue_task
        )
        self._event_persist_queue = _EventPeristenceQueue(self._persist_event_batch)
        self._state_resolution_handler = hs.get_state_resolution_handler()

    async def _process_event_persist_queue_task(
        self,
        room_id: str,
        task: _EventPersistQueueTask,
    ) -> Dict[str, str]:
        """Callback for the _event_persist_queue

        Returns:
            A dictionary of event ID to event ID we didn't persist as we already
            had another event persisted with the same TXN ID.
        """
        if isinstance(task, _PersistEventsTask):
            return await self._persist_event_batch(room_id, task)
        elif isinstance(task, _UpdateCurrentStateTask):
            await self._update_current_state(room_id, task)
            return {}
        else:
            raise AssertionError(
                f"Found an unexpected task type in event persistence queue: {task}"
            )

    @opentracing.trace
    async def persist_events(
        self,
@@ -367,10 +315,6 @@ class EventsPersistenceStorageController:
            if they were deduplicated due to an event already existing that
            matched the transaction ID; the existing event is returned in such
            a case.

        Raises:
            PartialStateConflictError: if attempting to persist a partial state event in
                a room that has been un-partial stated.
        """
        partitioned: Dict[str, List[Tuple[EventBase, EventContext]]] = {}
        for event, ctx in events_and_contexts:
@@ -381,8 +325,7 @@ class EventsPersistenceStorageController:
        ) -> Dict[str, str]:
            room_id, evs_ctxs = item
            return await self._event_persist_queue.add_to_queue(
                room_id,
                _PersistEventsTask(events_and_contexts=evs_ctxs, backfilled=backfilled),
                room_id, evs_ctxs, backfilled=backfilled
            )

        ret_vals = await yieldable_gather_results(enqueue, partitioned.items())
@@ -420,19 +363,12 @@ class EventsPersistenceStorageController:
            latest persisted event. The returned event may not match the given
            event if it was deduplicated due to an existing event matching the
            transaction ID.

        Raises:
            PartialStateConflictError: if attempting to persist a partial state event in
                a room that has been un-partial stated.
        """
        # add_to_queue returns a map from event ID to existing event ID if the
        # event was deduplicated. (The dict may also include other entries if
        # the event was persisted in a batch with other events.)
        replaced_events = await self._event_persist_queue.add_to_queue(
            event.room_id,
            _PersistEventsTask(
                events_and_contexts=[(event, context)], backfilled=backfilled
            ),
            event.room_id, [(event, context)], backfilled=backfilled
        )
        replaced_event = replaced_events.get(event.event_id)
        if replaced_event:
@@ -447,22 +383,20 @@ class EventsPersistenceStorageController:

    async def update_current_state(self, room_id: str) -> None:
        """Recalculate the current state for a room, and persist it"""
        await self._event_persist_queue.add_to_queue(
            room_id,
            _UpdateCurrentStateTask(),
        )

    async def _update_current_state(
        self, room_id: str, _task: _UpdateCurrentStateTask
    ) -> None:
        """Callback for the _event_persist_queue

        Recalculates the current state for a room, and persists it.
        """
        state = await self._calculate_current_state(room_id)
        delta = await self._calculate_state_delta(room_id, state)

        await self.persist_events_store.update_current_state(room_id, delta)
        # TODO(faster_joins): get a real stream ordering, to make this work correctly
        #   across workers.
        #   https://github.com/matrix-org/synapse/issues/12994
        #
        # TODO(faster_joins): this can race against event persistence, in which case we
        #   will end up with incorrect state. Perhaps we should make this a job we
        #   farm out to the event persister thread, somehow.
        #   https://github.com/matrix-org/synapse/issues/13007
        #
        stream_id = self.main_store.get_room_max_stream_ordering()
        await self.persist_events_store.update_current_state(room_id, delta, stream_id)

    async def _calculate_current_state(self, room_id: str) -> StateMap[str]:
        """Calculate the current state of a room, based on the forward extremities
@@ -507,7 +441,9 @@ class EventsPersistenceStorageController:
        return res.state

    async def _persist_event_batch(
        self, _room_id: str, task: _PersistEventsTask
        self,
        events_and_contexts: List[Tuple[EventBase, EventContext]],
        backfilled: bool = False,
    ) -> Dict[str, str]:
        """Callback for the _event_persist_queue

@@ -517,14 +453,7 @@ class EventsPersistenceStorageController:
        Returns:
            A dictionary of event ID to event ID we didn't persist as we already
            had another event persisted with the same TXN ID.

        Raises:
            PartialStateConflictError: if attempting to persist a partial state event in
                a room that has been un-partial stated.
        """
        events_and_contexts = task.events_and_contexts
        backfilled = task.backfilled

        replaced_events: Dict[str, str] = {}
        if not events_and_contexts:
            return replaced_events
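The TODO above and the queued `_UpdateCurrentStateTask` variant are two sides of the same change: routing the recalculation through the per-room `_EventPeristenceQueue` is what removes the race, since only one task runs per room at a time. Schematically, under the task types defined earlier:

    # Both kinds of work share one per-room queue, so they are serialised:
    await self._event_persist_queue.add_to_queue(
        room_id,
        _PersistEventsTask(events_and_contexts=[(event, context)], backfilled=False),
    )
    await self._event_persist_queue.add_to_queue(
        room_id,
        _UpdateCurrentStateTask(),  # runs only after the batch above completes
    )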
@@ -982,12 +982,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
        stream_row = txn.fetchone()
        if stream_row:
            (offset_stream_ordering,) = stream_row

            # We need to bound by the current token to ensure that we handle
            # out-of-order writes correctly.
            rotate_to_stream_ordering = min(
                offset_stream_ordering, self._stream_id_gen.get_current_token()
            )
            rotate_to_stream_ordering = offset_stream_ordering
            caught_up = False
        else:
            rotate_to_stream_ordering = self._stream_id_gen.get_current_token()
@@ -1013,12 +1008,13 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
        sql = """
            SELECT user_id, room_id,
                coalesce(old.%s, 0) + upd.cnt,
                upd.stream_ordering
                upd.stream_ordering,
                old.user_id
            FROM (
                SELECT user_id, room_id, count(*) as cnt,
                    max(stream_ordering) as stream_ordering
                FROM event_push_actions
                WHERE ? < stream_ordering AND stream_ordering <= ?
                WHERE ? <= stream_ordering AND stream_ordering < ?
                    AND %s = 1
                GROUP BY user_id, room_id
            ) AS upd
@@ -1041,6 +1037,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
            summaries[(row[0], row[1])] = _EventPushSummary(
                unread_count=row[2],
                stream_ordering=row[3],
                old_user_id=row[4],
                notif_count=0,
            )
@@ -1061,27 +1058,57 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
            summaries[(row[0], row[1])] = _EventPushSummary(
                unread_count=0,
                stream_ordering=row[3],
                old_user_id=row[4],
                notif_count=row[2],
            )

        logger.info("Rotating notifications, handling %d rows", len(summaries))

        self.db_pool.simple_upsert_many_txn(
        # If the `old.user_id` above is NULL then we know there isn't already an
        # entry in the table, so we simply insert it. Otherwise we update the
        # existing table.
        self.db_pool.simple_insert_many_txn(
            txn,
            table="event_push_summary",
            key_names=("user_id", "room_id"),
            key_values=[(user_id, room_id) for user_id, room_id in summaries],
            value_names=("notif_count", "unread_count", "stream_ordering"),
            value_values=[
            keys=(
                "user_id",
                "room_id",
                "notif_count",
                "unread_count",
                "stream_ordering",
            ),
            values=[
                (
                    user_id,
                    room_id,
                    summary.notif_count,
                    summary.unread_count,
                    summary.stream_ordering,
                )
                for summary in summaries.values()
                for ((user_id, room_id), summary) in summaries.items()
                if summary.old_user_id is None
            ],
        )

        txn.execute_batch(
            """
                UPDATE event_push_summary
                SET notif_count = ?, unread_count = ?, stream_ordering = ?
                WHERE user_id = ? AND room_id = ?
            """,
            (
                (
                    summary.notif_count,
                    summary.unread_count,
                    summary.stream_ordering,
                    user_id,
                    room_id,
                )
                for ((user_id, room_id), summary) in summaries.items()
                if summary.old_user_id is not None
            ),
        )

        txn.execute(
            "UPDATE event_push_summary_stream_ordering SET stream_ordering = ?",
            (rotate_to_stream_ordering,),
@@ -1114,7 +1141,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
        txn.execute(
            """
            SELECT stream_ordering FROM event_push_actions
            WHERE stream_ordering <= ? AND highlight = 0
            WHERE stream_ordering < ? AND highlight = 0
            ORDER BY stream_ordering ASC LIMIT 1 OFFSET ?
            """,
            (
@@ -1129,12 +1156,10 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
        else:
            stream_ordering = max_stream_ordering_to_delete

        # We need to use an inclusive bound here to handle the case where a
        # single stream ordering has more than `batch_size` rows.
        txn.execute(
            """
            DELETE FROM event_push_actions
            WHERE stream_ordering <= ? AND highlight = 0
            WHERE stream_ordering < ? AND highlight = 0
            """,
            (stream_ordering,),
        )
@@ -1263,4 +1288,5 @@ class _EventPushSummary:

    unread_count: int
    stream_ordering: int
    old_user_id: str
    notif_count: int
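The reworked rotation replaces a blanket upsert with an explicit split: the join exposes `old.user_id`, which is NULL exactly when `event_push_summary` has no row yet, so rows partition cleanly into fresh inserts and in-place updates. A minimal sketch of that decomposition over the `summaries` dict built above:

    inserts = [
        (user_id, room_id, s.notif_count, s.unread_count, s.stream_ordering)
        for (user_id, room_id), s in summaries.items()
        if s.old_user_id is None  # no existing summary row: INSERT
    ]
    updates = [
        (s.notif_count, s.unread_count, s.stream_ordering, user_id, room_id)
        for (user_id, room_id), s in summaries.items()
        if s.old_user_id is not None  # row exists: UPDATE in place
    ]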
@@ -16,7 +16,6 @@
import itertools
import logging
from collections import OrderedDict
from http import HTTPStatus
from typing import (
    TYPE_CHECKING,
    Any,
@@ -36,7 +35,6 @@ from prometheus_client import Counter

import synapse.metrics
from synapse.api.constants import EventContentFields, EventTypes, RelationTypes
from synapse.api.errors import Codes, SynapseError
from synapse.api.room_versions import RoomVersions
from synapse.events import EventBase, relation_from_event
from synapse.events.snapshot import EventContext
@@ -71,24 +69,6 @@ event_counter = Counter(
)


class PartialStateConflictError(SynapseError):
    """An internal error raised when attempting to persist an event with partial state
    after the room containing the event has been un-partial stated.

    This error should be handled by recomputing the event context and trying again.

    This error has an HTTP status code so that it can be transported over replication.
    It should not be exposed to clients.
    """

    def __init__(self) -> None:
        super().__init__(
            HTTPStatus.CONFLICT,
            msg="Cannot persist partial state event in un-partial stated room",
            errcode=Codes.UNKNOWN,
        )
@attr.s(slots=True, auto_attribs=True)
class DeltaState:
    """Deltas to use to update the `current_state_events` table.
@@ -174,10 +154,6 @@ class PersistEventsStore:

        Returns:
            Resolves when the events have been persisted

        Raises:
            PartialStateConflictError: if attempting to persist a partial state event in
                a room that has been un-partial stated.
        """

        # We want to calculate the stream orderings as late as possible, as
@@ -378,9 +354,6 @@ class PersistEventsStore:
            For each room, a list of the event ids which are the forward
            extremities.

        Raises:
            PartialStateConflictError: if attempting to persist a partial state event in
                a room that has been un-partial stated.
        """
        state_delta_for_room = state_delta_for_room or {}
        new_forward_extremities = new_forward_extremities or {}
@@ -1007,16 +980,16 @@ class PersistEventsStore:
        self,
        room_id: str,
        state_delta: DeltaState,
        stream_id: int,
    ) -> None:
        """Update the current state stored in the database for the given room"""

        async with self._stream_id_gen.get_next() as stream_ordering:
            await self.db_pool.runInteraction(
                "update_current_state",
                self._update_current_state_txn,
                state_delta_by_room={room_id: state_delta},
                stream_id=stream_ordering,
            )
        await self.db_pool.runInteraction(
            "update_current_state",
            self._update_current_state_txn,
            state_delta_by_room={room_id: state_delta},
            stream_id=stream_id,
        )

    def _update_current_state_txn(
        self,
@@ -1331,10 +1304,6 @@ class PersistEventsStore:

        Returns:
            new list, without events which are already in the events table.

        Raises:
            PartialStateConflictError: if attempting to persist a partial state event in
                a room that has been un-partial stated.
        """
        txn.execute(
            "SELECT event_id, outlier FROM events WHERE event_id in (%s)"
@@ -2246,11 +2215,6 @@ class PersistEventsStore:
        txn: LoggingTransaction,
        events_and_contexts: Collection[Tuple[EventBase, EventContext]],
    ) -> None:
        """
        Raises:
            PartialStateConflictError: if attempting to persist a partial state event in
                a room that has been un-partial stated.
        """
        state_groups = {}
        for event, context in events_and_contexts:
            if event.internal_metadata.is_outlier():
@@ -2275,37 +2239,19 @@ class PersistEventsStore:
        # if we have partial state for these events, record the fact. (This happens
        # here rather than in _store_event_txn because it also needs to happen when
        # we de-outlier an event.)
        try:
            self.db_pool.simple_insert_many_txn(
                txn,
                table="partial_state_events",
                keys=("room_id", "event_id"),
                values=[
                    (
                        event.room_id,
                        event.event_id,
                    )
                    for event, ctx in events_and_contexts
                    if ctx.partial_state
                ],
            )
        except self.db_pool.engine.module.IntegrityError:
            logger.info(
                "Cannot persist events %s in rooms %s: room has been un-partial stated",
                [
                    event.event_id
                    for event, ctx in events_and_contexts
                    if ctx.partial_state
                ],
                list(
                    {
                        event.room_id
                        for event, ctx in events_and_contexts
                        if ctx.partial_state
                    }
                ),
            )
            raise PartialStateConflictError()
        self.db_pool.simple_insert_many_txn(
            txn,
            table="partial_state_events",
            keys=("room_id", "event_id"),
            values=[
                (
                    event.room_id,
                    event.event_id,
                )
                for event, ctx in events_and_contexts
                if ctx.partial_state
            ],
        )

        self.db_pool.simple_upsert_many_txn(
            txn,
@@ -1156,25 +1156,19 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
        return room_servers

    async def clear_partial_state_room(self, room_id: str) -> bool:
        """Clears the partial state flag for a room.

        Args:
            room_id: The room whose partial state flag is to be cleared.

        Returns:
            `True` if the partial state flag has been cleared successfully.

            `False` if the partial state flag could not be cleared because the room
            still contains events with partial state.
        """
        # this can race with incoming events, so we watch out for FK errors.
        # TODO(faster_joins): this still doesn't completely fix the race, since the persist process
        #   is not atomic. I fear we need an application-level lock.
        #   https://github.com/matrix-org/synapse/issues/12988
        try:
            await self.db_pool.runInteraction(
                "clear_partial_state_room", self._clear_partial_state_room_txn, room_id
            )
            return True
        except self.db_pool.engine.module.IntegrityError as e:
            # Assume that any `IntegrityError`s are due to partial state events.
            logger.info(
        except self.db_pool.engine.module.DatabaseError as e:
            # TODO(faster_joins): how do we distinguish between FK errors and other errors?
            #   https://github.com/matrix-org/synapse/issues/12988
            logger.warning(
                "Exception while clearing lazy partial-state-room %s, retrying: %s",
                room_id,
                e,
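A caller of `clear_partial_state_room` is expected to keep retrying until the flag can actually be cleared, i.e. until no partial state events remain. A sketch of such a retry loop (the one-second interval is an illustrative placeholder, not a value taken from Synapse):

    async def await_partial_state_clear(store, clock, room_id):
        # clear_partial_state_room returns False while partial state
        # events remain in the room; poll until it succeeds.
        while not await store.clear_partial_state_room(room_id):
            await clock.sleep(1)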
@@ -20,7 +20,6 @@ from typing import Any, Callable, Dict, Generator, Optional
import attr
from frozendict import frozendict
from matrix_common.versionstring import get_distribution_version_string
from typing_extensions import ParamSpec

from twisted.internet import defer, task
from twisted.internet.defer import Deferred
@@ -83,9 +82,6 @@ def unwrapFirstError(failure: Failure) -> Failure:
    return failure.value.subFailure  # type: ignore[union-attr] # Issue in Twisted's annotations


P = ParamSpec("P")


@attr.s(slots=True)
class Clock:
    """
@@ -114,7 +110,7 @@ class Clock:
        return int(self.time() * 1000)

    def looping_call(
        self, f: Callable[P, object], msec: float, *args: P.args, **kwargs: P.kwargs
        self, f: Callable, msec: float, *args: Any, **kwargs: Any
    ) -> LoopingCall:
        """Call a function repeatedly.
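The `ParamSpec` annotation being changed here is what lets mypy tie the `*args`/`**kwargs` accepted by `looping_call` to the signature of the callback itself. A stripped-down sketch of the idiom:

    from typing import Callable

    from typing_extensions import ParamSpec

    P = ParamSpec("P")

    def call_with(f: Callable[P, object], *args: P.args, **kwargs: P.kwargs) -> None:
        # The checker now knows args/kwargs must match f's own parameters.
        f(*args, **kwargs)

    call_with(print, "ok")  # accepted
    # call_with(divmod, 1)  # rejected by mypy: divmod() needs two arguments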
@@ -109,7 +109,7 @@ GLOBAL_ROOT = ListNode["_Node"].create_root_node()

@wrap_as_background_process("LruCache._expire_old_entries")
async def _expire_old_entries(
    clock: Clock, expiry_seconds: float, autotune_config: Optional[dict]
    clock: Clock, expiry_seconds: int, autotune_config: Optional[dict]
) -> None:
    """Walks the global cache list to find cache entries that haven't been
    accessed in the given number of seconds, or if a given memory threshold has been breached.
@@ -1579,8 +1579,8 @@ class RoomTestCase(unittest.HomeserverTestCase):
            access_token=self.admin_user_tok,
        )
        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
        self.assertEqual(room_id, channel.json_body["rooms"][0].get("room_id"))
        self.assertEqual("ж", channel.json_body["rooms"][0].get("name"))
        self.assertEqual(room_id, channel.json_body.get("rooms")[0].get("room_id"))
        self.assertEqual("ж", channel.json_body.get("rooms")[0].get("name"))

    def test_single_room(self) -> None:
        """Test that a single room can be requested correctly"""

@@ -1488,7 +1488,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):

        if channel.code != HTTPStatus.OK:
            raise HttpResponseException(
                channel.code, channel.result["reason"], channel.result["body"]
                channel.code, channel.result["reason"], channel.json_body
            )

        # Set monthly active users to the limit

@@ -949,7 +949,7 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase):
        client_secret: str,
        next_link: Optional[str] = None,
        expect_code: int = 200,
    ) -> Optional[str]:
    ) -> str:
        """Request a validation token to add an email address to a user's account

        Args:
@@ -959,8 +959,7 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase):
            expect_code: Expected return code of the call

        Returns:
            The ID of the new threepid validation session, or None if the response
            did not contain a session ID.
            The ID of the new threepid validation session
        """
        body = {"client_secret": client_secret, "email": email, "send_attempt": 1}
        if next_link:
@@ -153,22 +153,18 @@ class ProfileTestCase(unittest.HomeserverTestCase):
        )
        self.assertEqual(channel.code, 400, channel.result)

    def _get_displayname(self, name: Optional[str] = None) -> Optional[str]:
    def _get_displayname(self, name: Optional[str] = None) -> str:
        channel = self.make_request(
            "GET", "/profile/%s/displayname" % (name or self.owner,)
        )
        self.assertEqual(channel.code, 200, channel.result)
        # FIXME: If a user has no displayname set, Synapse returns 200 and omits a
        # displayname from the response. This contradicts the spec, see #13137.
        return channel.json_body.get("displayname")
        return channel.json_body["displayname"]

    def _get_avatar_url(self, name: Optional[str] = None) -> Optional[str]:
    def _get_avatar_url(self, name: Optional[str] = None) -> str:
        channel = self.make_request(
            "GET", "/profile/%s/avatar_url" % (name or self.owner,)
        )
        self.assertEqual(channel.code, 200, channel.result)
        # FIXME: If a user has no avatar set, Synapse returns 200 and omits an
        # avatar_url from the response. This contradicts the spec, see #13137.
        return channel.json_body.get("avatar_url")

    @unittest.override_config({"max_avatar_size": 50})

@@ -800,7 +800,7 @@ class RelationPaginationTestCase(BaseRelationsTestCase):
        )
        expected_event_ids.append(channel.json_body["event_id"])

        prev_token: Optional[str] = ""
        prev_token = ""
        found_event_ids: List[str] = []
        for _ in range(20):
            from_token = ""
@@ -43,7 +43,6 @@ from twisted.internet.defer import Deferred, fail, maybeDeferred, succeed
from twisted.internet.error import DNSLookupError
from twisted.internet.interfaces import (
    IAddress,
    IConsumer,
    IHostnameResolver,
    IProtocol,
    IPullProducer,
@@ -54,7 +53,11 @@ from twisted.internet.interfaces import (
    ITransport,
)
from twisted.python.failure import Failure
from twisted.test.proto_helpers import AccumulatingProtocol, MemoryReactorClock
from twisted.test.proto_helpers import (
    AccumulatingProtocol,
    MemoryReactor,
    MemoryReactorClock,
)
from twisted.web.http_headers import Headers
from twisted.web.resource import IResource
from twisted.web.server import Request, Site
@@ -93,7 +96,6 @@ class TimedOutException(Exception):
    """
@implementer(IConsumer)
@attr.s(auto_attribs=True)
class FakeChannel:
    """
@@ -102,7 +104,7 @@ class FakeChannel:
    """

    site: Union[Site, "FakeSite"]
    _reactor: MemoryReactorClock
    _reactor: MemoryReactor
    result: dict = attr.Factory(dict)
    _ip: str = "127.0.0.1"
    _producer: Optional[Union[IPullProducer, IPushProducer]] = None
@@ -120,7 +122,7 @@ class FakeChannel:
        self._request = request

    @property
    def json_body(self) -> JsonDict:
    def json_body(self):
        return json.loads(self.text_body)

    @property
@@ -138,7 +140,7 @@ class FakeChannel:
        return self.result.get("done", False)

    @property
    def code(self) -> int:
    def code(self):
        if not self.result:
            raise Exception("No result yet.")
        return int(self.result["code"])
@@ -158,7 +160,7 @@ class FakeChannel:
        self.result["reason"] = reason
        self.result["headers"] = headers

    def write(self, content: bytes) -> None:
    def write(self, content):
        assert isinstance(content, bytes), "Should be bytes! " + repr(content)

        if "body" not in self.result:
@@ -166,16 +168,11 @@ class FakeChannel:

        self.result["body"] += content

    # Type ignore: mypy doesn't like the fact that producer isn't an IProducer.
    def registerProducer(  # type: ignore[override]
        self,
        producer: Union[IPullProducer, IPushProducer],
        streaming: bool,
    ) -> None:
    def registerProducer(self, producer, streaming):
        self._producer = producer
        self.producerStreaming = streaming

        def _produce() -> None:
        def _produce():
            if self._producer:
                self._producer.resumeProducing()
                self._reactor.callLater(0.1, _produce)
@@ -183,32 +180,31 @@ class FakeChannel:
        if not streaming:
            self._reactor.callLater(0.0, _produce)

    def unregisterProducer(self) -> None:
    def unregisterProducer(self):
        if self._producer is None:
            return

        self._producer = None

    def requestDone(self, _self: Request) -> None:
    def requestDone(self, _self):
        self.result["done"] = True
        if isinstance(_self, SynapseRequest):
            assert _self.logcontext is not None
            self.resource_usage = _self.logcontext.get_resource_usage()

    def getPeer(self) -> IAddress:
    def getPeer(self):
        # We give an address so that getClientAddress/getClientIP returns a non null entry,
        # causing us to record the MAU
        return address.IPv4Address("TCP", self._ip, 3423)

    def getHost(self) -> IAddress:
    def getHost(self):
        # this is called by Request.__init__ to configure Request.host.
        return address.IPv4Address("TCP", "127.0.0.1", 8888)

    def isSecure(self) -> bool:
    def isSecure(self):
        return False

    @property
    def transport(self) -> "FakeChannel":
    def transport(self):
        return self

    def await_result(self, timeout_ms: int = 1000) -> None:
@@ -834,6 +830,7 @@ def setup_test_homeserver(

    # Mock TLS
    hs.tls_server_context_factory = Mock()
    hs.tls_client_options_factory = Mock()

    hs.setup()
    if homeserver_to_use == TestHomeServer:
@@ -148,12 +148,12 @@ class EventPushActionsStoreTestCase(HomeserverTestCase):
        _assert_counts(0, 0)
        _inject_actions(1, PlAIN_NOTIF)
        _assert_counts(1, 0)
        _rotate(1)
        _rotate(2)
        _assert_counts(1, 0)

        _inject_actions(3, PlAIN_NOTIF)
        _assert_counts(2, 0)
        _rotate(3)
        _rotate(4)
        _assert_counts(2, 0)

        _inject_actions(5, PlAIN_NOTIF)
@@ -164,7 +164,7 @@ class EventPushActionsStoreTestCase(HomeserverTestCase):
        _assert_counts(0, 0)

        _inject_actions(6, PlAIN_NOTIF)
        _rotate(6)
        _rotate(7)
        _assert_counts(1, 0)

        self.get_success(
@@ -180,13 +180,13 @@ class EventPushActionsStoreTestCase(HomeserverTestCase):

        _inject_actions(8, HIGHLIGHT)
        _assert_counts(1, 1)
        _rotate(8)
        _rotate(9)
        _assert_counts(1, 1)

        # Check that adding another notification and rotating after highlight
        # works.
        _inject_actions(10, PlAIN_NOTIF)
        _rotate(10)
        _rotate(11)
        _assert_counts(2, 1)

        # Check that sending read receipts at different points results in the
@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from synapse.api.constants import EventTypes
from synapse.api.room_versions import RoomVersions
from synapse.types import RoomAlias, RoomID, UserID

@@ -64,3 +65,71 @@ class RoomStoreTestCase(HomeserverTestCase):
        self.assertIsNone(
            (self.get_success(self.store.get_room_with_stats("!uknown:test"))),
        )


class RoomEventsStoreTestCase(HomeserverTestCase):
    def prepare(self, reactor, clock, hs):
        # Room events need the full datastore, for persist_event() and
        # get_room_state()
        self.store = hs.get_datastores().main
        self._storage_controllers = hs.get_storage_controllers()
        self.event_factory = hs.get_event_factory()

        self.room = RoomID.from_string("!abcde:test")

        self.get_success(
            self.store.store_room(
                self.room.to_string(),
                room_creator_user_id="@creator:text",
                is_public=True,
                room_version=RoomVersions.V1,
            )
        )

    def inject_room_event(self, **kwargs):
        self.get_success(
            self._storage_controllers.persistence.persist_event(
                self.event_factory.create_event(room_id=self.room.to_string(), **kwargs)
            )
        )

    def STALE_test_room_name(self):
        name = "A-Room-Name"

        self.inject_room_event(
            etype=EventTypes.Name, name=name, content={"name": name}, depth=1
        )

        state = self.get_success(
            self._storage_controllers.state.get_current_state(
                room_id=self.room.to_string()
            )
        )

        self.assertEqual(1, len(state))
        self.assertObjectHasAttributes(
            {"type": "m.room.name", "room_id": self.room.to_string(), "name": name},
            state[0],
        )

    def STALE_test_room_topic(self):
        topic = "A place for things"

        self.inject_room_event(
            etype=EventTypes.Topic, topic=topic, content={"topic": topic}, depth=1
        )

        state = self.get_success(
            self._storage_controllers.state.get_current_state(
                room_id=self.room.to_string()
            )
        )

        self.assertEqual(1, len(state))
        self.assertObjectHasAttributes(
            {"type": "m.room.topic", "room_id": self.room.to_string(), "topic": topic},
            state[0],
        )

    # Not testing the various 'level' methods for now because there are lots
    # of them and they need coalescing; see JIRA SPEC-11

@@ -195,8 +195,6 @@ class StateTestCase(unittest.TestCase):
            "get_state_resolution_handler",
            "get_account_validity_handler",
            "get_macaroon_generator",
            "get_instance_name",
            "get_simple_http_client",
            "hostname",
        ]
    )
138 tests/utils.py
@@ -15,17 +15,12 @@

import atexit
import os
from typing import Any, Callable, Dict, List, Tuple, Union, overload

import attr
from typing_extensions import Literal, ParamSpec

from synapse.api.constants import EventTypes
from synapse.api.room_versions import RoomVersions
from synapse.config.homeserver import HomeServerConfig
from synapse.config.server import DEFAULT_ROOM_VERSION
from synapse.logging.context import current_context, set_current_context
from synapse.server import HomeServer
from synapse.storage.database import LoggingDatabaseConnection
from synapse.storage.engines import create_engine
from synapse.storage.prepare_database import prepare_database
@@ -55,11 +50,12 @@ SQLITE_PERSIST_DB = os.environ.get("SYNAPSE_TEST_PERSIST_SQLITE_DB") is not None
POSTGRES_DBNAME_FOR_INITIAL_CREATE = "postgres"


def setupdb() -> None:
def setupdb():
    # If we're using PostgreSQL, set up the db once
    if USE_POSTGRES_FOR_TESTS:
        # create a PostgresEngine
        db_engine = create_engine({"name": "psycopg2", "args": {}})

        # connect to postgres to create the base database.
        db_conn = db_engine.module.connect(
            user=POSTGRES_USER,
@@ -68,7 +64,7 @@ def setupdb() -> None:
            password=POSTGRES_PASSWORD,
            dbname=POSTGRES_DBNAME_FOR_INITIAL_CREATE,
        )
        db_engine.attempt_to_set_autocommit(db_conn, autocommit=True)
        db_conn.autocommit = True
        cur = db_conn.cursor()
        cur.execute("DROP DATABASE IF EXISTS %s;" % (POSTGRES_BASE_DB,))
        cur.execute(
@@ -86,11 +82,11 @@ def setupdb() -> None:
            port=POSTGRES_PORT,
            password=POSTGRES_PASSWORD,
        )
        logging_conn = LoggingDatabaseConnection(db_conn, db_engine, "tests")
        prepare_database(logging_conn, db_engine, None)
        logging_conn.close()
        db_conn = LoggingDatabaseConnection(db_conn, db_engine, "tests")
        prepare_database(db_conn, db_engine, None)
        db_conn.close()

        def _cleanup() -> None:
        def _cleanup():
            db_conn = db_engine.module.connect(
                user=POSTGRES_USER,
                host=POSTGRES_HOST,
@@ -98,7 +94,7 @@ def setupdb() -> None:
                password=POSTGRES_PASSWORD,
                dbname=POSTGRES_DBNAME_FOR_INITIAL_CREATE,
            )
            db_engine.attempt_to_set_autocommit(db_conn, autocommit=True)
            db_conn.autocommit = True
            cur = db_conn.cursor()
            cur.execute("DROP DATABASE IF EXISTS %s;" % (POSTGRES_BASE_DB,))
            cur.close()
@@ -107,19 +103,7 @@ def setupdb() -> None:
    atexit.register(_cleanup)


@overload
def default_config(name: str, parse: Literal[False] = ...) -> Dict[str, object]:
    ...


@overload
def default_config(name: str, parse: Literal[True]) -> HomeServerConfig:
    ...


def default_config(
    name: str, parse: bool = False
) -> Union[Dict[str, object], HomeServerConfig]:
def default_config(name, parse=False):
    """
    Create a reasonable test config.
    """
@@ -197,122 +181,90 @@ def default_config(
    return config_dict
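The `@overload` pair in this hunk gives `default_config` a precise return type for each value of `parse`: a plain dict when `parse=False`, a full `HomeServerConfig` when `parse=True`. A stripped-down sketch of the idiom:

    from typing import Union, overload

    from typing_extensions import Literal

    class Config: ...

    @overload
    def load(parse: Literal[False] = ...) -> dict: ...
    @overload
    def load(parse: Literal[True]) -> Config: ...

    def load(parse: bool = False) -> Union[dict, Config]:
        # Overloads are erased at runtime; only this implementation executes,
        # but mypy narrows the return type from the literal argument.
        return Config() if parse else {}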
def mock_getRawHeaders(headers=None):  # type: ignore[no-untyped-def]
def mock_getRawHeaders(headers=None):
    headers = headers if headers is not None else {}

    def getRawHeaders(name, default=None):  # type: ignore[no-untyped-def]
        # If the requested header is present, the real twisted function returns
        # List[str] if name is a str and List[bytes] if name is a bytes.
        # This mock doesn't support that behaviour.
        # Fortunately, none of the current callers of mock_getRawHeaders() provide a
        # headers dict, so we don't encounter this discrepancy in practice.
    def getRawHeaders(name, default=None):
        return headers.get(name, default)

    return getRawHeaders


P = ParamSpec("P")


@attr.s(slots=True, auto_attribs=True)
class Timer:
    absolute_time: float
    callback: Callable[[], None]
    expired: bool


# TODO: Make this generic over a ParamSpec?
@attr.s(slots=True, auto_attribs=True)
class Looper:
    func: Callable[..., Any]
    interval: float  # seconds
    last: float
    args: Tuple[object, ...]
    kwargs: Dict[str, object]


class MockClock:
    now = 1000.0
    now = 1000

    def __init__(self) -> None:
        # Timers in no particular order
        self.timers: List[Timer] = []
        self.loopers: List[Looper] = []
    def __init__(self):
        # list of lists of [absolute_time, callback, expired] in no particular
        # order
        self.timers = []
        self.loopers = []

    def time(self) -> float:
    def time(self):
        return self.now

    def time_msec(self) -> int:
        return int(self.time() * 1000)
    def time_msec(self):
        return self.time() * 1000

    def call_later(
        self,
        delay: float,
        callback: Callable[P, object],
        *args: P.args,
        **kwargs: P.kwargs,
    ) -> Timer:
    def call_later(self, delay, callback, *args, **kwargs):
        ctx = current_context()

        def wrapped_callback() -> None:
        def wrapped_callback():
            set_current_context(ctx)
            callback(*args, **kwargs)

        t = Timer(self.now + delay, wrapped_callback, False)
        t = [self.now + delay, wrapped_callback, False]
        self.timers.append(t)

        return t

    def looping_call(
        self,
        function: Callable[P, object],
        interval: float,
        *args: P.args,
        **kwargs: P.kwargs,
    ) -> None:
        # This type-ignore should be redundant once we use a mypy release with
        # https://github.com/python/mypy/pull/12668.
        self.loopers.append(Looper(function, interval / 1000.0, self.now, args, kwargs))  # type: ignore[arg-type]
    def looping_call(self, function, interval, *args, **kwargs):
        self.loopers.append([function, interval / 1000.0, self.now, args, kwargs])

    def cancel_call_later(self, timer: Timer, ignore_errs: bool = False) -> None:
        if timer.expired:
    def cancel_call_later(self, timer, ignore_errs=False):
        if timer[2]:
            if not ignore_errs:
                raise Exception("Cannot cancel an expired timer")

        timer.expired = True
        timer[2] = True
        self.timers = [t for t in self.timers if t != timer]

    # For unit testing
    def advance_time(self, secs: float) -> None:
    def advance_time(self, secs):
        self.now += secs

        timers = self.timers
        self.timers = []

        for t in timers:
            if t.expired:
            time, callback, expired = t

            if expired:
                raise Exception("Timer already expired")

            if self.now >= t.absolute_time:
                t.expired = True
                t.callback()
            if self.now >= time:
                t[2] = True
                callback()
            else:
                self.timers.append(t)

        for looped in self.loopers:
            if looped.last + looped.interval < self.now:
                looped.func(*looped.args, **looped.kwargs)
                looped.last = self.now
            func, interval, last, args, kwargs = looped
            if last + interval < self.now:
                func(*args, **kwargs)
                looped[2] = self.now

    def advance_time_msec(self, ms: float) -> None:
    def advance_time_msec(self, ms):
        self.advance_time(ms / 1000.0)

    def time_bound_deferred(self, d, *args, **kwargs):
        # We don't bother timing things out for now.
        return d
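For orientation, tests drive `MockClock` by advancing virtual time rather than sleeping; the attrs-based `Timer`/`Looper` variant shown above behaves identically to the list-based one. A small usage sketch:

    clock = MockClock()
    fired = []

    clock.call_later(5, lambda: fired.append("later"))
    clock.looping_call(lambda: fired.append("loop"), 2000)  # interval in msec

    clock.advance_time(3)   # 3s elapsed: only the 2s looping call is due
    assert fired == ["loop"]
    clock.advance_time(3)   # 6s elapsed: the 5s one-shot timer has now fired
    assert "later" in fired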
async def create_room(hs: HomeServer, room_id: str, creator_id: str) -> None:

async def create_room(hs, room_id: str, creator_id: str):
    """Creates and persists a creation event for the given room"""

    persistence_store = hs.get_storage_controllers().persistence
    assert persistence_store is not None
    store = hs.get_datastores().main
    event_builder_factory = hs.get_event_builder_factory()
    event_creation_handler = hs.get_event_creation_handler()