Compare commits
30 Commits
shay/mx_ma ... anoa/docs_

| Author | SHA1 | Date |
|---|---|---|
| | 793a5bfd12 | |
| | ba3fd54bad | |
| | b2df0716bc | |
| | 75dff3dc98 | |
| | 01e625513a | |
| | 873d467976 | |
| | 96e0cdbc5a | |
| | 9ce51a47f6 | |
| | aa5f5ede33 | |
| | d66d68f917 | |
| | c4514b97db | |
| | 77dee1b451 | |
| | 5938928c59 | |
| | db2edf5a65 | |
| | 13e4386710 | |
| | bf2fea8f7d | |
| | ae7858f184 | |
| | 01dcf7532d | |
| | 7e6598bcf6 | |
| | 8f5d2823df | |
| | 8d156ec0ba | |
| | 57fac2a234 | |
| | 3ae56d125c | |
| | 0d9eaa19fd | |
| | 0b684b59e5 | |
| | 629aa51743 | |
| | 5d3509dfda | |
| | 5a320baa45 | |
| | f282d5fc11 | |
| | ce6ecdd4b4 | |
.ci/scripts/record_available_doc_versions.py (new executable file, 28 lines)

@@ -0,0 +1,28 @@
#!/usr/bin/env python3

# This script will write a json file to $OUTPUT_FILE that contains the name of
# each available Synapse version with documentation.
#
# This script assumes that any top-level directory in the "gh-pages" branch is
# named after a documentation version and contains documentation website files.

import os.path
import json

OUTPUT_FILE = "versions.json"

# Determine the list of Synapse versions that have documentation.
doc_versions = []
for filepath in os.listdir():
    if os.path.isdir(filepath):
        doc_versions.append(filepath)

# Record the documentation versions in a json file, such that the
# frontend javascript is aware of what versions exist.
to_write = {
    "versions": doc_versions,
    "default_version": "latest",
}

# Write the file.
with open(OUTPUT_FILE, "w") as f:
    f.write(json.dumps(to_write))
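As a review aside: run from the root of a gh-pages checkout, the script above emits a single JSON object. A quick illustrative sketch (the version directory names are made up, and the result is sorted here for determinism; the script itself relies on `os.listdir()` order):

```python
# Illustrative only: reproduce the script's logic against a throwaway
# directory populated with made-up version directories.
import json
import os
import tempfile

with tempfile.TemporaryDirectory() as checkout:
    for name in ("latest", "v1.57", "v1.58"):  # hypothetical gh-pages dirs
        os.mkdir(os.path.join(checkout, name))

    doc_versions = sorted(e.name for e in os.scandir(checkout) if e.is_dir())
    print(json.dumps({"versions": doc_versions, "default_version": "latest"}))
    # {"versions": ["latest", "v1.57", "v1.58"], "default_version": "latest"}
```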
.github/workflows/docs.yaml (vendored, 28 changes)

@@ -14,7 +14,7 @@ on:

jobs:
  pages:
    name: GitHub Pages
    name: Build and deploy docs
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2

@@ -63,3 +63,29 @@ jobs:
        github_token: ${{ secrets.GITHUB_TOKEN }}
        publish_dir: ./book
        destination_dir: ./${{ steps.vars.outputs.branch-version }}

  list_available_versions:
    needs: pages
    runs-on: ubuntu-latest
    steps:
      # Check out the current branch
      - uses: actions/checkout@v3
        with:
          persist-credentials: false

      - name: Save the script
        run: cp .ci/scripts/record_available_doc_versions.py /

      - uses: actions/setup-python@v3

      # Check out the gh-pages branch, which we'll be pushing the doc versions to
      - uses: actions/checkout@v3
        with:
          persist-credentials: false
          # Check out the gh-pages branch
          ref: 'gh-pages'

      - name: Record the available documentation versions
        run: |
          # Download the script
          /record_available_doc_versions
.github/workflows/latest_deps.yml (vendored, 3 changes)

@@ -32,12 +32,15 @@ jobs:
        with:
          python-version: "3.x"
          poetry-version: "1.2.0b1"
          extras: "all"
      # Dump installed versions for debugging.
      - run: poetry run pip list > before.txt
      # Upgrade all runtime dependencies only. This is intended to mimic a fresh
      # `pip install matrix-synapse[all]` as closely as possible.
      - run: poetry update --no-dev
      - run: poetry run pip list > after.txt && (diff -u before.txt after.txt || true)
      - name: Remove warn_unused_ignores from mypy config
        run: sed '/warn_unused_ignores = True/d' -i mypy.ini
      - run: poetry run mypy

  trial:
    runs-on: ubuntu-latest
.github/workflows/twisted_trunk.yml (vendored, 2 changes)

@@ -24,6 +24,8 @@ jobs:
          poetry remove twisted
          poetry add --extras tls git+https://github.com/twisted/twisted.git#trunk
          poetry install --no-interaction --extras "all test"
      - name: Remove warn_unused_ignores from mypy config
        run: sed '/warn_unused_ignores = True/d' -i mypy.ini
      - run: poetry run mypy

  trial:
CHANGES.md (16 changes)

@@ -1,3 +1,17 @@
Synapse 1.59.0
==============

The non-standard `m.login.jwt` login type has been removed from Synapse. It can be replaced with `org.matrix.login.jwt` for identical behaviour. This is only used if `jwt_config.enabled` is set to `true` in the configuration.


Synapse 1.58.0 (2022-05-03)
===========================

As of this release, the groups/communities feature in Synapse is now disabled by default. See [\#11584](https://github.com/matrix-org/synapse/issues/11584) for details. As mentioned in [the upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#upgrading-to-v1580), this feature will be removed in Synapse 1.61.

No significant changes since 1.58.0rc2.


Synapse 1.58.0rc2 (2022-04-26)
==============================

@@ -19,8 +33,6 @@ Internal Changes
Synapse 1.58.0rc1 (2022-04-26)
==============================

As of this release, the groups/communities feature in Synapse is now disabled by default. See [\#11584](https://github.com/matrix-org/synapse/issues/11584) for details. As mentioned in [the upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#upgrading-to-v1580), this feature will be removed in Synapse 1.61.

Features
--------
README.rst (10 changes)

@@ -55,7 +55,7 @@ solutions. The hope is for Matrix to act as the building blocks for a new
generation of fully open and interoperable messaging and VoIP apps for the
internet.

Synapse is a Matrix "homeserver" implementation developed by the matrix.org core
Synapse is a Matrix "homeserver" implementation developed by the matrix.org core
team, written in Python 3/Twisted.

In Matrix, every user runs one or more Matrix clients, which connect through to

@@ -294,13 +294,13 @@ directory of your choice::

    cd synapse

Synapse has a number of external dependencies. We maintain a fixed development
environment using [poetry](https://python-poetry.org/). First, install poetry. We recommend
environment using `Poetry <https://python-poetry.org/>`_. First, install poetry. We recommend::

    pip install --user pipx
    pipx install poetry

as described `here <https://python-poetry.org/docs/#installing-with-pipx>`_.
(See `poetry's installation docs <https://python-poetry.org/docs/#installation>`
(See `poetry's installation docs <https://python-poetry.org/docs/#installation>`_
for other installation methods.) Then ask poetry to create a virtual environment
from the project and install Synapse's dependencies::

@@ -309,11 +309,11 @@ from the project and install Synapse's dependencies::

This will run a process of downloading and installing all the needed
dependencies into a virtual env.

We recommend using the demo which starts 3 federated instances running on ports `8080` - `8082`
We recommend using the demo which starts 3 federated instances running on ports `8080` - `8082`::

    poetry run ./demo/start.sh

(to stop, you can use `poetry run ./demo/stop.sh`)
(to stop, you can use ``poetry run ./demo/stop.sh``)

See the `demo documentation <https://matrix-org.github.io/synapse/develop/development/demo.html>`_
for more information.
changelog.d/12273.bugfix (new file, 1 change)

@@ -0,0 +1 @@
Fix a bug introduced in Synapse v1.48.0 where the latest thread reply provided failed to include the proper bundled aggregations.
changelog.d/12556.misc (new file, 1 change)

@@ -0,0 +1 @@
Release script: confirm the commit to be tagged before tagging.

changelog.d/12570.bugfix (new file, 1 change)

@@ -0,0 +1 @@
Fix a bug introduced in Synapse 1.57 which could cause `Failed to calculate hosts in room` errors to be logged for outbound federation.

changelog.d/12576.misc (new file, 1 change)

@@ -0,0 +1 @@
Allow unused `#type: ignore` comments in bleeding edge CI jobs.

changelog.d/12579.doc (new file, 1 change)

@@ -0,0 +1 @@
Add missing linebreak to pipx install instructions.

changelog.d/12580.bugfix (new file, 1 change)

@@ -0,0 +1 @@
Fix a long standing bug where status codes would almost always get logged as 200!, irrespective of the actual status code, when clients disconnect before a request has finished processing.

changelog.d/12581.misc (new file, 1 change)

@@ -0,0 +1 @@
Improve docstrings for the receipts store.

changelog.d/12582.misc (new file, 1 change)

@@ -0,0 +1 @@
Use constants for read-receipts in tests.

changelog.d/12587.misc (new file, 1 change)

@@ -0,0 +1 @@
Add `@cancellable` decorator, for use on endpoint methods that can be cancelled when clients disconnect.

changelog.d/12589.misc (new file, 1 change)

@@ -0,0 +1 @@
Remove special-case for `twisted` logger from default log config.

changelog.d/12594.bugfix (new file, 1 change)

@@ -0,0 +1 @@
Fix race when persisting an event and deleting a room that could lead to outbound federation breaking.

changelog.d/12596.removal (new file, 1 change)

@@ -0,0 +1 @@
Remove unstable identifiers from [MSC3069](https://github.com/matrix-org/matrix-doc/pull/3069).

changelog.d/12597.removal (new file, 2 changes)

@@ -0,0 +1,2 @@
Remove the unspecified `m.login.jwt` login type and the unstable `uk.half-shot.msc2778.login.application_service` from
[MSC2778](https://github.com/matrix-org/matrix-doc/pull/2778).

changelog.d/12608.misc (new file, 1 change)

@@ -0,0 +1 @@
Remove redundant lines of config from `mypy.ini`.

changelog.d/12612.bugfix (new file, 1 change)

@@ -0,0 +1 @@
Fix a typo in the announcement text generated by the Synapse release development script.

changelog.d/12613.removal (new file, 1 change)

@@ -0,0 +1 @@
Synapse now requires at least Python 3.7.1 (up from 3.7.0), for compatibility with the latest Twisted trunk.

changelog.d/12614.misc (new file, 1 change)

@@ -0,0 +1 @@
Add extra debug logging to federation sender.

changelog.d/12620.misc (new file, 1 change)

@@ -0,0 +1 @@
Add a consistency check on events which we read from the database.

changelog.d/12624.misc (new file, 1 change)

@@ -0,0 +1 @@
Remove use of constantly library and switch to enums for EventRedactBehaviour. Contributed by @andrewdoh.

changelog.d/12627.doc (new file, 1 change)

@@ -0,0 +1 @@
Fixes to the formatting of README.rst.
debian/changelog (vendored, 6 changes)

@@ -1,3 +1,9 @@
matrix-synapse-py3 (1.58.0) stable; urgency=medium

  * New Synapse release 1.58.0.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 03 May 2022 10:52:58 +0100

matrix-synapse-py3 (1.58.0~rc2) stable; urgency=medium

  * New Synapse release 1.58.0rc2.
@@ -17,9 +17,6 @@ follows:
}
```

Note that the login type of `m.login.jwt` is supported, but is deprecated. This
will be removed in a future version of Synapse.

The `token` field should include the JSON web token with the following claims:

* A claim that encodes the local part of the user ID is required. By default,
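For readers following the `m.login.jwt` removal: a hedged sketch of a JWT login using the surviving `org.matrix.login.jwt` type. The homeserver URL and signing secret below are hypothetical, and this assumes the default configuration where the `sub` claim carries the localpart:

```python
# Hedged sketch, not from the diff: log in with a JWT using the stable
# org.matrix.login.jwt type. The homeserver URL and the "secret" signing
# key are made up; jwt_config.enabled must be true on the server.
import jwt       # pyjwt
import requests

token = jwt.encode({"sub": "alice"}, "secret", algorithm="HS256")

resp = requests.post(
    "https://homeserver.example/_matrix/client/r0/login",
    json={"type": "org.matrix.login.jwt", "token": token},
)
print(resp.json())  # on success, includes user_id and access_token
```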
@@ -62,13 +62,6 @@ loggers:
        # information such as access tokens.
        level: INFO

    twisted:
        # We send the twisted logging directly to the file handler,
        # to work around https://github.com/matrix-org/synapse/issues/3471
        # when using "buffer" logger. Use "console" to log to stderr instead.
        handlers: [file]
        propagate: false

root:
    level: INFO
@@ -28,7 +28,7 @@ See the following for how to decode the dense data available from the default lo
| NNNN | Total time waiting for response to DB queries across all parallel DB work from this request |
| OOOO | Count of DB transactions performed |
| PPPP | Response body size |
| QQQQ | Response status code (prefixed with ! if the socket was closed before the response was generated) |
| QQQQ | Response status code<br/>Suffixed with `!` if the socket was closed before the response was generated.<br/>A `499!` status code indicates that Synapse also cancelled request processing after the socket was closed.<br/> |
| RRRR | Request |
| SSSS | User-agent |
| TTTT | Events fetched from DB to service this request (note that this does not include events fetched from the cache) |
mypy.ini (56 changes)

@@ -241,98 +241,46 @@ disallow_untyped_defs = True
[mypy-authlib.*]
ignore_missing_imports = True

[mypy-bcrypt]
ignore_missing_imports = True

[mypy-canonicaljson]
ignore_missing_imports = True

[mypy-constantly]
ignore_missing_imports = True

[mypy-daemonize]
ignore_missing_imports = True

[mypy-h11]
ignore_missing_imports = True

[mypy-hiredis]
ignore_missing_imports = True

[mypy-hyperlink]
ignore_missing_imports = True

[mypy-ijson.*]
ignore_missing_imports = True

[mypy-importlib_metadata.*]
ignore_missing_imports = True

[mypy-jaeger_client.*]
ignore_missing_imports = True

[mypy-josepy.*]
ignore_missing_imports = True

[mypy-jwt.*]
ignore_missing_imports = True

[mypy-lxml]
ignore_missing_imports = True

[mypy-msgpack]
ignore_missing_imports = True

[mypy-nacl.*]
ignore_missing_imports = True

# Note: WIP stubs available at
# https://github.com/microsoft/python-type-stubs/tree/64934207f523ad6b611e6cfe039d85d7175d7d0d/netaddr
[mypy-netaddr]
ignore_missing_imports = True

[mypy-parameterized.*]
ignore_missing_imports = True

[mypy-phonenumbers.*]
ignore_missing_imports = True

[mypy-prometheus_client.*]
ignore_missing_imports = True

[mypy-pymacaroons.*]
ignore_missing_imports = True

[mypy-pympler.*]
ignore_missing_imports = True

[mypy-redbaron.*]
ignore_missing_imports = True

[mypy-rust_python_jaeger_reporter.*]
ignore_missing_imports = True

[mypy-saml2.*]
ignore_missing_imports = True

[mypy-sentry_sdk]
ignore_missing_imports = True

[mypy-service_identity.*]
ignore_missing_imports = True

[mypy-signedjson.*]
ignore_missing_imports = True

[mypy-srvlookup.*]
ignore_missing_imports = True

[mypy-treq.*]
ignore_missing_imports = True

[mypy-twisted.*]
ignore_missing_imports = True

[mypy-zope]
ignore_missing_imports = True

[mypy-incremental.*]
ignore_missing_imports = True
poetry.lock (generated, 4 changes)

@@ -1561,8 +1561,8 @@ url_preview = ["lxml"]

[metadata]
lock-version = "1.1"
python-versions = "^3.7"
content-hash = "3825cef058b8c9f520ef4b7acb92519be95db9a663a61c2e89a5fe431ed55655"
python-versions = "^3.7.1"
content-hash = "2bda1a7cfc8cc02832b4a7d16bf7e1615cb05e0639bdb30688aadf692d851942"

[metadata.files]
attrs = [
@@ -54,7 +54,7 @@ skip_gitignore = true

[tool.poetry]
name = "matrix-synapse"
version = "1.58.0rc2"
version = "1.58.0"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "Apache-2.0"

@@ -100,7 +100,7 @@ synapse_review_recent_signups = "synapse._scripts.review_recent_signups:main"
update_synapse_database = "synapse._scripts.update_synapse_database:main"

[tool.poetry.dependencies]
python = "^3.7"
python = "^3.7.1"

# Mandatory Dependencies
# ----------------------
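This is the version-constraint side of changelog.d/12613.removal above. For illustration, the caret constraint `^3.7.1` is assumed to expand to `>=3.7.1,<4.0`; a small check of that reading using the third-party `packaging` library rather than poetry itself:

```python
# Assumed expansion of poetry's caret constraint "^3.7.1"; verified here
# with the `packaging` library, not with poetry.
from packaging.specifiers import SpecifierSet

spec = SpecifierSet(">=3.7.1,<4.0")
assert "3.7.0" not in spec  # too old for the latest Twisted trunk
assert "3.7.1" in spec
assert "3.10.4" in spec
```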
@@ -89,13 +89,7 @@ def prepare() -> None:
    """

    # Make sure we're in a git repo.
    try:
        repo = git.Repo()
    except git.InvalidGitRepositoryError:
        raise click.ClickException("Not in Synapse repo.")

    if repo.is_dirty():
        raise click.ClickException("Uncommitted changes exist.")
    repo = get_repo_and_check_clean_checkout()

    click.secho("Updating git repo...")
    repo.remote().fetch()

@@ -171,9 +165,7 @@ def prepare() -> None:
    assert not parsed_new_version.is_devrelease
    assert not parsed_new_version.is_postrelease

    release_branch_name = (
        f"release-v{parsed_new_version.major}.{parsed_new_version.minor}"
    )
    release_branch_name = get_release_branch_name(parsed_new_version)
    release_branch = find_ref(repo, release_branch_name)
    if release_branch:
        if release_branch.is_remote():

@@ -274,13 +266,7 @@ def tag(gh_token: Optional[str]) -> None:
    """Tags the release and generates a draft GitHub release"""

    # Make sure we're in a git repo.
    try:
        repo = git.Repo()
    except git.InvalidGitRepositoryError:
        raise click.ClickException("Not in Synapse repo.")

    if repo.is_dirty():
        raise click.ClickException("Uncommitted changes exist.")
    repo = get_repo_and_check_clean_checkout()

    click.secho("Updating git repo...")
    repo.remote().fetch()

@@ -293,6 +279,15 @@ def tag(gh_token: Optional[str]) -> None:
    if tag_name in repo.tags:
        raise click.ClickException(f"Tag {tag_name} already exists!\n")

    # Check we're on the right release branch
    release_branch = get_release_branch_name(current_version)
    if repo.active_branch.name != release_branch:
        click.echo(
            f"Need to be on the release branch ({release_branch}) before tagging. "
            f"Currently on ({repo.active_branch.name})."
        )
        click.get_current_context().abort()

    # Get the appropriate changelogs and tag.
    changes = get_changes_for_version(current_version)

@@ -358,21 +353,15 @@ def tag(gh_token: Optional[str]) -> None:
@cli.command()
@click.option("--gh-token", envvar=["GH_TOKEN", "GITHUB_TOKEN"], required=True)
def publish(gh_token: str) -> None:
    """Publish release."""
    """Publish release on GitHub."""

    # Make sure we're in a git repo.
    try:
        repo = git.Repo()
    except git.InvalidGitRepositoryError:
        raise click.ClickException("Not in Synapse repo.")

    if repo.is_dirty():
        raise click.ClickException("Uncommitted changes exist.")
    get_repo_and_check_clean_checkout()

    current_version = get_package_version()
    tag_name = f"v{current_version}"

    if not click.confirm(f"Publish {tag_name}?", default=True):
    if not click.confirm(f"Publish release {tag_name} on GitHub?", default=True):
        return

    # Publish the draft release

@@ -406,6 +395,13 @@ def upload() -> None:
    current_version = get_package_version()
    tag_name = f"v{current_version}"

    # Check we have the right tag checked out.
    repo = get_repo_and_check_clean_checkout()
    tag = repo.tag(f"refs/tags/{tag_name}")
    if repo.head.commit != tag.commit:
        click.echo(f"Tag {tag_name} ({tag.commit}) is not currently checked out!")
        click.get_current_context().abort()

    pypi_asset_names = [
        f"matrix_synapse-{current_version}-py3-none-any.whl",
        f"matrix-synapse-{current_version}.tar.gz",

@@ -438,7 +434,7 @@ def announce() -> None:
        f"""
Hi everyone. Synapse {current_version} has just been released.

[notes](https://github.com/matrix-org/synapse/releases/tag/{tag_name}) |\
[notes](https://github.com/matrix-org/synapse/releases/tag/{tag_name}) | \
[docker](https://hub.docker.com/r/matrixdotorg/synapse/tags?name={tag_name}) | \
[debs](https://packages.matrix.org/debian/) | \
[pypi](https://pypi.org/project/matrix-synapse/{current_version}/)"""

@@ -469,6 +465,21 @@ def get_package_version() -> version.Version:
    return version.Version(version_string)


def get_release_branch_name(version_number: version.Version) -> str:
    return f"release-v{version_number.major}.{version_number.minor}"


def get_repo_and_check_clean_checkout() -> git.Repo:
    """Get the project repo and check it's not got any uncommitted changes."""
    try:
        repo = git.Repo()
    except git.InvalidGitRepositoryError:
        raise click.ClickException("Not in Synapse repo.")
    if repo.is_dirty():
        raise click.ClickException("Uncommitted changes exist.")
    return repo


def find_ref(repo: git.Repo, ref_name: str) -> Optional[git.HEAD]:
    """Find the branch/ref, looking first locally then in the remote."""
    if ref_name in repo.references:
@@ -110,13 +110,6 @@ loggers:
        # information such as access tokens.
        level: INFO

    twisted:
        # We send the twisted logging directly to the file handler,
        # to work around https://github.com/matrix-org/synapse/issues/3471
        # when using "buffer" logger. Use "console" to log to stderr instead.
        handlers: [file]
        propagate: false

root:
    level: INFO
@@ -213,10 +213,17 @@ class _EventInternalMetadata:
        return self.outlier

    def is_out_of_band_membership(self) -> bool:
        """Whether this is an out of band membership, like an invite or an invite
        rejection. This is needed as those events are marked as outliers, but
        they still need to be processed as if they're new events (e.g. updating
        invite state in the database, relaying to clients, etc).
        """Whether this event is an out-of-band membership.

        OOB memberships are a special case of outlier events: they are membership events
        for federated rooms that we aren't full members of. Examples include invites
        received over federation, and rejections for such invites.

        The concept of an OOB membership is needed because these events need to be
        processed as if they're new regular events (e.g. updating membership state in
        the database, relaying to clients via /sync, etc) despite being outliers.

        See also https://matrix-org.github.io/synapse/develop/development/room-dag-concepts.html#out-of-band-membership-events.

        (Added in synapse 0.99.0, so may be unreliable for events received before that)
        """
@@ -426,13 +426,12 @@ class EventClientSerializer:

        # Check if there are any bundled aggregations to include with the event.
        if bundle_aggregations:
            event_aggregations = bundle_aggregations.get(event.event_id)
            if event_aggregations:
            if event.event_id in bundle_aggregations:
                self._inject_bundled_aggregations(
                    event,
                    time_now,
                    config,
                    event_aggregations,
                    bundle_aggregations,
                    serialized_event,
                    apply_edits=apply_edits,
                )

@@ -471,7 +470,7 @@ class EventClientSerializer:
        event: EventBase,
        time_now: int,
        config: SerializeEventConfig,
        aggregations: "BundledAggregations",
        bundled_aggregations: Dict[str, "BundledAggregations"],
        serialized_event: JsonDict,
        apply_edits: bool,
    ) -> None:

@@ -481,22 +480,37 @@ class EventClientSerializer:
            event: The event being serialized.
            time_now: The current time in milliseconds
            config: Event serialization config
            aggregations: The bundled aggregation to serialize.
            bundled_aggregations: Bundled aggregations to be injected.
                A map from event_id to aggregation data. Must contain at least an
                entry for `event`.

                While serializing the bundled aggregations this map may be searched
                again for additional events in a recursive manner.
            serialized_event: The serialized event which may be modified.
            apply_edits: Whether the content of the event should be modified to reflect
                any replacement in `aggregations.replace`.
        """

        # We have already checked that aggregations exist for this event.
        event_aggregations = bundled_aggregations[event.event_id]

        # The JSON dictionary to be added under the unsigned property of the event
        # being serialized.
        serialized_aggregations = {}

        if aggregations.annotations:
            serialized_aggregations[RelationTypes.ANNOTATION] = aggregations.annotations
        if event_aggregations.annotations:
            serialized_aggregations[
                RelationTypes.ANNOTATION
            ] = event_aggregations.annotations

        if aggregations.references:
            serialized_aggregations[RelationTypes.REFERENCE] = aggregations.references
        if event_aggregations.references:
            serialized_aggregations[
                RelationTypes.REFERENCE
            ] = event_aggregations.references

        if aggregations.replace:
        if event_aggregations.replace:
            # If there is an edit, optionally apply it to the event.
            edit = aggregations.replace
            edit = event_aggregations.replace
            if apply_edits:
                self._apply_edit(event, serialized_event, edit)

@@ -507,19 +521,16 @@ class EventClientSerializer:
                "sender": edit.sender,
            }

        # If this event is the start of a thread, include a summary of the replies.
        if aggregations.thread:
            thread = aggregations.thread
        # Include any threaded replies to this event.
        if event_aggregations.thread:
            thread = event_aggregations.thread

            # Don't bundle aggregations as this could recurse forever.
            serialized_latest_event = serialize_event(
                thread.latest_event, time_now, config=config
            serialized_latest_event = self.serialize_event(
                thread.latest_event,
                time_now,
                config=config,
                bundle_aggregations=bundled_aggregations,
            )
            # Manually apply an edit, if one exists.
            if thread.latest_edit:
                self._apply_edit(
                    thread.latest_event, serialized_latest_event, thread.latest_edit
                )

            thread_summary = {
                "latest_event": serialized_latest_event,
@@ -343,9 +343,16 @@ class FederationSender(AbstractFederationSender):
                last_token, self._last_poked_id, limit=100
            )

            logger.debug("Handling %s -> %s", last_token, next_token)
            logger.debug(
                "Handling %i -> %i: %i events to send (current id %i)",
                last_token,
                next_token,
                len(events),
                self._last_poked_id,
            )

            if not events and next_token >= self._last_poked_id:
                logger.debug("All events processed")
                break

            async def handle_event(event: EventBase) -> None:

@@ -353,9 +360,53 @@ class FederationSender(AbstractFederationSender):
                send_on_behalf_of = event.internal_metadata.get_send_on_behalf_of()
                is_mine = self.is_mine_id(event.sender)
                if not is_mine and send_on_behalf_of is None:
                    logger.debug("Not sending remote-origin event %s", event)
                    return

                # We also want to not send out-of-band membership events.
                #
                # OOB memberships are used in three (and a half) situations:
                #
                # (1) invite events which we have received over federation. Those
                #     will have a `sender` on a different server, so will be
                #     skipped by the "is_mine" test above anyway.
                #
                # (2) rejections of invites to federated rooms - either remotely
                #     or locally generated. (Such rejections are normally
                #     created via federation, in which case the remote server is
                #     responsible for sending out the rejection. If that fails,
                #     we'll create a leave event locally, but that's only really
                #     for the benefit of the invited user - we don't have enough
                #     information to send it out over federation).
                #
                # (2a) rescinded knocks. These are identical to rejected invites.
                #
                # (3) knock events which we have sent over federation. As with
                #     invite rejections, the remote server should send them out to
                #     the federation.
                #
                # So, in all the above cases, we want to ignore such events.
                #
                # OOB memberships are always(?) outliers anyway, so if we *don't*
                # ignore them, we'll get an exception further down when we try to
                # fetch the membership list for the room.
                #
                # Arguably, we could equivalently ignore all outliers here, since
                # in theory the only way for an outlier with a local `sender` to
                # exist is by being an OOB membership (via one of (2), (2a) or (3)
                # above).
                #
                if event.internal_metadata.is_out_of_band_membership():
                    logger.debug("Not sending OOB membership event %s", event)
                    return

                # Finally, there are some other events that we should not send out
                # until someone asks for them. They are explicitly flagged as such
                # with `proactively_send: False`.
                if not event.internal_metadata.should_proactively_send():
                    logger.debug(
                        "Not sending event with proactively_send=false: %s", event
                    )
                    return

                destinations: Optional[Set[str]] = None

@@ -419,7 +470,10 @@ class FederationSender(AbstractFederationSender):
                    "federation_sender"
                ).observe((now - ts) / 1000)

            async def handle_room_events(events: Iterable[EventBase]) -> None:
            async def handle_room_events(events: List[EventBase]) -> None:
                logger.debug(
                    "Handling %i events in room %s", len(events), events[0].room_id
                )
                with Measure(self.clock, "handle_room_events"):
                    for event in events:
                        await handle_event(event)

@@ -438,6 +492,7 @@ class FederationSender(AbstractFederationSender):
                    )
                )

            logger.debug("Successfully handled up to %i", next_token)
            await self.store.update_federation_out_pos("events", next_token)

            if events:
@@ -164,7 +164,7 @@ class EventHandler:
            event.
        """
        redact_behaviour = (
            EventRedactBehaviour.AS_IS if show_redacted else EventRedactBehaviour.REDACT
            EventRedactBehaviour.as_is if show_redacted else EventRedactBehaviour.redact
        )
        event = await self.store.get_event(
            event_id, check_room_id=room_id, redact_behaviour=redact_behaviour
@@ -316,7 +316,7 @@ class FederationHandler:

        events_to_check = await self.store.get_events_as_list(
            event_ids_to_check,
            redact_behaviour=EventRedactBehaviour.AS_IS,
            redact_behaviour=EventRedactBehaviour.as_is,
            get_prev_content=False,
        )

@@ -1494,7 +1494,7 @@ class FederationHandler:

        events = await self.store.get_events_as_list(
            batch,
            redact_behaviour=EventRedactBehaviour.AS_IS,
            redact_behaviour=EventRedactBehaviour.as_is,
            allow_rejected=True,
        )
        for event in events:
@@ -860,7 +860,7 @@ class FederationEventHandler:
        evs = await self._store.get_events(
            list(state_map.values()),
            get_prev_content=False,
            redact_behaviour=EventRedactBehaviour.AS_IS,
            redact_behaviour=EventRedactBehaviour.as_is,
        )
        event_map.update(evs)
@@ -1407,7 +1407,7 @@ class EventCreationHandler:

        original_event = await self.store.get_event(
            event.redacts,
            redact_behaviour=EventRedactBehaviour.AS_IS,
            redact_behaviour=EventRedactBehaviour.as_is,
            get_prev_content=False,
            allow_rejected=False,
            allow_none=True,

@@ -1504,7 +1504,7 @@ class EventCreationHandler:

        original_event = await self.store.get_event(
            event.redacts,
            redact_behaviour=EventRedactBehaviour.AS_IS,
            redact_behaviour=EventRedactBehaviour.as_is,
            get_prev_content=False,
            allow_rejected=False,
            allow_none=True,
@@ -44,8 +44,6 @@ logger = logging.getLogger(__name__)
class _ThreadAggregation:
    # The latest event in the thread.
    latest_event: EventBase
    # The latest edit to the latest event in the thread.
    latest_edit: Optional[EventBase]
    # The total number of events in the thread.
    count: int
    # True if the current user has sent an event to the thread.

@@ -295,7 +293,7 @@ class RelationsHandler:

        for event_id, summary in summaries.items():
            if summary:
                thread_count, latest_thread_event, edit = summary
                thread_count, latest_thread_event = summary

                # Subtract off the count of any ignored users.
                for ignored_user in ignored_users:

@@ -340,7 +338,6 @@ class RelationsHandler:

                results[event_id] = _ThreadAggregation(
                    latest_event=latest_thread_event,
                    latest_edit=edit,
                    count=thread_count,
                    # If there's a thread summary it must also exist in the
                    # participated dictionary.

@@ -359,8 +356,13 @@ class RelationsHandler:
            user_id: The user requesting the bundled aggregations.

        Returns:
            A map of event ID to the bundled aggregation for the event. Not all
            events may have bundled aggregations in the results.
            A map of event ID to the bundled aggregations for the event.

            Not all requested events may exist in the results (if they don't have
            bundled aggregations).

            The results may include additional events which are related to the
            requested events.
        """
        # De-duplicate events by ID to handle the same event requested multiple times.
        #

@@ -369,22 +371,59 @@ class RelationsHandler:
            event.event_id: event for event in events if not event.is_state()
        }

        # A map of event ID to the relation in that event, if there is one.
        relations_by_id: Dict[str, str] = {}
        for event_id, event in events_by_id.items():
            relates_to = event.content.get("m.relates_to")
            if isinstance(relates_to, collections.abc.Mapping):
                relation_type = relates_to.get("rel_type")
                if isinstance(relation_type, str):
                    relations_by_id[event_id] = relation_type

        # event ID -> bundled aggregation in non-serialized form.
        results: Dict[str, BundledAggregations] = {}

        # Fetch any ignored users of the requesting user.
        ignored_users = await self._main_store.ignored_users(user_id)

        # Threads are special as the latest event of a thread might cause additional
        # events to be fetched. Thus, we check those first!

        # Fetch thread summaries (but only for the directly requested events).
        threads = await self.get_threads_for_events(
            # It is not valid to start a thread on an event which itself relates to another event.
            [eid for eid in events_by_id.keys() if eid not in relations_by_id],
            user_id,
            ignored_users,
        )
        for event_id, thread in threads.items():
            results.setdefault(event_id, BundledAggregations()).thread = thread

            # If the latest event in a thread is not already being fetched,
            # add it. This ensures that the bundled aggregations for the
            # latest thread event is correct.
            latest_thread_event = thread.latest_event
            if latest_thread_event and latest_thread_event.event_id not in events_by_id:
                events_by_id[latest_thread_event.event_id] = latest_thread_event
                # Keep relations_by_id in sync with events_by_id:
                #
                # We know that the latest event in a thread has a thread relation
                # (as that is what makes it part of the thread).
                relations_by_id[latest_thread_event.event_id] = RelationTypes.THREAD

        # Fetch other relations per event.
        for event in events_by_id.values():
            # Do not bundle aggregations for an event which represents an edit or an
            # annotation. It does not make sense for them to have related events.
            relates_to = event.content.get("m.relates_to")
            if isinstance(relates_to, collections.abc.Mapping):
                relation_type = relates_to.get("rel_type")
                if relation_type in (RelationTypes.ANNOTATION, RelationTypes.REPLACE):
                    continue
            # An event which is a replacement (ie edit) or annotation (ie, reaction)
            # may not have any other event related to it.
            #
            # XXX This is buggy, see https://github.com/matrix-org/synapse/issues/12566
            if relations_by_id.get(event.event_id) in (
                RelationTypes.ANNOTATION,
                RelationTypes.REPLACE,
            ):
                continue

            # Fetch any annotations (ie, reactions) to bundle with this event.
            annotations = await self.get_annotations_for_event(
                event.event_id, event.room_id, ignored_users=ignored_users
            )

@@ -393,6 +432,7 @@ class RelationsHandler:
                    event.event_id, BundledAggregations()
                ).annotations = {"chunk": annotations}

            # Fetch any references to bundle with this event.
            references, next_token = await self.get_relations_for_event(
                event.event_id,
                event,

@@ -425,10 +465,4 @@ class RelationsHandler:
        for event_id, edit in edits.items():
            results.setdefault(event_id, BundledAggregations()).replace = edit

        threads = await self.get_threads_for_events(
            events_by_id.keys(), user_id, ignored_users
        )
        for event_id, thread in threads.items():
            results.setdefault(event_id, BundledAggregations()).thread = thread

        return results
@@ -43,6 +43,7 @@ from typing_extensions import Protocol
from zope.interface import implementer

from twisted.internet import defer, interfaces
from twisted.internet.defer import CancelledError
from twisted.python import failure
from twisted.web import resource
from twisted.web.server import NOT_DONE_YET, Request

@@ -82,6 +83,14 @@ HTML_ERROR_TEMPLATE = """<!DOCTYPE html>
</html>
"""

# A fictional HTTP status code for requests where the client has disconnected and we
# successfully cancelled the request. Used only for logging purposes. Clients will never
# observe this code unless cancellations leak across requests or we raise a
# `CancelledError` ourselves.
# Analogous to nginx's 499 status code:
# https://github.com/nginx/nginx/blob/release-1.21.6/src/http/ngx_http_request.h#L128-L134
HTTP_STATUS_REQUEST_CANCELLED = 499


def return_json_error(f: failure.Failure, request: SynapseRequest) -> None:
    """Sends a JSON error response to clients."""

@@ -93,6 +102,17 @@ def return_json_error(f: failure.Failure, request: SynapseRequest) -> None:
        error_dict = exc.error_dict()

        logger.info("%s SynapseError: %s - %s", request, error_code, exc.msg)
    elif f.check(CancelledError):
        error_code = HTTP_STATUS_REQUEST_CANCELLED
        error_dict = {"error": "Request cancelled", "errcode": Codes.UNKNOWN}

        if not request._disconnected:
            logger.error(
                "Got cancellation before client disconnection from %r: %r",
                request.request_metrics.name,
                request,
                exc_info=(f.type, f.value, f.getTracebackObject()),  # type: ignore[arg-type]
            )
    else:
        error_code = 500
        error_dict = {"error": "Internal server error", "errcode": Codes.UNKNOWN}

@@ -155,6 +175,16 @@ def return_html_error(
            request,
            exc_info=(f.type, f.value, f.getTracebackObject()),  # type: ignore[arg-type]
        )
    elif f.check(CancelledError):
        code = HTTP_STATUS_REQUEST_CANCELLED
        msg = "Request cancelled"

        if not request._disconnected:
            logger.error(
                "Got cancellation before client disconnection when handling request %r",
                request,
                exc_info=(f.type, f.value, f.getTracebackObject()),  # type: ignore[arg-type]
            )
    else:
        code = HTTPStatus.INTERNAL_SERVER_ERROR
        msg = "Internal server error"

@@ -683,6 +713,9 @@ def respond_with_json(
    Returns:
        twisted.web.server.NOT_DONE_YET if the request is still active.
    """
    # The response code must always be set, for logging purposes.
    request.setResponseCode(code)

    # could alternatively use request.notifyFinish() and flip a flag when
    # the Deferred fires, but since the flag is RIGHT THERE it seems like
    # a waste.

@@ -697,7 +730,6 @@ def respond_with_json(
    else:
        encoder = _encode_json_bytes

    request.setResponseCode(code)
    request.setHeader(b"Content-Type", b"application/json")
    request.setHeader(b"Cache-Control", b"no-cache, no-store, must-revalidate")

@@ -728,13 +760,15 @@ def respond_with_json_bytes(
    Returns:
        twisted.web.server.NOT_DONE_YET if the request is still active.
    """
    # The response code must always be set, for logging purposes.
    request.setResponseCode(code)

    if request._disconnected:
        logger.warning(
            "Not sending response to request %s, already disconnected.", request
        )
        return None

    request.setResponseCode(code)
    request.setHeader(b"Content-Type", b"application/json")
    request.setHeader(b"Content-Length", b"%d" % (len(json_bytes),))
    request.setHeader(b"Cache-Control", b"no-cache, no-store, must-revalidate")

@@ -840,6 +874,9 @@ def respond_with_html_bytes(request: Request, code: int, html_bytes: bytes) -> N
        code: The HTTP response code.
        html_bytes: The HTML bytes to use as the response body.
    """
    # The response code must always be set, for logging purposes.
    request.setResponseCode(code)

    # could alternatively use request.notifyFinish() and flip a flag when
    # the Deferred fires, but since the flag is RIGHT THERE it seems like
    # a waste.

@@ -849,7 +886,6 @@ def respond_with_html_bytes(request: Request, code: int, html_bytes: bytes) -> N
        )
        return None

    request.setResponseCode(code)
    request.setHeader(b"Content-Type", b"text/html; charset=utf-8")
    request.setHeader(b"Content-Length", b"%d" % (len(html_bytes),))
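A compact sketch of the error-mapping pattern these hunks add; this is a standalone simplification, not Synapse's actual helpers:

```python
# Simplified sketch of the mapping above: cancelled requests are logged
# with the fictional 499 status, everything else stays a 500.
from http import HTTPStatus

from twisted.internet.defer import CancelledError

HTTP_STATUS_REQUEST_CANCELLED = 499  # logging only; never sent to clients


def status_for_failure(exc: BaseException) -> int:
    """Pick the status code to log for an unhandled request failure."""
    if isinstance(exc, CancelledError):
        return HTTP_STATUS_REQUEST_CANCELLED
    return int(HTTPStatus.INTERNAL_SERVER_ERROR)


assert status_for_failure(CancelledError()) == 499
assert status_for_failure(RuntimeError()) == 500
```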
@@ -882,9 +882,7 @@ class WhoamiRestServlet(RestServlet):

        response = {
            "user_id": requester.user.to_string(),
            # MSC: https://github.com/matrix-org/matrix-doc/pull/3069
            # Entered spec in Matrix 1.2
            "org.matrix.msc3069.is_guest": bool(requester.is_guest),
            "is_guest": bool(requester.is_guest),
        }
@@ -69,9 +69,7 @@ class LoginRestServlet(RestServlet):
    SSO_TYPE = "m.login.sso"
    TOKEN_TYPE = "m.login.token"
    JWT_TYPE = "org.matrix.login.jwt"
    JWT_TYPE_DEPRECATED = "m.login.jwt"
    APPSERVICE_TYPE = "m.login.application_service"
    APPSERVICE_TYPE_UNSTABLE = "uk.half-shot.msc2778.login.application_service"
    REFRESH_TOKEN_PARAM = "refresh_token"

    def __init__(self, hs: "HomeServer"):

@@ -126,7 +124,6 @@ class LoginRestServlet(RestServlet):
        flows: List[JsonDict] = []
        if self.jwt_enabled:
            flows.append({"type": LoginRestServlet.JWT_TYPE})
            flows.append({"type": LoginRestServlet.JWT_TYPE_DEPRECATED})

        if self.cas_enabled:
            # we advertise CAS for backwards compat, though MSC1721 renamed it

@@ -156,7 +153,6 @@ class LoginRestServlet(RestServlet):
        flows.extend({"type": t} for t in self.auth_handler.get_supported_login_types())

        flows.append({"type": LoginRestServlet.APPSERVICE_TYPE})
        flows.append({"type": LoginRestServlet.APPSERVICE_TYPE_UNSTABLE})

        return 200, {"flows": flows}

@@ -175,10 +171,7 @@ class LoginRestServlet(RestServlet):
        )

        try:
            if login_submission["type"] in (
                LoginRestServlet.APPSERVICE_TYPE,
                LoginRestServlet.APPSERVICE_TYPE_UNSTABLE,
            ):
            if login_submission["type"] == LoginRestServlet.APPSERVICE_TYPE:
                appservice = self.auth.get_appservice_by_req(request)

                if appservice.is_rate_limited():

@@ -191,9 +184,9 @@ class LoginRestServlet(RestServlet):
                    appservice,
                    should_issue_refresh_token=should_issue_refresh_token,
                )
            elif self.jwt_enabled and (
                login_submission["type"] == LoginRestServlet.JWT_TYPE
                or login_submission["type"] == LoginRestServlet.JWT_TYPE_DEPRECATED
            elif (
                self.jwt_enabled
                and login_submission["type"] == LoginRestServlet.JWT_TYPE
            ):
                await self._address_ratelimiter.ratelimit(None, request.getClientIP())
                result = await self._do_jwt_login(
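To make the servlet change concrete, a hypothetical illustration (not output captured from a real server) of the login flows a JWT-enabled homeserver would advertise after this change:

```python
# Hypothetical GET /_matrix/client/r0/login response body after this
# change; the exact flow list depends on the server's configuration.
flows = {
    "flows": [
        {"type": "org.matrix.login.jwt"},  # "m.login.jwt" no longer advertised
        {"type": "m.login.password"},
        # "uk.half-shot.msc2778.login.application_service" is gone too:
        {"type": "m.login.application_service"},
    ]
}
```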
@@ -800,7 +800,7 @@ class StateResolutionStore:

        return self.store.get_events(
            event_ids,
            redact_behaviour=EventRedactBehaviour.AS_IS,
            redact_behaviour=EventRedactBehaviour.as_is,
            get_prev_content=False,
            allow_rejected=allow_rejected,
        )
@@ -47,6 +47,7 @@ from synapse.storage.database import (
)
from synapse.storage.databases.main.events_worker import EventCacheEntry
from synapse.storage.databases.main.search import SearchEntry
from synapse.storage.engines.postgres import PostgresEngine
from synapse.storage.util.id_generators import AbstractStreamIdGenerator
from synapse.storage.util.sequence import SequenceGenerator
from synapse.types import StateMap, get_domain_from_id

@@ -364,6 +365,20 @@ class PersistEventsStore:
        min_stream_order = events_and_contexts[0][0].internal_metadata.stream_ordering
        max_stream_order = events_and_contexts[-1][0].internal_metadata.stream_ordering

        # We check that the room still exists for events we're trying to
        # persist. This is to protect against races with deleting a room.
        #
        # Annoyingly SQLite doesn't support row level locking.
        if isinstance(self.database_engine, PostgresEngine):
            for room_id in {e.room_id for e, _ in events_and_contexts}:
                txn.execute(
                    "SELECT room_version FROM rooms WHERE room_id = ? FOR SHARE",
                    (room_id,),
                )
                row = txn.fetchone()
                if row is None:
                    raise Exception(f"Room does not exist {room_id}")

        # stream orderings should have been assigned by now
        assert min_stream_order
        assert max_stream_order
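Together with the purge-room change later in this diff (which now deletes the `rooms` row first), this `SELECT ... FOR SHARE` is the race protection from changelog.d/12594.bugfix. A standalone sketch of the locking idea, using psycopg2 directly and an assumed minimal `rooms(room_id, room_version)` table:

```python
# Sketch of the pattern, outside Synapse: take a shared row lock on the
# room before inserting its events, so a concurrent purge transaction
# (which DELETEs the rooms row first) either blocks until we commit, or
# leaves us seeing no row at all, in which case we abort.
import psycopg2

conn = psycopg2.connect("dbname=example")  # hypothetical database
with conn, conn.cursor() as txn:
    txn.execute(
        "SELECT room_version FROM rooms WHERE room_id = %s FOR SHARE",
        ("!room:example.org",),
    )
    if txn.fetchone() is None:
        raise Exception("Room does not exist !room:example.org")
    # ... safe to insert events for the room within this transaction ...
```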
@@ -14,6 +14,7 @@

import logging
import threading
from enum import Enum, auto
from typing import (
    TYPE_CHECKING,
    Any,

@@ -30,7 +31,6 @@ from typing import (
)

import attr
from constantly import NamedConstant, Names
from prometheus_client import Gauge
from typing_extensions import Literal

@@ -150,14 +150,14 @@ class _EventRow:
    outlier: bool


class EventRedactBehaviour(Names):
class EventRedactBehaviour(Enum):
    """
    What to do when retrieving a redacted event from the database.
    """

    AS_IS = NamedConstant()
    REDACT = NamedConstant()
    BLOCK = NamedConstant()
    as_is = auto()
    redact = auto()
    block = auto()


class EventsWorkerStore(SQLBaseStore):

@@ -327,7 +327,7 @@ class EventsWorkerStore(SQLBaseStore):
    async def get_event(
        self,
        event_id: str,
        redact_behaviour: EventRedactBehaviour = EventRedactBehaviour.REDACT,
        redact_behaviour: EventRedactBehaviour = EventRedactBehaviour.redact,
        get_prev_content: bool = ...,
        allow_rejected: bool = ...,
        allow_none: Literal[False] = ...,

@@ -339,7 +339,7 @@ class EventsWorkerStore(SQLBaseStore):
    async def get_event(
        self,
        event_id: str,
        redact_behaviour: EventRedactBehaviour = EventRedactBehaviour.REDACT,
        redact_behaviour: EventRedactBehaviour = EventRedactBehaviour.redact,
        get_prev_content: bool = ...,
        allow_rejected: bool = ...,
        allow_none: Literal[True] = ...,

@@ -350,7 +350,7 @@ class EventsWorkerStore(SQLBaseStore):
    async def get_event(
        self,
        event_id: str,
        redact_behaviour: EventRedactBehaviour = EventRedactBehaviour.REDACT,
        redact_behaviour: EventRedactBehaviour = EventRedactBehaviour.redact,
        get_prev_content: bool = False,
        allow_rejected: bool = False,
        allow_none: bool = False,

@@ -362,9 +362,9 @@ class EventsWorkerStore(SQLBaseStore):
            event_id: The event_id of the event to fetch

            redact_behaviour: Determine what to do with a redacted event. Possible values:
                * AS_IS - Return the full event body with no redacted content
                * REDACT - Return the event but with a redacted body
                * DISALLOW - Do not return redacted events (behave as per allow_none
                * as_is - Return the full event body with no redacted content
                * redact - Return the event but with a redacted body
                * block - Do not return redacted events (behave as per allow_none
                    if the event is redacted)

            get_prev_content: If True and event is a state event,

@@ -406,7 +406,7 @@ class EventsWorkerStore(SQLBaseStore):
    async def get_events(
        self,
        event_ids: Collection[str],
        redact_behaviour: EventRedactBehaviour = EventRedactBehaviour.REDACT,
        redact_behaviour: EventRedactBehaviour = EventRedactBehaviour.redact,
        get_prev_content: bool = False,
        allow_rejected: bool = False,
    ) -> Dict[str, EventBase]:

@@ -417,9 +417,9 @@ class EventsWorkerStore(SQLBaseStore):

            redact_behaviour: Determine what to do with a redacted event. Possible
                values:
                * AS_IS - Return the full event body with no redacted content
                * REDACT - Return the event but with a redacted body
                * DISALLOW - Do not return redacted events (omit them from the response)
                * as_is - Return the full event body with no redacted content
                * redact - Return the event but with a redacted body
                * block - Do not return redacted events (omit them from the response)

            get_prev_content: If True and event is a state event,
                include the previous states content in the unsigned field.

@@ -442,7 +442,7 @@ class EventsWorkerStore(SQLBaseStore):
    async def get_events_as_list(
        self,
        event_ids: Collection[str],
        redact_behaviour: EventRedactBehaviour = EventRedactBehaviour.REDACT,
        redact_behaviour: EventRedactBehaviour = EventRedactBehaviour.redact,
        get_prev_content: bool = False,
        allow_rejected: bool = False,
    ) -> List[EventBase]:

@@ -455,9 +455,9 @@ class EventsWorkerStore(SQLBaseStore):
            event_ids: The event_ids of the events to fetch

            redact_behaviour: Determine what to do with a redacted event. Possible values:
                * AS_IS - Return the full event body with no redacted content
                * REDACT - Return the event but with a redacted body
                * DISALLOW - Do not return redacted events (omit them from the response)
                * as_is - Return the full event body with no redacted content
                * redact - Return the event but with a redacted body
                * block - Do not return redacted events (omit them from the response)

            get_prev_content: If True and event is a state event,
                include the previous states content in the unsigned field.

@@ -568,10 +568,10 @@ class EventsWorkerStore(SQLBaseStore):
                event = entry.event

                if entry.redacted_event:
                    if redact_behaviour == EventRedactBehaviour.BLOCK:
                    if redact_behaviour == EventRedactBehaviour.block:
                        # Skip this event
                        continue
                    elif redact_behaviour == EventRedactBehaviour.REDACT:
                    elif redact_behaviour == EventRedactBehaviour.redact:
                        event = entry.redacted_event

                events.append(event)

@@ -1094,6 +1094,18 @@ class EventsWorkerStore(SQLBaseStore):
            original_ev.internal_metadata.stream_ordering = row.stream_ordering
            original_ev.internal_metadata.outlier = row.outlier

            # Consistency check: if the content of the event has been modified in the
            # database, then the calculated event ID will not match the event id in the
            # database.
            if original_ev.event_id != event_id:
                # it's difficult to see what to do here. Pretty much all bets are off
                # if Synapse cannot rely on the consistency of its database.
                raise RuntimeError(
                    f"Database corruption: Event {event_id} in room {d['room_id']} "
                    f"from the database appears to have been modified (calculated "
                    f"event id {original_ev.event_id})"
                )

            event_map[event_id] = original_ev

        # finally, we can decide whether each one needs redacting, and build
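The `constantly`-to-`Enum` switch above is mechanical. A minimal standalone sketch of the new pattern, mirroring the diff but independent of Synapse's code:

```python
# Stdlib enum version of the tri-state, as adopted above. Members compare
# by identity, so callers can use `is` or `==` interchangeably.
from enum import Enum, auto


class EventRedactBehaviour(Enum):
    as_is = auto()
    redact = auto()
    block = auto()


behaviour = EventRedactBehaviour.redact
assert behaviour is EventRedactBehaviour.redact
assert behaviour != EventRedactBehaviour.block
```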
@@ -324,7 +324,12 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
        )

    def _purge_room_txn(self, txn: LoggingTransaction, room_id: str) -> List[int]:
        # First we fetch all the state groups that should be deleted, before
        # We *immediately* delete the room from the rooms table. This ensures
        # that we don't race when persisting events (as that transaction checks
        # that the room exists).
        txn.execute("DELETE FROM rooms WHERE room_id = ?", (room_id,))

        # Next, we fetch all the state groups that should be deleted, before
        # we delete that information.
        txn.execute(
            """

@@ -403,7 +408,6 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
            "room_stats_state",
            "room_stats_current",
            "room_stats_earliest_token",
            "rooms",
            "stream_ordering_to_exterm",
            "users_in_public_rooms",
            "users_who_share_private_rooms",
@@ -122,10 +122,21 @@ class ReceiptsWorkerStore(SQLBaseStore):
         receipts = await self.get_receipts_for_room(room_id, ReceiptTypes.READ)
         return {r["user_id"] for r in receipts}

-    @cached(num_args=2)
+    @cached()
     async def get_receipts_for_room(
         self, room_id: str, receipt_type: str
     ) -> List[Dict[str, Any]]:
+        """
+        Fetch the event IDs for the latest receipt for all users in a room with the given receipt type.
+
+        Args:
+            room_id: The room ID to fetch the receipt for.
+            receipt_type: The receipt type to fetch.
+
+        Returns:
+            A list of dictionaries, one for each user ID. Each dictionary
+            contains a user ID and the event ID of that user's latest receipt.
+        """
         return await self.db_pool.simple_select_list(
             table="receipts_linearized",
             keyvalues={"room_id": room_id, "receipt_type": receipt_type},
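The `num_args=2` argument was redundant: when `num_args` is omitted, Synapse's `@cached` keys the cache on all of the method's arguments, which for a two-argument method is exactly what `num_args=2` spelled out. A simplified stand-in (not the real cache) showing the equivalent behaviour:

from functools import lru_cache

@lru_cache(maxsize=None)  # like @cached(): every argument is part of the key
def get_receipts_for_room(room_id: str, receipt_type: str) -> tuple:
    return (room_id, receipt_type)  # stand-in for the database query

get_receipts_for_room("!a:x", "m.read")
assert get_receipts_for_room.cache_info().hits == 0
get_receipts_for_room("!a:x", "m.read")
assert get_receipts_for_room.cache_info().hits == 1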
@@ -133,10 +144,21 @@ class ReceiptsWorkerStore(SQLBaseStore):
             desc="get_receipts_for_room",
         )

-    @cached(num_args=3)
+    @cached()
     async def get_last_receipt_event_id_for_user(
         self, user_id: str, room_id: str, receipt_type: str
     ) -> Optional[str]:
+        """
+        Fetch the event ID for the latest receipt in a room with the given receipt type.
+
+        Args:
+            user_id: The user to fetch receipts for.
+            room_id: The room ID to fetch the receipt for.
+            receipt_type: The receipt type to fetch.
+
+        Returns:
+            The event ID of the latest receipt, if one exists; otherwise `None`.
+        """
         return await self.db_pool.simple_select_one_onecol(
             table="receipts_linearized",
             keyvalues={
@@ -149,10 +171,23 @@ class ReceiptsWorkerStore(SQLBaseStore):
             allow_none=True,
         )

-    @cached(num_args=2)
+    @cached()
     async def get_receipts_for_user(
         self, user_id: str, receipt_type: str
     ) -> Dict[str, str]:
+        """
+        Fetch the event IDs for the latest receipts sent by the given user.
+
+        Args:
+            user_id: The user to fetch receipts for.
+            receipt_type: The receipt type to fetch.
+
+        Returns:
+            A map of room ID to the event ID of the latest receipt for that room.
+
+            If the user has not sent a receipt to a room then it will not appear
+            in the returned dictionary.
+        """
         rows = await self.db_pool.simple_select_list(
             table="receipts_linearized",
             keyvalues={"user_id": user_id, "receipt_type": receipt_type},
@@ -165,6 +200,17 @@ class ReceiptsWorkerStore(SQLBaseStore):
     async def get_receipts_for_user_with_orderings(
         self, user_id: str, receipt_type: str
     ) -> JsonDict:
+        """
+        Fetch receipts for all rooms that the given user is joined to.
+
+        Args:
+            user_id: The user to fetch receipts for.
+            receipt_type: The receipt type to fetch.
+
+        Returns:
+            A map of room ID to the latest receipt information.
+        """
+
         def f(txn: LoggingTransaction) -> List[Tuple[str, str, int, int]]:
             sql = (
                 "SELECT rl.room_id, rl.event_id,"
@@ -241,7 +287,7 @@ class ReceiptsWorkerStore(SQLBaseStore):

         return await self._get_linearized_receipts_for_room(room_id, to_key, from_key)

-    @cached(num_args=3, tree=True)
+    @cached(tree=True)
     async def _get_linearized_receipts_for_room(
         self, room_id: str, to_key: int, from_key: Optional[int] = None
     ) -> List[JsonDict]:
@@ -541,7 +587,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
         data: JsonDict,
         stream_id: int,
     ) -> Optional[int]:
-        """Inserts a read-receipt into the database if it's newer than the current RR
+        """Inserts a receipt into the database if it's newer than the current one.

         Returns:
             None if the RR is older than the current RR
@@ -445,8 +445,8 @@ class RelationsWorkerStore(SQLBaseStore):
     @cachedList(cached_method_name="get_thread_summary", list_name="event_ids")
     async def get_thread_summaries(
         self, event_ids: Collection[str]
-    ) -> Dict[str, Optional[Tuple[int, EventBase, Optional[EventBase]]]]:
-        """Get the number of threaded replies, the latest reply (if any), and the latest edit for that reply for the given event.
+    ) -> Dict[str, Optional[Tuple[int, EventBase]]]:
+        """Get the number of threaded replies and the latest reply (if any) for the given events.

         Args:
             event_ids: Summarize the thread related to this event ID.
@@ -458,7 +458,6 @@ class RelationsWorkerStore(SQLBaseStore):
             Each summary is a tuple of:
                 The number of events in the thread.
                 The most recent event in the thread.
-                The most recent edit to the most recent event in the thread, if applicable.
         """

         def _get_thread_summaries_txn(
@@ -544,9 +543,6 @@ class RelationsWorkerStore(SQLBaseStore):

         latest_events = await self.get_events(latest_event_ids.values())  # type: ignore[attr-defined]

-        # Check to see if any of those events are edited.
-        latest_edits = await self.get_applicable_edits(latest_event_ids.values())
-
         # Map the event IDs to the thread summary.
         #
         # There might not be a summary due to there not being a thread or
@@ -557,8 +553,7 @@ class RelationsWorkerStore(SQLBaseStore):

             summary = None
             if latest_event:
-                latest_edit = latest_edits.get(latest_event_id)
-                summary = (counts[parent_event_id], latest_event, latest_edit)
+                summary = (counts[parent_event_id], latest_event)
             summaries[parent_event_id] = summary

         return summaries
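With this revert the per-thread summary is once more a `(count, latest_event)` pair; the latest edit is no longer carried as a third element. As the test changes later in this diff show, the edit instead surfaces inside the latest event's own bundled aggregations. Roughly the serialized shape (IDs illustrative):

thread_summary = {
    "count": 2,
    "latest_event": {
        "event_id": "$thread_reply",
        "unsigned": {
            "m.relations": {
                # the edit of the latest reply rides along here
                "m.replace": {"event_id": "$edit_of_reply"},
            }
        },
    },
}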
@@ -494,11 +494,11 @@ class SearchStore(SearchBackgroundUpdateStore):

         results = list(filter(lambda row: row["room_id"] in room_ids, results))

-        # We set redact_behaviour to BLOCK here to prevent redacted events being returned in
+        # We set redact_behaviour to block here to prevent redacted events being returned in
         # search results (which is a data leak)
         events = await self.get_events_as_list(  # type: ignore[attr-defined]
             [r["event_id"] for r in results],
-            redact_behaviour=EventRedactBehaviour.BLOCK,
+            redact_behaviour=EventRedactBehaviour.block,
         )

         event_map = {ev.event_id: ev for ev in events}
@@ -652,11 +652,11 @@ class SearchStore(SearchBackgroundUpdateStore):

         results = list(filter(lambda row: row["room_id"] in room_ids, results))

-        # We set redact_behaviour to BLOCK here to prevent redacted events being returned in
+        # We set redact_behaviour to block here to prevent redacted events being returned in
         # search results (which is a data leak)
         events = await self.get_events_as_list(  # type: ignore[attr-defined]
             [r["event_id"] for r in results],
-            redact_behaviour=EventRedactBehaviour.BLOCK,
+            redact_behaviour=EventRedactBehaviour.block,
         )

         event_map = {ev.event_id: ev for ev in events}
@@ -48,7 +48,7 @@ class SignatureWorkerStore(EventsWorkerStore):
         """
         events = await self.get_events(
             event_ids,
-            redact_behaviour=EventRedactBehaviour.AS_IS,
+            redact_behaviour=EventRedactBehaviour.as_is,
             allow_rejected=True,
         )

@@ -943,7 +943,7 @@ class EventsPersistenceStorage:
         dropped_events = await self.main_store.get_events(
             dropped_extrems,
             allow_rejected=True,
-            redact_behaviour=EventRedactBehaviour.AS_IS,
+            redact_behaviour=EventRedactBehaviour.as_is,
         )

         new_senders = {get_domain_from_id(e.sender) for e, _ in events_context}
@@ -974,7 +974,7 @@ class EventsPersistenceStorage:
         prev_events = await self.main_store.get_events(
             new_events,
             allow_rejected=True,
-            redact_behaviour=EventRedactBehaviour.AS_IS,
+            redact_behaviour=EventRedactBehaviour.as_is,
         )
         events_to_check = prev_events.values()

@@ -201,4 +201,16 @@ class CasHandlerTestCase(HomeserverTestCase):

 def _mock_request():
     """Returns a mock which will stand in as a SynapseRequest"""
-    return Mock(spec=["getClientIP", "getHeader", "_disconnected"])
+    mock = Mock(
+        spec=[
+            "finish",
+            "getClientIP",
+            "getHeader",
+            "setHeader",
+            "setResponseCode",
+            "write",
+        ]
+    )
+    # `_disconnected` mustn't be another `Mock`, otherwise it will be truthy.
+    mock._disconnected = False
+    return mock
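The comment in the new helper is the crux: attribute access on a `Mock` auto-creates another `Mock`, and `Mock` instances are truthy, so any `if request._disconnected:` guard would always fire. Setting a real `False` makes the mock behave like a live request:

from unittest.mock import Mock

m = Mock()
assert m.anything            # auto-created attributes are truthy Mocks
m._disconnected = False
assert not m._disconnected   # now the disconnect guard stays quiet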
@@ -30,11 +30,9 @@ from tests.server import FakeChannel
 from tests.test_utils import make_awaitable
 from tests.unittest import override_config

-# (possibly experimental) login flows we expect to appear in the list after the normal
-# ones
+# Login flows we expect to appear in the list after the normal ones.
 ADDITIONAL_LOGIN_FLOWS = [
     {"type": "m.login.application_service"},
     {"type": "uk.half-shot.msc2778.login.application_service"},
 ]

 # a mock instance which the dummy auth providers delegate to, so we can see what's going
@@ -15,7 +15,7 @@

 from typing import List

-from synapse.api.constants import ReadReceiptEventFields
+from synapse.api.constants import ReadReceiptEventFields, ReceiptTypes
 from synapse.types import JsonDict

 from tests import unittest
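The remaining hunks in this file mechanically replace the `"m.read"` literal with the named constant. The diff doesn't show the constant's definition; presumably it is a simple string holder along these lines (the real one lives in `synapse/api/constants.py`):

class ReceiptTypes:
    READ = "m.read"

assert ReceiptTypes.READ == "m.read"  # behaviour unchanged, only the spelling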
@@ -35,7 +35,7 @@ class ReceiptsTestCase(unittest.HomeserverTestCase):
                 {
                     "content": {
                         "$1435641916114394fHBLK:matrix.org": {
-                            "m.read": {
+                            ReceiptTypes.READ: {
                                 "@rikj:jki.re": {
                                     "ts": 1436451550453,
                                     "hidden": True,
@@ -56,7 +56,7 @@ class ReceiptsTestCase(unittest.HomeserverTestCase):
                 {
                     "content": {
                         "$1435641916hfgh4394fHBLK:matrix.org": {
-                            "m.read": {
+                            ReceiptTypes.READ: {
                                 "@me:server.org": {
                                     "ts": 1436451550453,
                                     "hidden": True,
@@ -72,7 +72,7 @@ class ReceiptsTestCase(unittest.HomeserverTestCase):
                 {
                     "content": {
                         "$1435641916hfgh4394fHBLK:matrix.org": {
-                            "m.read": {
+                            ReceiptTypes.READ: {
                                 "@me:server.org": {
                                     "ts": 1436451550453,
                                     ReadReceiptEventFields.MSC2285_HIDDEN: True,
@@ -92,7 +92,7 @@ class ReceiptsTestCase(unittest.HomeserverTestCase):
                 {
                     "content": {
                         "$1dgdgrd5641916114394fHBLK:matrix.org": {
-                            "m.read": {
+                            ReceiptTypes.READ: {
                                 "@rikj:jki.re": {
                                     "ts": 1436451550453,
                                     "hidden": True,
@@ -111,7 +111,7 @@ class ReceiptsTestCase(unittest.HomeserverTestCase):
                 {
                     "content": {
                         "$1dgdgrd5641916114394fHBLK:matrix.org": {
-                            "m.read": {
+                            ReceiptTypes.READ: {
                                 "@user:jki.re": {
                                     "ts": 1436451550453,
                                 }
@@ -130,7 +130,7 @@ class ReceiptsTestCase(unittest.HomeserverTestCase):
                 {
                     "content": {
                         "$14356419edgd14394fHBLK:matrix.org": {
-                            "m.read": {
+                            ReceiptTypes.READ: {
                                 "@rikj:jki.re": {
                                     "ts": 1436451550453,
                                     "hidden": True,
@@ -138,7 +138,7 @@ class ReceiptsTestCase(unittest.HomeserverTestCase):
                             }
                         },
                         "$1435641916114394fHBLK:matrix.org": {
-                            "m.read": {
+                            ReceiptTypes.READ: {
                                 "@user:jki.re": {
                                     "ts": 1436451550453,
                                 }
@@ -153,7 +153,7 @@ class ReceiptsTestCase(unittest.HomeserverTestCase):
                 {
                     "content": {
                         "$1435641916114394fHBLK:matrix.org": {
-                            "m.read": {
+                            ReceiptTypes.READ: {
                                 "@user:jki.re": {
                                     "ts": 1436451550453,
                                 }
@@ -171,9 +171,9 @@ class ReceiptsTestCase(unittest.HomeserverTestCase):
             [
                 {
                     "content": {
-                        "$14356419ggffg114394fHBLK:matrix.org": {"m.read": {}},
+                        "$14356419ggffg114394fHBLK:matrix.org": {ReceiptTypes.READ: {}},
                         "$1435641916114394fHBLK:matrix.org": {
-                            "m.read": {
+                            ReceiptTypes.READ: {
                                 "@user:jki.re": {
                                     "ts": 1436451550453,
                                 }
@@ -187,9 +187,9 @@ class ReceiptsTestCase(unittest.HomeserverTestCase):
             [
                 {
                     "content": {
-                        "$14356419ggffg114394fHBLK:matrix.org": {"m.read": {}},
+                        "$14356419ggffg114394fHBLK:matrix.org": {ReceiptTypes.READ: {}},
                         "$1435641916114394fHBLK:matrix.org": {
-                            "m.read": {
+                            ReceiptTypes.READ: {
                                 "@user:jki.re": {
                                     "ts": 1436451550453,
                                 }
@@ -209,7 +209,7 @@ class ReceiptsTestCase(unittest.HomeserverTestCase):
                     "content": {
                         "$143564gdfg6114394fHBLK:matrix.org": {},
                         "$1435641916114394fHBLK:matrix.org": {
-                            "m.read": {
+                            ReceiptTypes.READ: {
                                 "@user:jki.re": {
                                     "ts": 1436451550453,
                                 }
@@ -225,7 +225,7 @@ class ReceiptsTestCase(unittest.HomeserverTestCase):
                     "content": {
                         "$143564gdfg6114394fHBLK:matrix.org": {},
                         "$1435641916114394fHBLK:matrix.org": {
-                            "m.read": {
+                            ReceiptTypes.READ: {
                                 "@user:jki.re": {
                                     "ts": 1436451550453,
                                 }
@@ -244,7 +244,7 @@ class ReceiptsTestCase(unittest.HomeserverTestCase):
                 {
                     "content": {
                         "$14356419edgd14394fHBLK:matrix.org": {
-                            "m.read": {
+                            ReceiptTypes.READ: {
                                 "@rikj:jki.re": {
                                     "ts": 1436451550453,
                                     "hidden": True,
@@ -258,7 +258,7 @@ class ReceiptsTestCase(unittest.HomeserverTestCase):
                 {
                     "content": {
                         "$1435641916114394fHBLK:matrix.org": {
-                            "m.read": {
+                            ReceiptTypes.READ: {
                                 "@user:jki.re": {
                                     "ts": 1436451550453,
                                 }
@@ -273,7 +273,7 @@ class ReceiptsTestCase(unittest.HomeserverTestCase):
                 {
                     "content": {
                         "$1435641916114394fHBLK:matrix.org": {
-                            "m.read": {
+                            ReceiptTypes.READ: {
                                 "@user:jki.re": {
                                     "ts": 1436451550453,
                                 }
@@ -297,7 +297,7 @@ class ReceiptsTestCase(unittest.HomeserverTestCase):
                 {
                     "content": {
                         "$14356419edgd14394fHBLK:matrix.org": {
-                            "m.read": {
+                            ReceiptTypes.READ: {
                                 "@rikj:jki.re": "string",
                             }
                         },
@@ -349,4 +349,16 @@ class SamlHandlerTestCase(HomeserverTestCase):

 def _mock_request():
     """Returns a mock which will stand in as a SynapseRequest"""
-    return Mock(spec=["getClientIP", "getHeader", "_disconnected"])
+    mock = Mock(
+        spec=[
+            "finish",
+            "getClientIP",
+            "getHeader",
+            "setHeader",
+            "setResponseCode",
+            "write",
+        ]
+    )
+    # `_disconnected` mustn't be another `Mock`, otherwise it will be truthy.
+    mock._disconnected = False
+    return mock
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+from synapse.api.constants import ReceiptTypes
 from synapse.replication.slave.storage.receipts import SlavedReceiptsStore

 from ._base import BaseSlavedStoreTestCase
@@ -26,9 +27,13 @@ class SlavedReceiptTestCase(BaseSlavedStoreTestCase):
     STORE_TYPE = SlavedReceiptsStore

     def test_receipt(self):
-        self.check("get_receipts_for_user", [USER_ID, "m.read"], {})
+        self.check("get_receipts_for_user", [USER_ID, ReceiptTypes.READ], {})
         self.get_success(
-            self.master_store.insert_receipt(ROOM_ID, "m.read", USER_ID, [EVENT_ID], {})
+            self.master_store.insert_receipt(
+                ROOM_ID, ReceiptTypes.READ, USER_ID, [EVENT_ID], {}
+            )
         )
         self.replicate()
-        self.check("get_receipts_for_user", [USER_ID, "m.read"], {ROOM_ID: EVENT_ID})
+        self.check(
+            "get_receipts_for_user", [USER_ID, ReceiptTypes.READ], {ROOM_ID: EVENT_ID}
+        )
@@ -520,8 +520,6 @@ class WhoamiTestCase(unittest.HomeserverTestCase):
             {
                 "user_id": user_id,
                 "device_id": device_id,
-                # MSC3069 entered spec in Matrix 1.2 but maintained compatibility
-                "org.matrix.msc3069.is_guest": False,
                 "is_guest": False,
             },
         )
@@ -540,8 +538,6 @@ class WhoamiTestCase(unittest.HomeserverTestCase):
             {
                 "user_id": user_id,
                 "device_id": device_id,
-                # MSC3069 entered spec in Matrix 1.2 but maintained compatibility
-                "org.matrix.msc3069.is_guest": True,
                 "is_guest": True,
             },
         )
@@ -564,8 +560,6 @@ class WhoamiTestCase(unittest.HomeserverTestCase):
             whoami,
             {
                 "user_id": user_id,
-                # MSC3069 entered spec in Matrix 1.2 but maintained compatibility
-                "org.matrix.msc3069.is_guest": False,
                 "is_guest": False,
             },
         )
@@ -81,11 +81,9 @@ TEST_CLIENT_REDIRECT_URL = 'https://x?<ab c>&q"+%3D%2B"="fö%26=o"'
 # the query params in TEST_CLIENT_REDIRECT_URL
 EXPECTED_CLIENT_REDIRECT_URL_PARAMS = [("<ab c>", ""), ('q" =+"', '"fö&=o"')]

-# (possibly experimental) login flows we expect to appear in the list after the normal
-# ones
+# Login flows we expect to appear in the list after the normal ones.
 ADDITIONAL_LOGIN_FLOWS = [
     {"type": "m.login.application_service"},
     {"type": "uk.half-shot.msc2778.login.application_service"},
 ]

@@ -1029,7 +1029,106 @@ class BundledAggregationsTestCase(BaseRelationsTestCase):
                 bundled_aggregations.get("latest_event"),
             )

-        self._test_bundled_aggregations(RelationTypes.THREAD, assert_thread, 9)
+        self._test_bundled_aggregations(RelationTypes.THREAD, assert_thread, 10)
+
+    def test_thread_with_bundled_aggregations_for_latest(self) -> None:
+        """
+        Bundled aggregations should get applied to the latest thread event.
+        """
+        self._send_relation(RelationTypes.THREAD, "m.room.test")
+        channel = self._send_relation(RelationTypes.THREAD, "m.room.test")
+        thread_2 = channel.json_body["event_id"]
+
+        self._send_relation(
+            RelationTypes.ANNOTATION, "m.reaction", "a", parent_id=thread_2
+        )
+
+        def assert_thread(bundled_aggregations: JsonDict) -> None:
+            self.assertEqual(2, bundled_aggregations.get("count"))
+            self.assertTrue(bundled_aggregations.get("current_user_participated"))
+            # The latest thread event has some fields that don't matter.
+            self.assert_dict(
+                {
+                    "content": {
+                        "m.relates_to": {
+                            "event_id": self.parent_id,
+                            "rel_type": RelationTypes.THREAD,
+                        }
+                    },
+                    "event_id": thread_2,
+                    "sender": self.user_id,
+                    "type": "m.room.test",
+                },
+                bundled_aggregations.get("latest_event"),
+            )
+            # Check the unsigned field on the latest event.
+            self.assert_dict(
+                {
+                    "m.relations": {
+                        RelationTypes.ANNOTATION: {
+                            "chunk": [
+                                {"type": "m.reaction", "key": "a", "count": 1},
+                            ]
+                        },
+                    }
+                },
+                bundled_aggregations["latest_event"].get("unsigned"),
+            )
+
+        self._test_bundled_aggregations(RelationTypes.THREAD, assert_thread, 10)
+
+    def test_nested_thread(self) -> None:
+        """
+        Ensure that a nested thread gets ignored by bundled aggregations, as
+        those are forbidden.
+        """
+
+        # Start a thread.
+        channel = self._send_relation(RelationTypes.THREAD, "m.room.test")
+        reply_event_id = channel.json_body["event_id"]
+
+        # Disable the validation to pretend this came over federation, since it is
+        # not an event the Client-Server API will allow.
+        with patch(
+            "synapse.handlers.message.EventCreationHandler._validate_event_relation",
+            new=lambda self, event: make_awaitable(None),
+        ):
+            # Create a sub-thread off the thread, which is not allowed.
+            self._send_relation(
+                RelationTypes.THREAD, "m.room.test", parent_id=reply_event_id
+            )
+
+        # Fetch the thread root, to get the bundled aggregation for the thread.
+        relations_from_event = self._get_bundled_aggregations()
+
+        # Ensure that requesting the room messages also does not return the sub-thread.
+        channel = self.make_request(
+            "GET",
+            f"/rooms/{self.room}/messages?dir=b",
+            access_token=self.user_token,
+        )
+        self.assertEqual(200, channel.code, channel.json_body)
+        event = self._find_event_in_chunk(channel.json_body["chunk"])
+        relations_from_messages = event["unsigned"]["m.relations"]
+
+        # Check the bundled aggregations from each point.
+        for aggregations, desc in (
+            (relations_from_event, "/event"),
+            (relations_from_messages, "/messages"),
+        ):
+            # The latest event should have bundled aggregations.
+            self.assertIn(RelationTypes.THREAD, aggregations, desc)
+            thread_summary = aggregations[RelationTypes.THREAD]
+            self.assertIn("latest_event", thread_summary, desc)
+            self.assertEqual(
+                thread_summary["latest_event"]["event_id"], reply_event_id, desc
+            )
+
+            # The latest event should not have any bundled aggregations (since the
+            # only relation to it is another thread, which is invalid).
+            self.assertNotIn(
+                "m.relations", thread_summary["latest_event"]["unsigned"], desc
+            )

     def test_thread_edit_latest_event(self) -> None:
         """Test that editing the latest event in a thread works."""
@@ -1049,6 +1148,7 @@ class BundledAggregationsTestCase(BaseRelationsTestCase):
             content={"msgtype": "m.text", "body": "foo", "m.new_content": new_body},
             parent_id=threaded_event_id,
         )
+        edit_event_id = channel.json_body["event_id"]

         # Fetch the thread root, to get the bundled aggregation for the thread.
         relations_dict = self._get_bundled_aggregations()
@@ -1061,6 +1161,12 @@ class BundledAggregationsTestCase(BaseRelationsTestCase):
         self.assertIn("latest_event", thread_summary)
         latest_event_in_thread = thread_summary["latest_event"]
         self.assertEqual(latest_event_in_thread["content"]["body"], "I've been edited!")
+        # The latest event in the thread should have the edit appear under the
+        # bundled aggregations.
+        self.assertDictContainsSubset(
+            {"event_id": edit_event_id, "sender": "@alice:test"},
+            latest_event_in_thread["unsigned"]["m.relations"][RelationTypes.REPLACE],
+        )

     def test_aggregation_get_event_for_annotation(self) -> None:
         """Test that annotations do not get bundled aggregations included
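`assertDictContainsSubset` checks only that the given keys and values appear in the actual dict, ignoring extras such as `origin_server_ts` or `content`, which is why the new assertion pins just `event_id` and `sender`. A self-contained illustration:

import unittest

class SubsetDemo(unittest.TestCase):
    def test_subset(self) -> None:
        actual = {"event_id": "$edit", "sender": "@alice:test", "extra": "ignored"}
        self.assertDictContainsSubset({"event_id": "$edit"}, actual)

SubsetDemo("test_subset").test_subset()  # passes: extra keys are ignored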
@@ -24,6 +24,7 @@ from synapse.api.constants import (
     EventContentFields,
     EventTypes,
     ReadReceiptEventFields,
+    ReceiptTypes,
     RelationTypes,
 )
 from synapse.rest.client import devices, knock, login, read_marker, receipts, room, sync
@@ -560,7 +561,7 @@ class UnreadMessagesTestCase(unittest.HomeserverTestCase):
         self._check_unread_count(1)

         # Send a read receipt to tell the server we've read the latest event.
-        body = json.dumps({"m.read": res["event_id"]}).encode("utf8")
+        body = json.dumps({ReceiptTypes.READ: res["event_id"]}).encode("utf8")
         channel = self.make_request(
             "POST",
             "/rooms/%s/read_markers" % self.room_id,
@@ -13,7 +13,7 @@
 # limitations under the License.
 import json
 from contextlib import contextmanager
-from typing import Generator, Tuple
+from typing import Generator, List, Tuple
 from unittest import mock

 from twisted.enterprise.adbapi import ConnectionPool
@@ -21,6 +21,7 @@ from twisted.internet.defer import CancelledError, Deferred, ensureDeferred
 from twisted.test.proto_helpers import MemoryReactor

 from synapse.api.room_versions import EventFormatVersions, RoomVersions
+from synapse.events import make_event_from_dict
 from synapse.logging.context import LoggingContext
 from synapse.rest import admin
 from synapse.rest.client import login, room
@@ -49,23 +50,28 @@ class HaveSeenEventsTestCase(unittest.HomeserverTestCase):
             )
         )

-        for idx, (rid, eid) in enumerate(
+        self.event_ids: List[str] = []
+        for idx, rid in enumerate(
             (
-                ("room1", "event10"),
-                ("room1", "event11"),
-                ("room1", "event12"),
-                ("room2", "event20"),
+                "room1",
+                "room1",
+                "room1",
+                "room2",
             )
         ):
+            event_json = {"type": f"test {idx}", "room_id": rid}
+            event = make_event_from_dict(event_json, room_version=RoomVersions.V4)
+            event_id = event.event_id
+
             self.get_success(
                 self.store.db_pool.simple_insert(
                     "events",
                     {
-                        "event_id": eid,
+                        "event_id": event_id,
                         "room_id": rid,
                         "topological_ordering": idx,
                         "stream_ordering": idx,
-                        "type": "test",
+                        "type": event.type,
                         "processed": True,
                         "outlier": False,
                     },
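The rewritten setup no longer hard-codes IDs like "event10"; it builds each event and takes the content-derived ID from it, so the inserted rows stay internally consistent with the new corruption check in events_worker.py. Usage matches the import added above:

from synapse.api.room_versions import RoomVersions
from synapse.events import make_event_from_dict

event = make_event_from_dict(
    {"type": "test 0", "room_id": "room1"}, room_version=RoomVersions.V4
)
# In v4 rooms the event ID is a hash of the event: stable for the same
# content, different as soon as the content changes.
print(event.event_id)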
@@ -75,21 +81,22 @@ class HaveSeenEventsTestCase(unittest.HomeserverTestCase):
                 self.store.db_pool.simple_insert(
                     "event_json",
                     {
-                        "event_id": eid,
+                        "event_id": event_id,
                         "room_id": rid,
-                        "json": json.dumps({"type": "test", "room_id": rid}),
+                        "json": json.dumps(event_json),
                         "internal_metadata": "{}",
                         "format_version": 3,
                     },
                 )
             )
+            self.event_ids.append(event_id)

     def test_simple(self):
         with LoggingContext(name="test") as ctx:
             res = self.get_success(
-                self.store.have_seen_events("room1", ["event10", "event19"])
+                self.store.have_seen_events("room1", [self.event_ids[0], "event19"])
             )
-            self.assertEqual(res, {"event10"})
+            self.assertEqual(res, {self.event_ids[0]})

             # that should result in a single db query
             self.assertEqual(ctx.get_resource_usage().db_txn_count, 1)
@@ -97,19 +104,21 @@ class HaveSeenEventsTestCase(unittest.HomeserverTestCase):
         # a second lookup of the same events should cause no queries
         with LoggingContext(name="test") as ctx:
             res = self.get_success(
-                self.store.have_seen_events("room1", ["event10", "event19"])
+                self.store.have_seen_events("room1", [self.event_ids[0], "event19"])
             )
-            self.assertEqual(res, {"event10"})
+            self.assertEqual(res, {self.event_ids[0]})
             self.assertEqual(ctx.get_resource_usage().db_txn_count, 0)

     def test_query_via_event_cache(self):
         # fetch an event into the event cache
-        self.get_success(self.store.get_event("event10"))
+        self.get_success(self.store.get_event(self.event_ids[0]))

         # looking it up should now cause no db hits
         with LoggingContext(name="test") as ctx:
-            res = self.get_success(self.store.have_seen_events("room1", ["event10"]))
-            self.assertEqual(res, {"event10"})
+            res = self.get_success(
+                self.store.have_seen_events("room1", [self.event_ids[0]])
+            )
+            self.assertEqual(res, {self.event_ids[0]})
             self.assertEqual(ctx.get_resource_usage().db_txn_count, 0)

@@ -167,7 +176,6 @@ class DatabaseOutageTestCase(unittest.HomeserverTestCase):
         self.store: EventsWorkerStore = hs.get_datastores().main

         self.room_id = f"!room:{hs.hostname}"
-        self.event_ids = [f"event{i}" for i in range(20)]

         self._populate_events()

@@ -190,8 +198,14 @@ class DatabaseOutageTestCase(unittest.HomeserverTestCase):
             )
         )

-        self.event_ids = [f"event{i}" for i in range(20)]
-        for idx, event_id in enumerate(self.event_ids):
+        self.event_ids: List[str] = []
+        for idx in range(20):
+            event_json = {
+                "type": f"test {idx}",
+                "room_id": self.room_id,
+            }
+            event = make_event_from_dict(event_json, room_version=RoomVersions.V4)
+            event_id = event.event_id
             self.get_success(
                 self.store.db_pool.simple_upsert(
                     "events",
@@ -201,7 +215,7 @@ class DatabaseOutageTestCase(unittest.HomeserverTestCase):
                     "room_id": self.room_id,
                     "topological_ordering": idx,
                     "stream_ordering": idx,
-                    "type": "test",
+                    "type": event.type,
                     "processed": True,
                     "outlier": False,
                 },
@@ -213,12 +227,13 @@ class DatabaseOutageTestCase(unittest.HomeserverTestCase):
                     {"event_id": event_id},
                     {
                         "room_id": self.room_id,
-                        "json": json.dumps({"type": "test", "room_id": self.room_id}),
+                        "json": json.dumps(event_json),
                         "internal_metadata": "{}",
                         "format_version": EventFormatVersions.V3,
                     },
                 )
             )
+            self.event_ids.append(event_id)

     @contextmanager
     def _outage(self) -> Generator[None, None, None]: