Compare commits
10 Commits
devon/medi... → release-v1...
| Author | SHA1 | Date |
|---|---|---|
| | 9a62b2d47a | |
| | 5f587dfd38 | |
| | a4ec96ca34 | |
| | 02dca7c67a | |
| | dbf5b0be67 | |
| | b2f12d22e4 | |
| | d67e9c5367 | |
| | 2b5c6239de | |
| | 9b8eebbe4e | |
| | 5ced4efe1d | |
.github/workflows/docker.yml (2 changes, vendored)

```diff
@@ -30,7 +30,7 @@ jobs:
        run: docker buildx inspect

      - name: Install Cosign
-       uses: sigstore/cosign-installer@3454372f43399081ed03b604cb2d021dabca52bb # v3.8.2
+       uses: sigstore/cosign-installer@d7d6bc7722e3daa8354c50bcb52f4837da5e9b6a # v3.8.1

      - name: Checkout repository
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
```
.github/workflows/fix_lint.yaml (2 changes, vendored)

```diff
@@ -44,6 +44,6 @@ jobs:
      - run: cargo fmt
        continue-on-error: true

-     - uses: stefanzweifel/git-auto-commit-action@b863ae1933cb653a53c021fe36dbb774e1fb9403 # v5.2.0
+     - uses: stefanzweifel/git-auto-commit-action@e348103e9026cc0eee72ae06630dbe30c8bf7a79 # v5.1.0
        with:
          commit_message: "Attempt to fix linting"
```
.github/workflows/release-artifacts.yml (4 changes, vendored)

```diff
@@ -203,7 +203,7 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Download all workflow run artifacts
-       uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
+       uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e # v4.2.1
      - name: Build a tarball for the debs
        # We need to merge all the debs uploads into one folder, then compress
        # that.
@@ -213,7 +213,7 @@ jobs:
          tar -cvJf debs.tar.xz debs
      - name: Attach to release
        # Pinned to work around https://github.com/softprops/action-gh-release/issues/445
-       uses: softprops/action-gh-release@c95fe1489396fe8a9eb87c0abf8aa5b2ef267fda # v0.1.15
+       uses: softprops/action-gh-release@de2c0eb89ae2a093876385947365aca7b0e5f844 # v0.1.15
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
```
.github/workflows/triage_labelled.yml (2 changes, vendored)

```diff
@@ -11,7 +11,7 @@ jobs:
    if: >
      contains(github.event.issue.labels.*.name, 'X-Needs-Info')
    steps:
-     - uses: actions/add-to-project@5b1a254a3546aef88e0a7724a77a623fa2e47c36 # main (v1.0.2 + 10 commits)
+     - uses: actions/add-to-project@280af8ae1f83a494cfad2cb10f02f6d13529caa9 # main (v1.0.2 + 10 commits)
        id: add_project
        with:
          project-url: "https://github.com/orgs/matrix-org/projects/67"
```
CHANGES.md (46 changes)

```diff
@@ -1,3 +1,49 @@
+# Synapse 1.129.0 (2025-05-06)
+
+No significant changes since 1.129.0rc2.
+
+
+
+
+# Synapse 1.129.0rc2 (2025-04-30)
+
+Synapse 1.129.0rc1 was never formally released due to regressions discovered during the release process. 1.129.0rc2 fixes those regressions by reverting the affected PRs.
+
+### Internal Changes
+
+- Revert the slow background update introduced by [\#18068](https://github.com/element-hq/synapse/issues/18068) in v1.128.0. ([\#18372](https://github.com/element-hq/synapse/issues/18372))
+- Revert "Add total event, unencrypted message, and e2ee event counts to stats reporting", added in v1.129.0rc1. ([\#18373](https://github.com/element-hq/synapse/issues/18373))
+
+
+
+
+# Synapse 1.129.0rc1 (2025-04-15)
+
+### Features
+
+- Add `passthrough_authorization_parameters` in OIDC configuration to allow passing parameters to the authorization grant URL. ([\#18232](https://github.com/element-hq/synapse/issues/18232))
+- Add `total_event_count`, `total_message_count`, and `total_e2ee_event_count` fields to the homeserver usage statistics. ([\#18260](https://github.com/element-hq/synapse/issues/18260))
+
+### Bugfixes
+
+- Fix `force_tracing_for_users` config when using delegated auth. ([\#18334](https://github.com/element-hq/synapse/issues/18334))
+- Fix the token introspection cache logging access tokens when MAS integration is in use. ([\#18335](https://github.com/element-hq/synapse/issues/18335))
+- Stop caching introspection failures when delegating auth to MAS. ([\#18339](https://github.com/element-hq/synapse/issues/18339))
+- Fix `ExternalIDReuse` exception after migrating to MAS on workers with a high traffic. ([\#18342](https://github.com/element-hq/synapse/issues/18342))
+- Fix minor performance regression caused by tracking of room participation. Regressed in v1.128.0. ([\#18345](https://github.com/element-hq/synapse/issues/18345))
+
+### Updates to the Docker image
+
+- Optimize the build of the complement-synapse image. ([\#18294](https://github.com/element-hq/synapse/issues/18294))
+
+### Internal Changes
+
+- Disable statement timeout during room purge. ([\#18133](https://github.com/element-hq/synapse/issues/18133))
+- Add cache to storage functions used to auth requests when using delegated auth. ([\#18337](https://github.com/element-hq/synapse/issues/18337))
+
+
+
+
 # Synapse 1.128.0 (2025-04-08)

 No significant changes since 1.128.0rc1.
```
Cargo.lock (8 changes, generated)

```diff
@@ -13,9 +13,9 @@ dependencies = [

 [[package]]
 name = "anyhow"
-version = "1.0.98"
+version = "1.0.97"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487"
+checksum = "dcfed56ad506cb2c684a14971b8861fdc3baaaae314b9e5f9bb532cbe3ba7a4f"

 [[package]]
 name = "arc-swap"
@@ -316,9 +316,9 @@ dependencies = [

 [[package]]
 name = "pyo3-log"
-version = "0.12.3"
+version = "0.12.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7079e412e909af5d6be7c04a7f29f6a2837a080410e1c529c9dee2c367383db4"
+checksum = "4b78e4983ba15bc62833a0e0941d965bc03690163f1127864f1408db25063466"
 dependencies = [
  "arc-swap",
  "log",
```
```diff
@@ -1 +0,0 @@
-Disable statement timeout during room purge.
@@ -1 +0,0 @@
-Add `passthrough_authorization_parameters` in OIDC configuration to allow to pass parameters to the authorization grant URL.
@@ -1 +0,0 @@
-Add documentation for configuring [Pocket ID](https://github.com/pocket-id/pocket-id) as an OIDC provider.
@@ -1 +0,0 @@
-In configure_workers_and_start.py, use the same absolute path of Python in the interpreter shebang, and invoke child Python processes with `sys.executable`.
@@ -1 +0,0 @@
-Optimize the build of the workers image.
@@ -1 +0,0 @@
-In start_for_complement.sh, replace some external program calls with shell builtins.
@@ -1 +0,0 @@
-Optimize the build of the complement-synapse image.
@@ -1 +0,0 @@
-When generating container scripts from templates, don't add a leading newline so that their shebangs may be handled correctly.
@@ -1 +0,0 @@
-Fix typo in docs about the `push` config option. Contributed by @HarHarLinks.
@@ -1 +0,0 @@
-Fix `force_tracing_for_users` config when using delegated auth.
@@ -1 +0,0 @@
-Fix the token introspection cache logging access tokens when MAS integration is in use.
@@ -1 +0,0 @@
-Add cache to storage functions used to auth requests when using delegated auth.
@@ -1 +0,0 @@
-Stop caching introspection failures when delegating auth to MAS.
@@ -1 +0,0 @@
-Fix `ExternalIDReuse` exception after migrating to MAS on workers with a high traffic.
@@ -1 +0,0 @@
-Fix minor performance regression caused by tracking of room participation. Regressed in v1.128.0.
@@ -1 +0,0 @@
-Add support for handling `GET /devices/` on workers.
@@ -1 +0,0 @@
-Allow `/rooms/` admin API to be run on workers.
@@ -1 +0,0 @@
-Fix longstanding bug where Synapse would immediately retry a failing push endpoint when a new event is received, ignoring any backoff timers.
@@ -1 +0,0 @@
-Minor performance improvements to the notifier.
@@ -1 +0,0 @@
-Slight performance increase when using the ratelimiter.
@@ -1 +0,0 @@
-Allow client & media admin apis to coexist.
```
debian/changelog (18 changes, vendored)

```diff
@@ -1,3 +1,21 @@
+matrix-synapse-py3 (1.129.0) stable; urgency=medium
+
+  * New Synapse release 1.129.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 06 May 2025 12:22:11 +0100
+
+matrix-synapse-py3 (1.129.0~rc2) stable; urgency=medium
+
+  * New synapse release 1.129.0rc2.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 30 Apr 2025 13:13:16 +0000
+
+matrix-synapse-py3 (1.129.0~rc1) stable; urgency=medium
+
+  * New Synapse release 1.129.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 15 Apr 2025 10:47:43 -0600
+
 matrix-synapse-py3 (1.128.0) stable; urgency=medium

   * New Synapse release 1.128.0.
```
```diff
@@ -3,37 +3,18 @@
 ARG SYNAPSE_VERSION=latest
 ARG FROM=matrixdotorg/synapse:$SYNAPSE_VERSION
 ARG DEBIAN_VERSION=bookworm
-ARG PYTHON_VERSION=3.12

-# first of all, we create a base image with dependencies which we can copy into the
+# first of all, we create a base image with an nginx which we can copy into the
 # target image. For repeated rebuilds, this is much faster than apt installing
 # each time.

-FROM ghcr.io/astral-sh/uv:python${PYTHON_VERSION}-${DEBIAN_VERSION} AS deps_base
-
-    # Tell apt to keep downloaded package files, as we're using cache mounts.
-    RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
-
+FROM docker.io/library/debian:${DEBIAN_VERSION}-slim AS deps_base
     RUN \
        --mount=type=cache,target=/var/cache/apt,sharing=locked \
        --mount=type=cache,target=/var/lib/apt,sharing=locked \
       apt-get update -qq && \
       DEBIAN_FRONTEND=noninteractive apt-get install -yqq --no-install-recommends \
-        nginx-light
-
-    RUN \
-      # remove default page
-      rm /etc/nginx/sites-enabled/default && \
-      # have nginx log to stderr/out
-      ln -sf /dev/stdout /var/log/nginx/access.log && \
-      ln -sf /dev/stderr /var/log/nginx/error.log
-
-    # --link-mode=copy silences a warning as uv isn't able to do hardlinks between its cache
-    # (mounted as --mount=type=cache) and the target directory.
-    RUN --mount=type=cache,target=/root/.cache/uv \
-      uv pip install --link-mode=copy --prefix="/uv/usr/local" supervisor~=4.2
-
-    RUN mkdir -p /uv/etc/supervisor/conf.d
+        redis-server nginx-light

 # Similarly, a base to copy the redis server from.
 #
```
```diff
@@ -46,16 +27,31 @@ FROM docker.io/library/redis:7-${DEBIAN_VERSION} AS redis_base
 # now build the final image, based on the the regular Synapse docker image
 FROM $FROM

-    # Copy over dependencies
+    # Install supervisord with uv pip instead of apt, to avoid installing a second
+    # copy of python.
+    # --link-mode=copy silences a warning as uv isn't able to do hardlinks between its cache
+    # (mounted as --mount=type=cache) and the target directory.
+    RUN \
+      --mount=type=bind,from=ghcr.io/astral-sh/uv:0.6.8,source=/uv,target=/uv \
+      --mount=type=cache,target=/root/.cache/uv \
+      /uv pip install --link-mode=copy --prefix="/usr/local" supervisor~=4.2
+    RUN mkdir -p /etc/supervisor/conf.d
+
+    # Copy over redis and nginx
     COPY --from=redis_base /usr/local/bin/redis-server /usr/local/bin
-    COPY --from=deps_base /uv /
+
     COPY --from=deps_base /usr/sbin/nginx /usr/sbin
     COPY --from=deps_base /usr/share/nginx /usr/share/nginx
     COPY --from=deps_base /usr/lib/nginx /usr/lib/nginx
     COPY --from=deps_base /etc/nginx /etc/nginx
-    COPY --from=deps_base /var/log/nginx /var/log/nginx
-    # chown to allow non-root user to write to http-*-temp-path dirs
-    COPY --from=deps_base --chown=www-data:root /var/lib/nginx /var/lib/nginx
+    RUN rm /etc/nginx/sites-enabled/default
+    RUN mkdir /var/log/nginx /var/lib/nginx
+    RUN chown www-data /var/lib/nginx
+
+    # have nginx log to stderr/out
+    RUN ln -sf /dev/stdout /var/log/nginx/access.log
+    RUN ln -sf /dev/stderr /var/log/nginx/error.log

     # Copy Synapse worker, nginx and supervisord configuration template files
     COPY ./docker/conf-workers/* /conf/
@@ -74,4 +70,4 @@ FROM $FROM
 # Replace the healthcheck with one which checks *all* the workers. The script
 # is generated by configure_workers_and_start.py.
 HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \
-    CMD ["/healthcheck.sh"]
+    CMD /bin/sh /healthcheck.sh
```
```diff
@@ -58,4 +58,4 @@ ENTRYPOINT ["/start_for_complement.sh"]

 # Update the healthcheck to have a shorter check interval
 HEALTHCHECK --start-period=5s --interval=1s --timeout=1s \
-    CMD ["/healthcheck.sh"]
+    CMD /bin/sh /healthcheck.sh
```
```diff
@@ -9,7 +9,7 @@ echo " Args: $*"
 echo " Env: SYNAPSE_COMPLEMENT_DATABASE=$SYNAPSE_COMPLEMENT_DATABASE SYNAPSE_COMPLEMENT_USE_WORKERS=$SYNAPSE_COMPLEMENT_USE_WORKERS SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR=$SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR"

 function log {
-    d=$(printf '%(%Y-%m-%d %H:%M:%S)T,%.3s\n' ${EPOCHREALTIME/./ })
+    d=$(date +"%Y-%m-%d %H:%M:%S,%3N")
     echo "$d $*"
 }

@@ -103,11 +103,12 @@ fi
 # Note that both the key and certificate are in PEM format (not DER).

 # First generate a configuration file to set up a Subject Alternative Name.
-echo "\
+cat > /conf/server.tls.conf <<EOF
 .include /etc/ssl/openssl.cnf

 [SAN]
-subjectAltName=DNS:${SERVER_NAME}" > /conf/server.tls.conf
+subjectAltName=DNS:${SERVER_NAME}
+EOF

 # Generate an RSA key
 openssl genrsa -out /conf/server.tls.key 2048
@@ -122,8 +123,8 @@ openssl x509 -req -in /conf/server.tls.csr \
   -out /conf/server.tls.crt -extfile /conf/server.tls.conf -extensions SAN

 # Assert that we have a Subject Alternative Name in the certificate.
-# (the test will exit with 1 here if there isn't a SAN in the certificate.)
-[[ $(openssl x509 -in /conf/server.tls.crt -noout -text) == *DNS:* ]]
+# (grep will exit with 1 here if there isn't a SAN in the certificate.)
+openssl x509 -in /conf/server.tls.crt -noout -text | grep DNS:

 export SYNAPSE_TLS_CERT=/conf/server.tls.crt
 export SYNAPSE_TLS_KEY=/conf/server.tls.key
```
```diff
@@ -1,4 +1,4 @@
-#!/usr/local/bin/python
+#!/usr/bin/env python
 #
 # This file is licensed under the Affero General Public License (AGPL) version 3.
 #
@@ -376,11 +376,9 @@ def convert(src: str, dst: str, **template_vars: object) -> None:
     #
     # We use append mode in case the files have already been written to by something else
     # (for instance, as part of the instructions in a dockerfile).
-    exists = os.path.isfile(dst)
     with open(dst, "a") as outfile:
         # In case the existing file doesn't end with a newline
-        if exists:
-            outfile.write("\n")
+        outfile.write("\n")

         outfile.write(rendered)

@@ -606,7 +604,7 @@ def generate_base_homeserver_config() -> None:
     # start.py already does this for us, so just call that.
     # note that this script is copied in in the official, monolith dockerfile
     os.environ["SYNAPSE_HTTP_PORT"] = str(MAIN_PROCESS_HTTP_LISTENER_PORT)
-    subprocess.run([sys.executable, "/start.py", "migrate_config"], check=True)
+    subprocess.run(["/usr/local/bin/python", "/start.py", "migrate_config"], check=True)


 def parse_worker_types(
@@ -1000,7 +998,6 @@ def generate_worker_files(
         "/healthcheck.sh",
         healthcheck_urls=healthcheck_urls,
     )
-    os.chmod("/healthcheck.sh", 0o755)

     # Ensure the logging directory exists
     log_dir = data_dir + "/logs"
```
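Read together, the shebang, `convert()` and `migrate_config` hunks are about which interpreter runs the script and how rendered templates land in files that may already exist. A short sketch of the devon-side behaviour, lifted almost directly from the hunks above (`append_rendered` is an illustrative stand-in name, not a function in the script):

```python
import os
import subprocess
import sys


def append_rendered(dst: str, rendered: str) -> None:
    # Append mode, since the file may already have been written to (e.g. by a
    # Dockerfile step). Only prepend a newline when the file already exists,
    # so a freshly created script keeps its shebang on the very first line.
    exists = os.path.isfile(dst)
    with open(dst, "a") as outfile:
        if exists:
            outfile.write("\n")
        outfile.write(rendered)


def migrate_config() -> None:
    # Reuse whichever interpreter is running this script for the child
    # process, instead of hard-coding a path such as /usr/local/bin/python.
    subprocess.run([sys.executable, "/start.py", "migrate_config"], check=True)
```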
````diff
@@ -23,7 +23,6 @@ such as [Github][github-idp].
 [auth0]: https://auth0.com/
 [authentik]: https://goauthentik.io/
 [lemonldap]: https://lemonldap-ng.org/
-[pocket-id]: https://pocket-id.org/
 [okta]: https://www.okta.com/
 [dex-idp]: https://github.com/dexidp/dex
 [keycloak-idp]: https://www.keycloak.org/docs/latest/server_admin/#sso-protocols
@@ -625,32 +624,6 @@ oidc_providers:

 Note that the fields `client_id` and `client_secret` are taken from the CURL response above.

-### Pocket ID
-
-[Pocket ID][pocket-id] is a simple OIDC provider that allows users to authenticate with their passkeys.
-1. Go to `OIDC Clients`
-2. Click on `Add OIDC Client`
-3. Add a name, for example `Synapse`
-4. Add `https://auth.example.org/_synapse/client/oidc/callback` to `Callback URLs` # Replace `auth.example.org` with your domain
-5. Click on `Save`
-6. Note down your `Client ID` and `Client secret`, these will be used later
-
-Synapse config:
-
-```yaml
-oidc_providers:
-  - idp_id: pocket_id
-    idp_name: Pocket ID
-    issuer: "https://auth.example.org/" # Replace with your domain
-    client_id: "your-client-id" # Replace with the "Client ID" you noted down before
-    client_secret: "your-client-secret" # Replace with the "Client secret" you noted down before
-    scopes: ["openid", "profile"]
-    user_mapping_provider:
-      config:
-        localpart_template: "{{ user.preferred_username }}"
-        display_name_template: "{{ user.name }}"
-```
-
 ### Shibboleth with OIDC Plugin

 [Shibboleth](https://www.shibboleth.net/) is an open Standard IdP solution widely used by Universities.
````
```diff
@@ -4018,7 +4018,7 @@ This option has a number of sub-options. They are as follows:
 * `include_content`: Clients requesting push notifications can either have the body of
   the message sent in the notification poke along with other details
   like the sender, or just the event ID and room ID (`event_id_only`).
-  If clients choose to have the body sent, this option controls whether the
+  If clients choose the to have the body sent, this option controls whether the
   notification request includes the content of the event (other details
   like the sender are still included). If `event_id_only` is enabled, it
   has no effect.
```
```diff
@@ -249,7 +249,6 @@ information.
     ^/_matrix/client/(api/v1|r0|v3|unstable)/directory/room/.*$
     ^/_matrix/client/(r0|v3|unstable)/capabilities$
     ^/_matrix/client/(r0|v3|unstable)/notifications$
-    ^/_synapse/admin/v1/rooms/

     # Encryption requests
     ^/_matrix/client/(r0|v3|unstable)/keys/query$
@@ -281,7 +280,6 @@ Additionally, the following REST endpoints can be handled for GET requests:

     ^/_matrix/client/(api/v1|r0|v3|unstable)/pushrules/
     ^/_matrix/client/unstable/org.matrix.msc4140/delayed_events
-    ^/_matrix/client/(api/v1|r0|v3|unstable)/devices/

     # Account data requests
     ^/_matrix/client/(r0|v3|unstable)/.*/tags
```
poetry.lock (19 changes, generated)

```diff
@@ -2053,19 +2053,18 @@ tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"]

 [[package]]
 name = "pyopenssl"
-version = "25.0.0"
+version = "24.3.0"
 description = "Python wrapper module around the OpenSSL library"
 optional = false
 python-versions = ">=3.7"
 groups = ["main"]
 files = [
-    {file = "pyOpenSSL-25.0.0-py3-none-any.whl", hash = "sha256:424c247065e46e76a37411b9ab1782541c23bb658bf003772c3405fbaa128e90"},
-    {file = "pyopenssl-25.0.0.tar.gz", hash = "sha256:cd2cef799efa3936bb08e8ccb9433a575722b9dd986023f1cabc4ae64e9dac16"},
+    {file = "pyOpenSSL-24.3.0-py3-none-any.whl", hash = "sha256:e474f5a473cd7f92221cc04976e48f4d11502804657a08a989fb3be5514c904a"},
+    {file = "pyopenssl-24.3.0.tar.gz", hash = "sha256:49f7a019577d834746bc55c5fce6ecbcec0f2b4ec5ce1cf43a9a173b8138bb36"},
 ]

 [package.dependencies]
 cryptography = ">=41.0.5,<45"
-typing-extensions = {version = ">=4.9", markers = "python_version < \"3.13\" and python_version >= \"3.8\""}

 [package.extras]
 docs = ["sphinx (!=5.2.0,!=5.2.0.post0,!=7.2.5)", "sphinx_rtd_theme"]
@@ -2957,14 +2956,14 @@ files = [

 [[package]]
 name = "types-jsonschema"
-version = "4.23.0.20241208"
+version = "4.23.0.20240813"
 description = "Typing stubs for jsonschema"
 optional = false
 python-versions = ">=3.8"
 groups = ["dev"]
 files = [
-    {file = "types_jsonschema-4.23.0.20241208-py3-none-any.whl", hash = "sha256:87934bd9231c99d8eff94cacfc06ba668f7973577a9bd9e1f9de957c5737313e"},
-    {file = "types_jsonschema-4.23.0.20241208.tar.gz", hash = "sha256:e8b15ad01f290ecf6aea53f93fbdf7d4730e4600313e89e8a7f95622f7e87b7c"},
+    {file = "types-jsonschema-4.23.0.20240813.tar.gz", hash = "sha256:c93f48206f209a5bc4608d295ac39f172fb98b9e24159ce577dbd25ddb79a1c0"},
+    {file = "types_jsonschema-4.23.0.20240813-py3-none-any.whl", hash = "sha256:be283e23f0b87547316c2ee6b0fd36d95ea30e921db06478029e10b5b6aa6ac3"},
 ]

 [package.dependencies]
@@ -3008,14 +3007,14 @@ files = [

 [[package]]
 name = "types-psycopg2"
-version = "2.9.21.20250318"
+version = "2.9.21.20250121"
 description = "Typing stubs for psycopg2"
 optional = false
 python-versions = ">=3.9"
 groups = ["dev"]
 files = [
-    {file = "types_psycopg2-2.9.21.20250318-py3-none-any.whl", hash = "sha256:7296d111ad950bbd2fc979a1ab0572acae69047f922280e77db657c00d2c79c0"},
-    {file = "types_psycopg2-2.9.21.20250318.tar.gz", hash = "sha256:eb6eac5bfb16adfd5f16b818918b9e26a40ede147e0f2bbffdf53a6ef7025a87"},
+    {file = "types_psycopg2-2.9.21.20250121-py3-none-any.whl", hash = "sha256:b890dc6f5a08b6433f0ff73a4ec9a834deedad3e914f2a4a6fd43df021f745f1"},
+    {file = "types_psycopg2-2.9.21.20250121.tar.gz", hash = "sha256:2b0e2cd0f3747af1ae25a7027898716d80209604770ef3cbf350fe055b9c349b"},
 ]

 [[package]]
```

```diff
@@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust"

 [tool.poetry]
 name = "matrix-synapse"
-version = "1.128.0"
+version = "1.129.0"
 description = "Homeserver for the Matrix decentralised comms protocol"
 authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
 license = "AGPL-3.0-or-later"
```
```diff
@@ -20,7 +20,8 @@
 #
 #

-from typing import Dict, Hashable, Optional, Tuple
+from collections import OrderedDict
+from typing import Hashable, Optional, Tuple

 from synapse.api.errors import LimitExceededError
 from synapse.config.ratelimiting import RatelimitSettings
@@ -79,14 +80,12 @@ class Ratelimiter:
         self.store = store
         self._limiter_name = cfg.key

-        # A dictionary representing the token buckets tracked by this rate
+        # An ordered dictionary representing the token buckets tracked by this rate
         # limiter. Each entry maps a key of arbitrary type to a tuple representing:
         #   * The number of tokens currently in the bucket,
         #   * The time point when the bucket was last completely empty, and
         #   * The rate_hz (leak rate) of this particular bucket.
-        self.actions: Dict[Hashable, Tuple[float, float, float]] = {}
-
-        self.clock.looping_call(self._prune_message_counts, 60 * 1000)
+        self.actions: OrderedDict[Hashable, Tuple[float, float, float]] = OrderedDict()

     def _get_key(
         self, requester: Optional[Requester], key: Optional[Hashable]
@@ -170,6 +169,9 @@
         rate_hz = rate_hz if rate_hz is not None else self.rate_hz
         burst_count = burst_count if burst_count is not None else self.burst_count

+        # Remove any expired entries
+        self._prune_message_counts(time_now_s)
+
         # Check if there is an existing count entry for this key
         action_count, time_start, _ = self._get_action_counts(key, time_now_s)

@@ -244,12 +246,13 @@
         action_count, time_start, rate_hz = self._get_action_counts(key, time_now_s)
         self.actions[key] = (action_count + n_actions, time_start, rate_hz)

-    def _prune_message_counts(self) -> None:
+    def _prune_message_counts(self, time_now_s: float) -> None:
         """Remove message count entries that have not exceeded their defined
         rate_hz limit
-        """
-        time_now_s = self.clock.time()

+        Args:
+            time_now_s: The current time
+        """
         # We create a copy of the key list here as the dictionary is modified during
         # the loop
         for key in list(self.actions.keys()):
```
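The devon-side change replaces the `OrderedDict` with a plain dict and moves pruning onto a once-a-minute looping call, so `can_do_action` no longer scans every bucket on each request. A minimal, self-contained sketch of the leaky-bucket bookkeeping involved (simplified names, not Synapse's actual class):

```python
import time
from typing import Dict, Hashable, Tuple


class LeakyBucketLimiter:
    """Sketch: each key maps to (action_count, window_start_s, rate_hz)."""

    def __init__(self, rate_hz: float, burst_count: int) -> None:
        self.rate_hz = rate_hz
        self.burst_count = burst_count
        self.actions: Dict[Hashable, Tuple[float, float, float]] = {}

    def can_do_action(self, key: Hashable, time_now_s: float) -> bool:
        count, start, rate_hz = self.actions.get(key, (0.0, time_now_s, self.rate_hz))
        # Tokens "leak" out of the bucket at rate_hz since the window started.
        count = max(0.0, count - (time_now_s - start) * rate_hz)
        if count >= self.burst_count:
            return False  # bucket full: rate limited
        self.actions[key] = (count + 1.0, time_now_s, rate_hz)
        return True

    def prune(self, time_now_s: float) -> None:
        # Run periodically (e.g. from a looping call) rather than per request.
        # Copy the key list because the dict is mutated during iteration.
        for key in list(self.actions.keys()):
            count, start, rate_hz = self.actions[key]
            if count - (time_now_s - start) * rate_hz <= 0.0:
                del self.actions[key]


limiter = LeakyBucketLimiter(rate_hz=0.2, burst_count=5)
assert limiter.can_do_action("user-a", time.time())
```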
```diff
@@ -21,7 +21,7 @@
 #
 import logging
 import sys
-from typing import Dict, List, cast
+from typing import Dict, List

 from twisted.web.resource import Resource

@@ -51,8 +51,8 @@ from synapse.http.server import JsonResource, OptionsResource
 from synapse.logging.context import LoggingContext
 from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
 from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
-from synapse.rest import ClientRestResource, admin
-from synapse.rest.admin import AdminRestResource, register_servlets_for_media_repo
+from synapse.rest import ClientRestResource
+from synapse.rest.admin import register_servlets_for_media_repo
 from synapse.rest.health import HealthResource
 from synapse.rest.key.v2 import KeyResource
 from synapse.rest.synapse.client import build_synapse_client_resource_tree
@@ -190,11 +190,7 @@ class GenericWorkerServer(HomeServer):

                     resources.update(build_synapse_client_resource_tree(self))
                     resources["/.well-known"] = well_known_resource(self)
-                    admin_res = resources.get("/_synapse/admin")
-                    if admin_res is not None:
-                        admin.register_servlets(self, cast(JsonResource, admin_res))
-                    else:
-                        resources["/_synapse/admin"] = AdminRestResource(self)

                 elif name == "federation":
                     resources[FEDERATION_PREFIX] = TransportLayerServer(self)
                 elif name == "media":
@@ -203,21 +199,15 @@ class GenericWorkerServer(HomeServer):

                         # We need to serve the admin servlets for media on the
                         # worker.
-                        admin_res = resources.get("/_synapse/admin")
-                        if admin_res is not None:
-                            register_servlets_for_media_repo(
-                                self, cast(JsonResource, admin_res)
-                            )
-                        else:
-                            admin_resource = JsonResource(self, canonical_json=False)
-                            register_servlets_for_media_repo(self, admin_resource)
-                            resources["/_synapse/admin"] = admin_resource
+                        admin_resource = JsonResource(self, canonical_json=False)
+                        register_servlets_for_media_repo(self, admin_resource)

                         resources.update(
                             {
                                 MEDIA_R0_PREFIX: media_repo,
                                 MEDIA_V3_PREFIX: media_repo,
                                 LEGACY_MEDIA_PREFIX: media_repo,
+                                "/_synapse/admin": admin_resource,
                             }
                         )
```
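The devon side here is what the deleted changelog entry "Allow client & media admin apis to coexist" refers to: if another listener on the same worker already mounted `/_synapse/admin`, extra servlets are registered onto that existing resource instead of replacing it. The pattern in isolation, as a sketch with hypothetical stand-in names:

```python
from typing import Callable, Dict


def mount_admin_servlets(
    resources: Dict[str, object],
    make_resource: Callable[[], object],
    register: Callable[[object], None],
) -> None:
    # Reuse an existing /_synapse/admin resource if some other listener on
    # this worker already created one; otherwise create and mount a new one.
    # Unconditionally assigning would clobber servlets registered earlier.
    admin_res = resources.get("/_synapse/admin")
    if admin_res is None:
        admin_res = make_resource()
        resources["/_synapse/admin"] = admin_res
    register(admin_res)
```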
```diff
@@ -66,6 +66,7 @@ from synapse.types import (
 from synapse.util.async_helpers import (
     timeout_deferred,
 )
+from synapse.util.metrics import Measure
 from synapse.util.stringutils import shortstr
 from synapse.visibility import filter_events_for_client

@@ -519,22 +520,20 @@
         users = users or []
         rooms = rooms or []

-        user_streams: Set[_NotifierUserStream] = set()
-
-        log_kv(
-            {
-                "waking_up_explicit_users": len(users),
-                "waking_up_explicit_rooms": len(rooms),
-                "users": shortstr(users),
-                "rooms": shortstr(rooms),
-                "stream": stream_key,
-                "stream_id": new_token,
-            }
-        )
-
-        # Only calculate which user streams to wake up if there are, in fact,
-        # any user streams registered.
-        if self.user_to_user_stream or self.room_to_user_streams:
+        with Measure(self.clock, "on_new_event"):
+            user_streams: Set[_NotifierUserStream] = set()
+
+            log_kv(
+                {
+                    "waking_up_explicit_users": len(users),
+                    "waking_up_explicit_rooms": len(rooms),
+                    "users": shortstr(users),
+                    "rooms": shortstr(rooms),
+                    "stream": stream_key,
+                    "stream_id": new_token,
+                }
+            )
+
             for user in users:
                 user_stream = self.user_to_user_stream.get(str(user))
                 if user_stream is not None:
@@ -566,25 +565,25 @@
-            # We resolve all these deferreds in one go so that we only need to
-            # call `PreserveLoggingContext` once, as it has a bunch of overhead
-            # (to calculate performance stats)
-            if listeners:
-                with PreserveLoggingContext():
-                    for listener in listeners:
-                        listener.callback(current_token)
-
-            if user_streams:
-                users_woken_by_stream_counter.labels(stream_key).inc(len(user_streams))
-
-            self.notify_replication()
-
-            # Notify appservices.
-            try:
-                self.appservice_handler.notify_interested_services_ephemeral(
-                    stream_key,
-                    new_token,
-                    users,
-                )
-            except Exception:
-                logger.exception("Error notifying application services of ephemeral events")
+            # We resolve all these deferreds in one go so that we only need to
+            # call `PreserveLoggingContext` once, as it has a bunch of overhead
+            # (to calculate performance stats)
+            with PreserveLoggingContext():
+                for listener in listeners:
+                    listener.callback(current_token)
+
+            users_woken_by_stream_counter.labels(stream_key).inc(len(user_streams))
+
+            self.notify_replication()
+
+            # Notify appservices.
+            try:
+                self.appservice_handler.notify_interested_services_ephemeral(
+                    stream_key,
+                    new_token,
+                    users,
+                )
+            except Exception:
+                logger.exception(
+                    "Error notifying application services of ephemeral events"
+                )

     def on_new_replication_data(self) -> None:
         """Used to inform replication listeners that something has happened
```
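The devon side of the two `on_new_event` hunks drops the `Measure` wrapper and adds cheap guards (`if listeners:`, `if user_streams:`) so the hot path skips context switches entirely when there is nothing to wake. The batching idea on its own, as a sketch (the context manager below is a stub; the real one is `synapse.logging.context.PreserveLoggingContext`):

```python
from contextlib import contextmanager
from typing import Iterable, Iterator


@contextmanager
def preserve_logging_context() -> Iterator[None]:
    # Stub: entering/exiting the real context manager has measurable
    # overhead, which is exactly why the callbacks below are batched.
    yield


def wake_listeners(listeners: Iterable, current_token) -> None:
    batch = list(listeners)
    # One context switch for the whole batch, and none at all when the
    # listener list is empty.
    if batch:
        with preserve_logging_context():
            for listener in batch:
                listener.callback(current_token)
```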
```diff
@@ -205,12 +205,6 @@
         if self._is_processing:
             return

-        # Check if we are trying, but failing, to contact the pusher. If so, we
-        # don't try and start processing immediately and instead wait for the
-        # retry loop to try again later (which is controlled by the timer).
-        if self.failing_since and self.timed_call and self.timed_call.active():
-            return
-
         run_as_background_process("httppush.process", self._process)

     async def _process(self) -> None:
```
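The removed lines are the devon-side fix for the backoff bug called out in the deleted changelog entry above: without the gate, every new event kicked off an immediate retry against a failing push endpoint, ignoring the retry timer. As a sketch (attribute names follow the diff; the surrounding class is simplified):

```python
def on_new_notifications(pusher) -> None:
    """Sketch of the entry point that decides whether to start the push loop."""
    if pusher._is_processing:
        return

    # If we are already failing to contact the push endpoint and a retry
    # timer is pending, do nothing: let the timer fire so the backoff
    # interval is actually respected instead of retrying on every event.
    if pusher.failing_since and pusher.timed_call and pusher.timed_call.active():
        return

    pusher.start_processing()  # stand-in for run_as_background_process(...)
```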
```diff
@@ -275,9 +275,7 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
     """
     Register all the admin servlets.
     """
-    RoomRestServlet(hs).register(http_server)
-
-    # Admin servlets below may not work on workers.
+    # Admin servlets aren't registered on workers.
     if hs.config.worker.worker_app is not None:
         return

@@ -285,6 +283,7 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
     BlockRoomRestServlet(hs).register(http_server)
     ListRoomRestServlet(hs).register(http_server)
     RoomStateRestServlet(hs).register(http_server)
+    RoomRestServlet(hs).register(http_server)
     RoomRestV2Servlet(hs).register(http_server)
     RoomMembersRestServlet(hs).register(http_server)
     DeleteRoomStatusByDeleteIdRestServlet(hs).register(http_server)
```
```diff
@@ -143,11 +143,11 @@ class DeviceRestServlet(RestServlet):
         self.hs = hs
         self.auth = hs.get_auth()
         handler = hs.get_device_handler()
+        assert isinstance(handler, DeviceHandler)
         self.device_handler = handler
         self.auth_handler = hs.get_auth_handler()
         self._msc3852_enabled = hs.config.experimental.msc3852_enabled
         self._msc3861_oauth_delegation_enabled = hs.config.experimental.msc3861.enabled
-        self._is_main_process = hs.config.worker.worker_app is None

     async def on_GET(
         self, request: SynapseRequest, device_id: str
@@ -179,14 +179,6 @@
     async def on_DELETE(
         self, request: SynapseRequest, device_id: str
     ) -> Tuple[int, JsonDict]:
-        # Can only be run on main process, as changes to device lists must
-        # happen on main.
-        if not self._is_main_process:
-            error_message = "DELETE on /devices/ must be routed to main process"
-            logger.error(error_message)
-            raise SynapseError(500, error_message)
-        assert isinstance(self.device_handler, DeviceHandler)
-
         requester = await self.auth.get_user_by_req(request)

         try:
@@ -231,14 +223,6 @@
     async def on_PUT(
         self, request: SynapseRequest, device_id: str
     ) -> Tuple[int, JsonDict]:
-        # Can only be run on main process, as changes to device lists must
-        # happen on main.
-        if not self._is_main_process:
-            error_message = "PUT on /devices/ must be routed to main process"
-            logger.error(error_message)
-            raise SynapseError(500, error_message)
-        assert isinstance(self.device_handler, DeviceHandler)
-
         requester = await self.auth.get_user_by_req(request, allow_guest=True)

         body = parse_and_validate_json_object_from_request(request, self.PutBody)
@@ -601,9 +585,9 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
     ):
         DeleteDevicesRestServlet(hs).register(http_server)
         DevicesRestServlet(hs).register(http_server)
-    DeviceRestServlet(hs).register(http_server)

     if hs.config.worker.worker_app is None:
+        DeviceRestServlet(hs).register(http_server)
         if hs.config.experimental.msc2697_enabled:
             DehydratedDeviceServlet(hs).register(http_server)
             ClaimDehydratedDeviceServlet(hs).register(http_server)
```
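The guard removed here is a request-time check rather than a registration-time one: the devon side mounts the servlet on every worker so `GET /devices/` can be served anywhere, and rejects the mutating verbs unless the request was routed to the main process. The check in isolation, as a sketch with a simplified error type:

```python
from typing import Optional


def assert_main_process(worker_app: Optional[str], verb: str) -> None:
    # Device-list changes are only written on the main process, so PUT and
    # DELETE on /devices/ must be routed there; GET may run on any worker.
    if worker_app is not None:
        raise RuntimeError(f"{verb} on /devices/ must be routed to main process")
```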
```diff
@@ -24,7 +24,7 @@ from collections import defaultdict
 from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Tuple, Union

 from synapse.api.constants import AccountDataTypes, EduTypes, Membership, PresenceState
-from synapse.api.errors import Codes, StoreError, SynapseError
+from synapse.api.errors import Codes, LimitExceededError, StoreError, SynapseError
 from synapse.api.filtering import FilterCollection
 from synapse.api.presence import UserPresenceState
 from synapse.api.ratelimiting import Ratelimiter
@@ -248,8 +248,9 @@ class SyncRestServlet(RestServlet):
             await self._server_notices_sender.on_user_syncing(user.to_string())

             # ignore the presence update if the ratelimit is exceeded but do not pause the request
-            allowed, _ = await self._presence_per_user_limiter.can_do_action(requester)
-            if not allowed:
+            try:
+                await self._presence_per_user_limiter.ratelimit(requester, pause=0.0)
+            except LimitExceededError:
                 affect_presence = False
                 logger.debug("User set_presence ratelimit exceeded; ignoring it.")
             else:
```
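Both variants drop the presence update instead of failing the whole `/sync` when the per-user limiter trips; they differ only in API shape: a boolean return versus a raised exception with `pause=0.0` (meaning "do not sleep the request, raise immediately"). A standalone sketch of the two call patterns (the limiter object and exception are stand-ins for Synapse's):

```python
class LimitExceededError(Exception):
    """Stand-in for synapse.api.errors.LimitExceededError."""


async def presence_allowed_boolean(limiter, requester) -> bool:
    # devon-side shape: ask first, act only if allowed.
    allowed, _ = await limiter.can_do_action(requester)
    return allowed


async def presence_allowed_exception(limiter, requester) -> bool:
    # release-side shape: pause=0.0 tells the limiter not to delay the
    # request but to raise immediately; the caller swallows the error and
    # simply skips the presence update.
    try:
        await limiter.ratelimit(requester, pause=0.0)
    except LimitExceededError:
        return False
    return True
```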
```diff
@@ -1693,93 +1693,6 @@ class RoomMemberBackgroundUpdateStore(SQLBaseStore):
             columns=["user_id", "room_id"],
         )

-        self.db_pool.updates.register_background_update_handler(
-            "populate_participant_bg_update", self._populate_participant
-        )
-
-    async def _populate_participant(self, progress: JsonDict, batch_size: int) -> int:
-        """
-        Background update to populate column `participant` on `room_memberships` table
-
-        A 'participant' is someone who is currently joined to a room and has sent at least
-        one `m.room.message` or `m.room.encrypted` event.
-
-        This background update will set the `participant` column across all rows in
-        `room_memberships` based on the user's *current* join status, and if
-        they've *ever* sent a message or encrypted event. Therefore one should
-        never assume the `participant` column's value is based solely on whether
-        the user participated in a previous "session" (where a "session" is defined
-        as a period between the user joining and leaving). See
-        https://github.com/element-hq/synapse/pull/18068#discussion_r1931070291
-        for further detail.
-        """
-        stream_token = progress.get("last_stream_token", None)
-
-        def _get_max_stream_token_txn(txn: LoggingTransaction) -> int:
-            sql = """
-                SELECT event_stream_ordering from room_memberships
-                ORDER BY event_stream_ordering DESC
-                LIMIT 1;
-            """
-            txn.execute(sql)
-            res = txn.fetchone()
-            if not res or not res[0]:
-                return 0
-            return res[0]
-
-        def _background_populate_participant_txn(
-            txn: LoggingTransaction, stream_token: str
-        ) -> None:
-            sql = """
-                UPDATE room_memberships
-                SET participant = True
-                FROM (
-                    SELECT DISTINCT c.state_key, e.room_id
-                    FROM current_state_events AS c
-                    INNER JOIN events AS e ON c.room_id = e.room_id
-                    WHERE c.membership = 'join'
-                    AND c.state_key = e.sender
-                    AND (
-                        e.type = 'm.room.message'
-                        OR e.type = 'm.room.encrypted'
-                    )
-                ) AS subquery
-                WHERE room_memberships.user_id = subquery.state_key
-                AND room_memberships.room_id = subquery.room_id
-                AND room_memberships.event_stream_ordering <= ?
-                AND room_memberships.event_stream_ordering > ?;
-            """
-            batch = int(stream_token) - _POPULATE_PARTICIPANT_BG_UPDATE_BATCH_SIZE
-            txn.execute(sql, (stream_token, batch))
-
-        if stream_token is None:
-            stream_token = await self.db_pool.runInteraction(
-                "_get_max_stream_token", _get_max_stream_token_txn
-            )
-
-        if stream_token < 0:
-            await self.db_pool.updates._end_background_update(
-                "populate_participant_bg_update"
-            )
-            return _POPULATE_PARTICIPANT_BG_UPDATE_BATCH_SIZE
-
-        await self.db_pool.runInteraction(
-            "_background_populate_participant_txn",
-            _background_populate_participant_txn,
-            stream_token,
-        )
-
-        progress["last_stream_token"] = (
-            stream_token - _POPULATE_PARTICIPANT_BG_UPDATE_BATCH_SIZE
-        )
-        await self.db_pool.runInteraction(
-            "populate_participant_bg_update",
-            self.db_pool.updates._background_update_progress_txn,
-            "populate_participant_bg_update",
-            progress,
-        )
-        return _POPULATE_PARTICIPANT_BG_UPDATE_BATCH_SIZE
-
     async def _background_add_membership_profile(
         self, progress: JsonDict, batch_size: int
     ) -> int:
```
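The deleted `_populate_participant` follows Synapse's usual background-update shape: walk a stream-ordering high-watermark downwards one window at a time, persisting progress so the job survives restarts, and end the update when the watermark goes negative. Reduced to a sketch (the `db.*` helpers are hypothetical stand-ins for the `db_pool` machinery above):

```python
async def run_windowed_update(db, progress: dict, window: int) -> int:
    token = progress.get("last_stream_token")
    if token is None:
        # First run: start from the highest stream ordering seen so far.
        token = await db.get_max_stream_ordering()

    if token < 0:
        # Walked past the beginning of the table: the update is finished.
        await db.end_background_update("populate_participant_bg_update")
        return window

    # Process one window of rows: (token - window, token].
    await db.mark_participants_between(token - window, token)

    # Persist the new watermark so a restart resumes from here.
    progress["last_stream_token"] = token - window
    await db.store_progress("populate_participant_bg_update", progress)
    return window
```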
```diff
@@ -19,7 +19,7 @@
 #
 #

-SCHEMA_VERSION = 91  # remember to update the list below when updating
+SCHEMA_VERSION = 92  # remember to update the list below when updating
 """Represents the expectations made by the codebase about the database schema

 This should be incremented whenever the codebase changes its requirements on the
@@ -162,6 +162,12 @@ Changes in SCHEMA_VERSION = 89
 Changes in SCHEMA_VERSION = 90
  - Add a column `participant` to `room_memberships` table
  - Add background update to delete unreferenced state groups.

 Changes in SCHEMA_VERSION = 91
  - Add a `sha256` column to the `local_media_repository` and `remote_media_cache` tables.
+
+Changes in SCHEMA_VERSION = 92
+ - Cleaned up a trigger that was added in #18260 and then reverted.
 """
```
```diff
@@ -13,8 +13,4 @@

--- Add a column `participant` to `room_memberships` table to track whether a room member has sent
--- a `m.room.message` or `m.room.encrypted` event into a room they are a member of
-ALTER TABLE room_memberships ADD COLUMN participant BOOLEAN DEFAULT FALSE;
-
--- Add a background update to populate `participant` column
-INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
-  (9001, 'populate_participant_bg_update', '{}');
+ALTER TABLE room_memberships ADD COLUMN participant BOOLEAN DEFAULT FALSE;
```
```diff
@@ -0,0 +1,16 @@
+--
+-- This file is licensed under the Affero General Public License (AGPL) version 3.
+--
+-- Copyright (C) 2025 New Vector, Ltd
+--
+-- This program is free software: you can redistribute it and/or modify
+-- it under the terms of the GNU Affero General Public License as
+-- published by the Free Software Foundation, either version 3 of the
+-- License, or (at your option) any later version.
+--
+-- See the GNU Affero General Public License for more details:
+-- <https://www.gnu.org/licenses/agpl-3.0.html>.
+
+-- Removes the trigger that was added in #18260 and then reverted
+DROP TRIGGER IF EXISTS event_stats_increment_counts_trigger ON events;
+DROP FUNCTION IF EXISTS event_stats_increment_counts();
@@ -0,0 +1,16 @@
+--
+-- This file is licensed under the Affero General Public License (AGPL) version 3.
+--
+-- Copyright (C) 2025 New Vector, Ltd
+--
+-- This program is free software: you can redistribute it and/or modify
+-- it under the terms of the GNU Affero General Public License as
+-- published by the Free Software Foundation, either version 3 of the
+-- License, or (at your option) any later version.
+--
+-- See the GNU Affero General Public License for more details:
+-- <https://www.gnu.org/licenses/agpl-3.0.html>.
+
+-- Removes the trigger that was added in #18260 and then reverted
+DROP TRIGGER IF EXISTS event_stats_events_insert_trigger;
+DROP TRIGGER IF EXISTS event_stats_events_delete_trigger;
@@ -0,0 +1,17 @@
+--
+-- This file is licensed under the Affero General Public License (AGPL) version 3.
+--
+-- Copyright (C) 2025 New Vector, Ltd
+--
+-- This program is free software: you can redistribute it and/or modify
+-- it under the terms of the GNU Affero General Public License as
+-- published by the Free Software Foundation, either version 3 of the
+-- License, or (at your option) any later version.
+--
+-- See the GNU Affero General Public License for more details:
+-- <https://www.gnu.org/licenses/agpl-3.0.html>.
+
+-- Remove the background update if it was scheduled, as it is not rollback-safe
+-- See https://github.com/element-hq/synapse/issues/18356 for context
+DELETE FROM background_updates
+WHERE update_name = 'populate_participant_bg_update';
```
```diff
@@ -220,7 +220,9 @@ class TestRatelimiter(unittest.HomeserverTestCase):

         self.assertIn("test_id_1", limiter.actions)

-        self.reactor.advance(60)
+        self.get_success_or_raise(
+            limiter.can_do_action(None, key="test_id_2", _time_now_s=10)
+        )

         self.assertNotIn("test_id_1", limiter.actions)
```
```diff
@@ -1167,81 +1167,3 @@ class HTTPPusherTests(HomeserverTestCase):
         self.assertEqual(
             self.push_attempts[0][2]["notification"]["counts"]["unread"], 1
         )
-
-    def test_push_backoff(self) -> None:
-        """
-        The HTTP pusher will backoff correctly if it fails to contact the pusher.
-        """
-
-        # Register the user who gets notified
-        user_id = self.register_user("user", "pass")
-        access_token = self.login("user", "pass")
-
-        # Register the user who sends the message
-        other_user_id = self.register_user("otheruser", "pass")
-        other_access_token = self.login("otheruser", "pass")
-
-        # Register the pusher
-        user_tuple = self.get_success(
-            self.hs.get_datastores().main.get_user_by_access_token(access_token)
-        )
-        assert user_tuple is not None
-        device_id = user_tuple.device_id
-
-        self.get_success(
-            self.hs.get_pusherpool().add_or_update_pusher(
-                user_id=user_id,
-                device_id=device_id,
-                kind="http",
-                app_id="m.http",
-                app_display_name="HTTP Push Notifications",
-                device_display_name="pushy push",
-                pushkey="a@example.com",
-                lang=None,
-                data={"url": "http://example.com/_matrix/push/v1/notify"},
-            )
-        )
-
-        # Create a room with the other user
-        room = self.helper.create_room_as(user_id, tok=access_token)
-        self.helper.join(room=room, user=other_user_id, tok=other_access_token)
-
-        # The other user sends some messages
-        self.helper.send(room, body="Message 1", tok=other_access_token)
-
-        # One push was attempted to be sent
-        self.assertEqual(len(self.push_attempts), 1)
-        self.assertEqual(
-            self.push_attempts[0][1], "http://example.com/_matrix/push/v1/notify"
-        )
-        self.assertEqual(
-            self.push_attempts[0][2]["notification"]["content"]["body"], "Message 1"
-        )
-        self.push_attempts[0][0].callback({})
-        self.pump()
-
-        # Send another message, this time it fails
-        self.helper.send(room, body="Message 2", tok=other_access_token)
-        self.assertEqual(len(self.push_attempts), 2)
-        self.push_attempts[1][0].errback(Exception("couldn't connect"))
-        self.pump()
-
-        # Sending yet another message doesn't trigger a push immediately
-        self.helper.send(room, body="Message 3", tok=other_access_token)
-        self.pump()
-        self.assertEqual(len(self.push_attempts), 2)
-
-        # .. but waiting for a bit will cause more pushes
-        self.reactor.advance(10)
-        self.assertEqual(len(self.push_attempts), 3)
-        self.assertEqual(
-            self.push_attempts[2][2]["notification"]["content"]["body"], "Message 2"
-        )
-        self.push_attempts[2][0].callback({})
-        self.pump()
-
-        self.assertEqual(len(self.push_attempts), 4)
-        self.assertEqual(
-            self.push_attempts[3][2]["notification"]["content"]["body"], "Message 3"
-        )
-        self.push_attempts[3][0].callback({})
```