Compare commits
131 Commits
erikj/effi...release-v1
| SHA1 |
|---|
| 1be16c16a2 |
| 9de615b3aa |
| c9282baf03 |
| 1940d990a3 |
| 1cd0715a0f |
| dcd3698e1f |
| b85c3485b1 |
| 8c56e18e47 |
| 23f88f9c59 |
| 020ff1afe3 |
| 7064b4bcf3 |
| 18279631e9 |
| 85118420a2 |
| ec662bbe41 |
| 4adaba9acf |
| 7cd79ce051 |
| 86ecd341ec |
| 873971a8b9 |
| da162cbe4e |
| 19a1cda084 |
| dffe095642 |
| 3b3fed7229 |
| 3f17178728 |
| 803f63df1c |
| 0ba17777be |
| 69048f7b48 |
| 8aa5479986 |
| b657e89005 |
| bc72d803d5 |
| 6d7c63fcc6 |
| 7dbac123f9 |
| d6ae4041a4 |
| 358896e1b8 |
| 79c349dfb8 |
| 1e5a0e07a7 |
| 35d260d065 |
| 07c0875aa5 |
| 406ff3eb62 |
| bd558a6dc3 |
| 2d15e39684 |
| 54317d34b7 |
| 6130afb862 |
| 0aba4a4eaa |
| 54a51ff6c1 |
| eb0dbab15b |
| 0377cb4fab |
| 8a4fb7a6ba |
| 8c3bcea2da |
| 4513b36a75 |
| 47c629bb27 |
| ad3f43be9a |
| 4347473946 |
| 29638220ab |
| 837f28ce74 |
| 4ce32ade5a |
| 6fc411c7bf |
| e21ff0f048 |
| b80ff1602e |
| d834a80a12 |
| 9ff84bccbb |
| 614efc488b |
| 7f4b413690 |
| efd4d06d76 |
| dac97642e4 |
| 0328b56468 |
| 4581809846 |
| 3dfe5c0270 |
| 8e09b8aecb |
| a476d5048b |
| f3dc6dc19f |
| 8af3f33d84 |
| 81a6f8c9ae |
| 9d3713d6d5 |
| b57630c507 |
| 340f08c6f7 |
| 8da3c2185b |
| eca592b121 |
| 34b5db1fbc |
| ec8499206e |
| 4f6da0dba0 |
| 84ae2e3f6f |
| d98a43d922 |
| 0a5f4f7665 |
| f0a860908b |
| 9c462f18a4 |
| 4f5bccbbba |
| 01a45869f0 |
| ca5d5de79b |
| a51b0862a1 |
| 8fe1fd906a |
| 90ad836ed8 |
| 5eb3fd785b |
| 7cbb2a00d1 |
| a4102d2a5f |
| 190c990a76 |
| b7695ac388 |
| 1fb5a7ad5d |
| fa2c116bef |
| e02f4b7de2 |
| 21407c6709 |
| ae55cc1e6b |
| 0c6142c4a1 |
| fee0195b27 |
| 76b2218599 |
| ea4ece3fcc |
| 68b2611783 |
| a719b703d9 |
| a461f1f846 |
| f9f3e89354 |
| f98f4f2e16 |
| 58f8305114 |
| 96529c4236 |
| 6dc019d9dd |
| 8d2a5586f7 |
| 76e392b0fa |
| d4ea465496 |
| 8ebfd577e2 |
| dbee081d14 |
| 99b7b801c3 |
| 641ff9ef7e |
| 05f8dada8b |
| 654902a758 |
| 4a711bf379 |
| fc566cdf0a |
| 3b6208b835 |
| 3b8348b06e |
| 5c7364fea5 |
| f08d05dd2c |
| e1fa42249c |
| fc1e534e41 |
| 835174180b |
**.github/workflows/docker.yml** (14 changes, vendored)
```diff
@@ -29,6 +29,16 @@ jobs:
       - name: Inspect builder
         run: docker buildx inspect
 
+      - name: Checkout repository
+        uses: actions/checkout@v3
+
+      - name: Extract version from pyproject.toml
+        # Note: explicitly requesting bash will mean bash is invoked with `-eo pipefail`, see
+        # https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsshell
+        shell: bash
+        run: |
+          echo "SYNAPSE_VERSION=$(grep "^version" pyproject.toml | sed -E 's/version\s*=\s*["]([^"]*)["]/\1/')" >> $GITHUB_ENV
+
       - name: Log in to DockerHub
         uses: docker/login-action@v2
         with:
@@ -61,7 +71,9 @@ jobs:
         uses: docker/build-push-action@v4
         with:
           push: true
-          labels: "gitsha1=${{ github.sha }}"
+          labels: |
+            gitsha1=${{ github.sha }}
+            org.opencontainers.image.version=${{ env.SYNAPSE_VERSION }}
           tags: "${{ steps.set-tag.outputs.tags }}"
           file: "docker/Dockerfile"
           platforms: linux/amd64,linux/arm64
```
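The version-extraction step is just a `grep`/`sed` pipeline over `pyproject.toml`. As a rough sketch of what it computes (a Python stand-in for illustration only; the sample `pyproject.toml` excerpt is hypothetical):

```python
import re

# Hypothetical excerpt of a pyproject.toml, standing in for the real file.
pyproject = '''
[tool.poetry]
name = "matrix-synapse"
version = "1.91.2"
'''

# Mirror of the workflow's sed expression: find the first line starting with
# "version" and keep only the quoted value.
for line in pyproject.splitlines():
    if line.startswith("version"):
        match = re.match(r'version\s*=\s*"([^"]*)"', line)
        if match:
            print(f"SYNAPSE_VERSION={match.group(1)}")  # -> SYNAPSE_VERSION=1.91.2
        break
```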
**.github/workflows/twisted_trunk.yml** (5 changes, vendored)
```diff
@@ -5,6 +5,9 @@ on:
     - cron: 0 8 * * *
 
   workflow_dispatch:
+    # NB: inputs are only present when this workflow is dispatched manually.
+    # (The default below is the default field value in the form to trigger
+    # a manual dispatch). Otherwise the inputs will evaluate to null.
    inputs:
      twisted_ref:
        description: Commit, branch or tag to checkout from upstream Twisted.
@@ -49,7 +52,7 @@ jobs:
           extras: "all"
       - run: |
           poetry remove twisted
-          poetry add --extras tls git+https://github.com/twisted/twisted.git#${{ inputs.twisted_ref }}
+          poetry add --extras tls git+https://github.com/twisted/twisted.git#${{ inputs.twisted_ref || 'trunk' }}
           poetry install --no-interaction --extras "all test"
       - name: Remove warn_unused_ignores from mypy config
         run: sed '/warn_unused_ignores = True/d' -i mypy.ini
```
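The functional change in the second hunk is the `|| 'trunk'` fallback: on scheduled (cron) runs the workflow is not dispatched manually, so `inputs.twisted_ref` evaluates to null and the expression falls back to `'trunk'`. A sketch of the expression's semantics in Python:

```python
def resolve_twisted_ref(twisted_ref: str | None) -> str:
    # GitHub's `inputs.twisted_ref || 'trunk'` takes the right-hand side
    # whenever the input is null or empty, i.e. on scheduled (cron) runs.
    return twisted_ref or "trunk"

assert resolve_twisted_ref(None) == "trunk"  # scheduled run: input is null
assert resolve_twisted_ref("") == "trunk"    # dispatched with an empty field
assert resolve_twisted_ref("twisted-23.8.0") == "twisted-23.8.0"  # manual dispatch
```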
**CHANGES.md** (283 changes)
@@ -1,3 +1,286 @@
# Synapse 1.91.2 (2023-09-06)

### Bugfixes

- Revert [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) introspection cache, admin impersonation and account lock. ([\#16258](https://github.com/matrix-org/synapse/issues/16258))


# Synapse 1.91.1 (2023-09-04)

### Bugfixes

- Fix a performance regression introduced in Synapse 1.91.0 where event persistence would cause an excessive linear growth in CPU usage. ([\#16220](https://github.com/matrix-org/synapse/issues/16220))


# Synapse 1.91.0 (2023-08-30)

No significant changes since 1.91.0rc1.


# Synapse 1.91.0rc1 (2023-08-23)

### Features

- Implements an admin API to lock a user without deactivating them. Based on [MSC3939](https://github.com/matrix-org/matrix-spec-proposals/pull/3939). ([\#15870](https://github.com/matrix-org/synapse/issues/15870))
- Implements a task scheduler for resumable, potentially long-running tasks. ([\#15891](https://github.com/matrix-org/synapse/issues/15891))
- Allow specifying `client_secret_path` as an alternative to `client_secret` for OIDC providers. This avoids leaking the client secret in the homeserver config. Contributed by @Ma27. ([\#16030](https://github.com/matrix-org/synapse/issues/16030))
- Allow customising the IdP display name, icon, and brand for SAML and CAS providers (in addition to OIDC providers). ([\#16094](https://github.com/matrix-org/synapse/issues/16094))
- Add an `admins` query parameter to the [List Accounts](https://matrix-org.github.io/synapse/v1.91/admin_api/user_admin_api.html#list-accounts) [admin API](https://matrix-org.github.io/synapse/v1.91/usage/administration/admin_api/index.html), to include only admins or to exclude admins in user queries. ([\#16114](https://github.com/matrix-org/synapse/issues/16114))

### Bugfixes

- Fix a long-standing bug where concurrent requests to change a user's push rules could cause a deadlock. Contributed by Nick @ Beeper (@fizzadar). ([\#16052](https://github.com/matrix-org/synapse/issues/16052))
- Fix a long-standing bug in `/sync` where `timeout=0` does not skip caching, resulting in slow calls in cases where there are no new changes. Contributed by @PlasmaIntec. ([\#16080](https://github.com/matrix-org/synapse/issues/16080))
- Fix performance of state resolutions for large, old rooms that did not have the full auth chain persisted. ([\#16116](https://github.com/matrix-org/synapse/issues/16116))
- Filter out user agent references to the sliding sync proxy and rust-sdk from the `user_daily_visits` table to ensure that Element X can be represented fully. ([\#16124](https://github.com/matrix-org/synapse/issues/16124))
- User consent and third-party ID (3PID) changes capability cannot be enabled when using experimental [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) support. ([\#16127](https://github.com/matrix-org/synapse/issues/16127), [\#16134](https://github.com/matrix-org/synapse/issues/16134))
- Fix a rare race that could block new events from being sent for up to two minutes. Introduced in v1.90.0. ([\#16133](https://github.com/matrix-org/synapse/issues/16133), [\#16169](https://github.com/matrix-org/synapse/issues/16169))
- Fix performance degradation when there are a lot of in-flight replication requests. ([\#16148](https://github.com/matrix-org/synapse/issues/16148))
- Fix a bug introduced in 1.87 where Synapse would send an excessive amount of federation requests to servers which have been offline for a long time. Contributed by Nico. ([\#16156](https://github.com/matrix-org/synapse/issues/16156), [\#16164](https://github.com/matrix-org/synapse/issues/16164))

### Improved Documentation

- Structured logging docs: add a link to explain the ELK stack. ([\#16091](https://github.com/matrix-org/synapse/issues/16091))

### Internal Changes

- Update dehydrated devices implementation. ([\#16010](https://github.com/matrix-org/synapse/issues/16010))
- Fix database performance of read/write worker locks. ([\#16061](https://github.com/matrix-org/synapse/issues/16061))
- Fix building the nix development environment on macOS systems. ([\#16063](https://github.com/matrix-org/synapse/issues/16063))
- Override global statement timeout when creating indexes in Postgres. ([\#16085](https://github.com/matrix-org/synapse/issues/16085))
- Fix the type annotation on `run_db_interaction` in the Module API. ([\#16089](https://github.com/matrix-org/synapse/issues/16089))
- Clean up the presence code. ([\#16092](https://github.com/matrix-org/synapse/issues/16092))
- Run `pyupgrade` for Python 3.8+. ([\#16110](https://github.com/matrix-org/synapse/issues/16110))
- Rename pagination and purge locks and add comments to explain why they exist and how they work. ([\#16112](https://github.com/matrix-org/synapse/issues/16112))
- Attempt to fix the Twisted trunk job. ([\#16115](https://github.com/matrix-org/synapse/issues/16115))
- Cache token introspection response from OIDC provider. ([\#16117](https://github.com/matrix-org/synapse/issues/16117))
- Add cache to `get_server_keys_json_for_remote`. ([\#16123](https://github.com/matrix-org/synapse/issues/16123))
- Add an admin endpoint to allow the authorizing server to signal token revocations. ([\#16125](https://github.com/matrix-org/synapse/issues/16125))
- Add response time metrics for introspection requests for delegated auth. ([\#16131](https://github.com/matrix-org/synapse/issues/16131))
- [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861): allow impersonation by an admin user using the `_oidc_admin_impersonate_user_id` query parameter. ([\#16132](https://github.com/matrix-org/synapse/issues/16132))
- Increase performance of read/write locks. ([\#16149](https://github.com/matrix-org/synapse/issues/16149))
- Improve presence tests. ([\#16150](https://github.com/matrix-org/synapse/issues/16150), [\#16151](https://github.com/matrix-org/synapse/issues/16151), [\#16158](https://github.com/matrix-org/synapse/issues/16158))
- Raise the poetry-core version cap to 1.7.0. ([\#16152](https://github.com/matrix-org/synapse/issues/16152))
- Fix assertion in user directory unit tests. ([\#16157](https://github.com/matrix-org/synapse/issues/16157))
- Reduce scope of locks when paginating to alleviate DB contention. ([\#16159](https://github.com/matrix-org/synapse/issues/16159))
- Reduce DB contention on worker locks. ([\#16160](https://github.com/matrix-org/synapse/issues/16160))
- Task scheduler: mark a task as active if we are scheduling it as soon as possible. ([\#16165](https://github.com/matrix-org/synapse/issues/16165))

### Updates to locked dependencies

* Bump click from 8.1.6 to 8.1.7. ([\#16145](https://github.com/matrix-org/synapse/issues/16145))
* Bump gitpython from 3.1.31 to 3.1.32. ([\#16103](https://github.com/matrix-org/synapse/issues/16103))
* Bump ijson from 3.2.1 to 3.2.3. ([\#16143](https://github.com/matrix-org/synapse/issues/16143))
* Bump isort from 5.11.5 to 5.12.0. ([\#16108](https://github.com/matrix-org/synapse/issues/16108))
* Bump log from 0.4.19 to 0.4.20. ([\#16109](https://github.com/matrix-org/synapse/issues/16109))
* Bump pygithub from 1.59.0 to 1.59.1. ([\#16144](https://github.com/matrix-org/synapse/issues/16144))
* Bump sentry-sdk from 1.28.1 to 1.29.2. ([\#16142](https://github.com/matrix-org/synapse/issues/16142))
* Bump serde from 1.0.183 to 1.0.184. ([\#16139](https://github.com/matrix-org/synapse/issues/16139))
* Bump txredisapi from 1.4.9 to 1.4.10. ([\#16107](https://github.com/matrix-org/synapse/issues/16107))
* Bump types-bleach from 6.0.0.3 to 6.0.0.4. ([\#16106](https://github.com/matrix-org/synapse/issues/16106))
* Bump types-pillow from 10.0.0.1 to 10.0.0.2. ([\#16105](https://github.com/matrix-org/synapse/issues/16105))
* Bump types-pyopenssl from 23.2.0.1 to 23.2.0.2. ([\#16146](https://github.com/matrix-org/synapse/issues/16146))

# Synapse 1.90.0 (2023-08-15)

No significant changes since 1.90.0rc1.


# Synapse 1.90.0rc1 (2023-08-08)

### Features

- Scope transaction IDs to devices (implement [MSC3970](https://github.com/matrix-org/matrix-spec-proposals/pull/3970)). ([\#15629](https://github.com/matrix-org/synapse/issues/15629))
- Remove old rows from the `cache_invalidation_stream_by_instance` table automatically (this table is unused in SQLite). ([\#15868](https://github.com/matrix-org/synapse/issues/15868))

### Bugfixes

- Fix a long-standing bug where purging history and paginating simultaneously could lead to database corruption when using workers. ([\#15791](https://github.com/matrix-org/synapse/issues/15791))
- Fix a long-standing bug where the profile endpoint returned a 404 when the user's display name was empty. ([\#16012](https://github.com/matrix-org/synapse/issues/16012))
- Fix a long-standing bug where the `synapse_port_db` script failed to configure sequences for application services and partially-stated rooms. ([\#16043](https://github.com/matrix-org/synapse/issues/16043))
- Fix a long-standing bug with deletion in dehydrated devices v2. ([\#16046](https://github.com/matrix-org/synapse/issues/16046))

### Updates to the Docker image

- Add `org.opencontainers.image.version` labels to Docker containers [published by Matrix.org](https://hub.docker.com/r/matrixdotorg/synapse). Contributed by Mo Balaa. ([\#15972](https://github.com/matrix-org/synapse/issues/15972), [\#16009](https://github.com/matrix-org/synapse/issues/16009))

### Improved Documentation

- Add an internal documentation page describing the ["streams" used within Synapse](https://matrix-org.github.io/synapse/v1.90/development/synapse_architecture/streams.html). ([\#16015](https://github.com/matrix-org/synapse/issues/16015))
- Clarify comment on the keys/upload over replication endpoint. ([\#16016](https://github.com/matrix-org/synapse/issues/16016))
- Do not expose the Admin API in the caddy reverse proxy example. Contributed by @NilsIrl. ([\#16027](https://github.com/matrix-org/synapse/issues/16027))

### Deprecations and Removals

- Remove support for legacy application service paths. ([\#15964](https://github.com/matrix-org/synapse/issues/15964))
- Move support for application service query parameter authorization behind a configuration option. ([\#16017](https://github.com/matrix-org/synapse/issues/16017))

### Internal Changes

- Update SQL queries to inline boolean parameters as supported in SQLite 3.27. ([\#15525](https://github.com/matrix-org/synapse/issues/15525))
- Allow for the configuration of the backoff algorithm for federation destinations. ([\#15754](https://github.com/matrix-org/synapse/issues/15754))
- Allow modules to check whether the current worker is configured to run background tasks. ([\#15991](https://github.com/matrix-org/synapse/issues/15991))
- Update support for [MSC3958](https://github.com/matrix-org/matrix-spec-proposals/pull/3958) to match the latest revision of the MSC. ([\#15992](https://github.com/matrix-org/synapse/issues/15992))
- Allow modules to schedule delayed background calls. ([\#15993](https://github.com/matrix-org/synapse/issues/15993))
- Properly overwrite the `redacts` content property for forwards-compatibility with room versions 1 through 10. ([\#16013](https://github.com/matrix-org/synapse/issues/16013))
- Fix building the nix development environment on macOS systems. ([\#16019](https://github.com/matrix-org/synapse/issues/16019))
- Remove leading and trailing spaces when setting a display name. ([\#16031](https://github.com/matrix-org/synapse/issues/16031))
- Combine duplicated code. ([\#16023](https://github.com/matrix-org/synapse/issues/16023))
- Collect additional metrics from `ResponseCache` for eviction. ([\#16028](https://github.com/matrix-org/synapse/issues/16028))
- Fix endpoint improperly declaring support for MSC3814. ([\#16068](https://github.com/matrix-org/synapse/issues/16068))
- Drop backwards-compatibility hack for event serialization. ([\#16069](https://github.com/matrix-org/synapse/issues/16069))

### Updates to locked dependencies

* Update PyYAML to 6.0.1. ([\#16011](https://github.com/matrix-org/synapse/issues/16011))
* Bump cryptography from 41.0.2 to 41.0.3. ([\#16048](https://github.com/matrix-org/synapse/issues/16048))
* Bump furo from 2023.5.20 to 2023.7.26. ([\#16077](https://github.com/matrix-org/synapse/issues/16077))
* Bump immutabledict from 2.2.4 to 3.0.0. ([\#16034](https://github.com/matrix-org/synapse/issues/16034))
* Update certifi to 2023.7.22 and pygments to 2.15.1. ([\#16044](https://github.com/matrix-org/synapse/issues/16044))
* Bump jsonschema from 4.18.3 to 4.19.0. ([\#16081](https://github.com/matrix-org/synapse/issues/16081))
* Bump phonenumbers from 8.13.14 to 8.13.18. ([\#16076](https://github.com/matrix-org/synapse/issues/16076))
* Bump regex from 1.9.1 to 1.9.3. ([\#16073](https://github.com/matrix-org/synapse/issues/16073))
* Bump serde from 1.0.171 to 1.0.175. ([\#15982](https://github.com/matrix-org/synapse/issues/15982))
* Bump serde from 1.0.175 to 1.0.179. ([\#16033](https://github.com/matrix-org/synapse/issues/16033))
* Bump serde from 1.0.179 to 1.0.183. ([\#16074](https://github.com/matrix-org/synapse/issues/16074))
* Bump serde_json from 1.0.103 to 1.0.104. ([\#16032](https://github.com/matrix-org/synapse/issues/16032))
* Bump service-identity from 21.1.0 to 23.1.0. ([\#16038](https://github.com/matrix-org/synapse/issues/16038))
* Bump types-commonmark from 0.9.2.3 to 0.9.2.4. ([\#16037](https://github.com/matrix-org/synapse/issues/16037))
* Bump types-jsonschema from 4.17.0.8 to 4.17.0.10. ([\#16036](https://github.com/matrix-org/synapse/issues/16036))
* Bump types-netaddr from 0.8.0.8 to 0.8.0.9. ([\#16035](https://github.com/matrix-org/synapse/issues/16035))
* Bump types-opentracing from 2.4.10.5 to 2.4.10.6. ([\#16078](https://github.com/matrix-org/synapse/issues/16078))
* Bump types-setuptools from 68.0.0.0 to 68.0.0.3. ([\#16079](https://github.com/matrix-org/synapse/issues/16079))


# Synapse 1.89.0 (2023-08-01)

No significant changes since 1.89.0rc1.


# Synapse 1.89.0rc1 (2023-07-25)

### Features

- Add Unix Socket support for HTTP Replication Listeners. [Document and provide usage instructions](https://matrix-org.github.io/synapse/v1.89/usage/configuration/config_documentation.html#listeners) for utilizing Unix sockets in Synapse. Contributed by Jason Little. ([\#15708](https://github.com/matrix-org/synapse/issues/15708), [\#15924](https://github.com/matrix-org/synapse/issues/15924))
- Allow `+` in Matrix IDs, per [MSC4009](https://github.com/matrix-org/matrix-spec-proposals/pull/4009). ([\#15911](https://github.com/matrix-org/synapse/issues/15911))
- Support room version 11 from [MSC3820](https://github.com/matrix-org/matrix-spec-proposals/pull/3820). ([\#15912](https://github.com/matrix-org/synapse/issues/15912))
- Allow configuring the set of workers to proxy outbound federation traffic through via `outbound_federation_restricted_to`. ([\#15913](https://github.com/matrix-org/synapse/issues/15913), [\#15969](https://github.com/matrix-org/synapse/issues/15969))
- Implement [MSC3814](https://github.com/matrix-org/matrix-spec-proposals/pull/3814), dehydrated devices v2/shrivelled sessions, and move [MSC2697](https://github.com/matrix-org/matrix-spec-proposals/pull/2697) behind a config flag. Contributed by Nico from Famedly, H-Shay and poljar. ([\#15929](https://github.com/matrix-org/synapse/issues/15929))

### Bugfixes

- Fix a long-standing bug where remote invites weren't correctly pushed. ([\#15820](https://github.com/matrix-org/synapse/issues/15820))
- Fix background schema updates failing over a large upgrade gap. ([\#15887](https://github.com/matrix-org/synapse/issues/15887))
- Fix a bug introduced in 1.86.0 where Synapse would fail to start with an empty `experimental_features` configuration setting. ([\#15925](https://github.com/matrix-org/synapse/issues/15925))
- Fix deploy annotations in the provided Grafana dashboard config, so that they show for any homeserver and not just matrix.org. Contributed by @wrjlewis. ([\#15957](https://github.com/matrix-org/synapse/issues/15957))
- Ensure a long state resolution does not starve CPU by occasionally yielding to the reactor. ([\#15960](https://github.com/matrix-org/synapse/issues/15960))
- Properly handle redactions of creation events. ([\#15973](https://github.com/matrix-org/synapse/issues/15973))
- Fix a bug where resyncing stale device lists could block responding to federation transactions, and thus delay receiving new data from the remote server. ([\#15975](https://github.com/matrix-org/synapse/issues/15975))

### Improved Documentation

- Better clarify how to run a worker instance (pass both configs). ([\#15921](https://github.com/matrix-org/synapse/issues/15921))
- Improve [the documentation](https://matrix-org.github.io/synapse/v1.89/admin_api/user_admin_api.html#login-as-a-user) for the login as a user admin API. ([\#15938](https://github.com/matrix-org/synapse/issues/15938))
- Fix broken Arch Linux package link. Contributed by @SnipeXandrej. ([\#15981](https://github.com/matrix-org/synapse/issues/15981))

### Deprecations and Removals

- Remove support for calling the `/register` endpoint with an unspecced `user` property for application services. ([\#15928](https://github.com/matrix-org/synapse/issues/15928))

### Internal Changes

- Mark `get_user_in_directory` private since it is only used in tests. Also remove the cache from it. ([\#15884](https://github.com/matrix-org/synapse/issues/15884))
- Document which Python version runs on a given Linux distribution so we can more easily clean up later. ([\#15909](https://github.com/matrix-org/synapse/issues/15909))
- Add details to the warning in the log when we fail to fetch an alias. ([\#15922](https://github.com/matrix-org/synapse/issues/15922))
- Remove unneeded `__init__`. ([\#15926](https://github.com/matrix-org/synapse/issues/15926))
- Fix bug with read/write lock implementation. This is currently unused so has no observable effects. ([\#15933](https://github.com/matrix-org/synapse/issues/15933), [\#15958](https://github.com/matrix-org/synapse/issues/15958))
- Unbreak the nix development environment by pinning the Rust version to 1.70.0. ([\#15940](https://github.com/matrix-org/synapse/issues/15940))
- Update presence metrics to differentiate remote vs local users. ([\#15952](https://github.com/matrix-org/synapse/issues/15952))
- Stop reading from column `user_id` of table `profiles`. ([\#15955](https://github.com/matrix-org/synapse/issues/15955))
- Build packages for Debian Trixie. ([\#15961](https://github.com/matrix-org/synapse/issues/15961))
- Reduce the amount of state we pull out. ([\#15968](https://github.com/matrix-org/synapse/issues/15968))
- Speed up updating state in large rooms. ([\#15971](https://github.com/matrix-org/synapse/issues/15971))

### Updates to locked dependencies

* Bump anyhow from 1.0.71 to 1.0.72. ([\#15949](https://github.com/matrix-org/synapse/issues/15949))
* Bump click from 8.1.3 to 8.1.6. ([\#15984](https://github.com/matrix-org/synapse/issues/15984))
* Bump cryptography from 41.0.1 to 41.0.2. ([\#15943](https://github.com/matrix-org/synapse/issues/15943))
* Bump jsonschema from 4.17.3 to 4.18.3. ([\#15948](https://github.com/matrix-org/synapse/issues/15948))
* Bump pillow from 9.4.0 to 10.0.0. ([\#15986](https://github.com/matrix-org/synapse/issues/15986))
* Bump prometheus-client from 0.17.0 to 0.17.1. ([\#15945](https://github.com/matrix-org/synapse/issues/15945))
* Bump pydantic from 1.10.10 to 1.10.11. ([\#15946](https://github.com/matrix-org/synapse/issues/15946))
* Bump pygithub from 1.58.2 to 1.59.0. ([\#15834](https://github.com/matrix-org/synapse/issues/15834))
* Bump pyo3-log from 0.8.2 to 0.8.3. ([\#15951](https://github.com/matrix-org/synapse/issues/15951))
* Bump sentry-sdk from 1.26.0 to 1.28.1. ([\#15985](https://github.com/matrix-org/synapse/issues/15985))
* Bump serde_json from 1.0.100 to 1.0.103. ([\#15950](https://github.com/matrix-org/synapse/issues/15950))
* Bump types-pillow from 9.5.0.4 to 10.0.0.1. ([\#15932](https://github.com/matrix-org/synapse/issues/15932))
* Bump types-requests from 2.31.0.1 to 2.31.0.2. ([\#15983](https://github.com/matrix-org/synapse/issues/15983))
* Bump typing-extensions from 4.5.0 to 4.7.1. ([\#15947](https://github.com/matrix-org/synapse/issues/15947))


# Synapse 1.88.0 (2023-07-18)

This release
**Cargo.lock** (34 changes, generated)
```diff
@@ -132,9 +132,9 @@ dependencies = [
 
 [[package]]
 name = "log"
-version = "0.4.19"
+version = "0.4.20"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4"
+checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f"
 
 [[package]]
 name = "memchr"
@@ -291,9 +291,9 @@ dependencies = [
 
 [[package]]
 name = "regex"
-version = "1.9.1"
+version = "1.9.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b2eae68fc220f7cf2532e4494aded17545fce192d59cd996e0fe7887f4ceb575"
+checksum = "81bc1d4caf89fac26a70747fe603c130093b53c773888797a6329091246d651a"
 dependencies = [
  "aho-corasick",
  "memchr",
@@ -303,9 +303,9 @@ dependencies = [
 
 [[package]]
 name = "regex-automata"
-version = "0.3.2"
+version = "0.3.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "83d3daa6976cffb758ec878f108ba0e062a45b2d6ca3a2cca965338855476caf"
+checksum = "fed1ceff11a1dddaee50c9dc8e4938bd106e9d89ae372f192311e7da498e3b69"
 dependencies = [
  "aho-corasick",
  "memchr",
@@ -314,9 +314,9 @@ dependencies = [
 
 [[package]]
 name = "regex-syntax"
-version = "0.7.3"
+version = "0.7.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2ab07dc67230e4a4718e70fd5c20055a4334b121f1f9db8fe63ef39ce9b8c846"
+checksum = "e5ea92a5b6195c6ef2a0295ea818b312502c6fc94dde986c5553242e18fd4ce2"
 
 [[package]]
 name = "ryu"
@@ -332,29 +332,29 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
 
 [[package]]
 name = "serde"
-version = "1.0.171"
+version = "1.0.184"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "30e27d1e4fd7659406c492fd6cfaf2066ba8773de45ca75e855590f856dc34a9"
+checksum = "2c911f4b04d7385c9035407a4eff5903bf4fe270fa046fda448b69e797f4fff0"
 dependencies = [
  "serde_derive",
 ]
 
 [[package]]
 name = "serde_derive"
-version = "1.0.171"
+version = "1.0.184"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "389894603bd18c46fa56231694f8d827779c0951a667087194cf9de94ed24682"
+checksum = "c1df27f5b29406ada06609b2e2f77fb34f6dbb104a457a671cc31dbed237e09e"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.25",
+ "syn 2.0.28",
 ]
 
 [[package]]
 name = "serde_json"
-version = "1.0.103"
+version = "1.0.104"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d03b412469450d4404fe8499a268edd7f8b79fecb074b0d812ad64ca21f4031b"
+checksum = "076066c5f1078eac5b722a31827a8832fe108bed65dfa75e233c89f8206e976c"
 dependencies = [
  "itoa",
  "ryu",
@@ -386,9 +386,9 @@ dependencies = [
 
 [[package]]
 name = "syn"
-version = "2.0.25"
+version = "2.0.28"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "15e3fc8c0c74267e2df136e5e5fb656a464158aa57624053375eb9c8c6e25ae2"
+checksum = "04361975b3f5e348b2189d8dc55bc942f278b2d482a6a0365de5bdd62d351567"
 dependencies = [
  "proc-macro2",
  "quote",
```
**book.toml** (12 changes)
```diff
@@ -34,6 +34,14 @@ additional-css = [
     "docs/website_files/table-of-contents.css",
     "docs/website_files/remove-nav-buttons.css",
     "docs/website_files/indent-section-headers.css",
+    "docs/website_files/version-picker.css",
 ]
-additional-js = ["docs/website_files/table-of-contents.js"]
+additional-js = [
+    "docs/website_files/table-of-contents.js",
+    "docs/website_files/version-picker.js",
+    "docs/website_files/version.js",
+]
 theme = "docs/website_files/theme"
 
 [preprocessor.schema_versions]
 command = "./scripts-dev/schema_versions.py"
```
```diff
@@ -1 +0,0 @@
-Add Unix Socket support for HTTP Replication Listeners. Document and provide usage instructions for utilizing Unix sockets in Synapse. Contributed by Jason Little.
@@ -1 +0,0 @@
-Fix long-standing bug where remote invites weren't correctly pushed.
@@ -1 +0,0 @@
-Mark `get_user_in_directory` private since it is only used in tests. Also remove the cache from it.
@@ -1 +0,0 @@
-Fix background schema updates failing over a large upgrade gap.
@@ -1 +0,0 @@
-Document which Python version runs on a given Linux distribution so we can more easily clean up later.
@@ -1 +0,0 @@
-Allow `+` in Matrix IDs, per [MSC4009](https://github.com/matrix-org/matrix-spec-proposals/pull/4009).
@@ -1 +0,0 @@
-Support room version 11 from [MSC3820](https://github.com/matrix-org/matrix-spec-proposals/pull/3820).
@@ -1 +0,0 @@
-Allow configuring the set of workers to proxy outbound federation traffic through via `outbound_federation_restricted_to`.
@@ -1 +0,0 @@
-Better clarify how to run a worker instance (pass both configs).
@@ -1 +0,0 @@
-Add details to warning in log when we fail to fetch an alias.
@@ -1 +0,0 @@
-Add Unix Socket support for HTTP Replication Listeners. Document and provide usage instructions for utilizing Unix sockets in Synapse. Contributed by Jason Little.
@@ -1 +0,0 @@
-Fix a bug introduced in 1.86.0 where Synapse starting with an empty `experimental_features` configuration setting.
@@ -1 +0,0 @@
-Remove unneeded `__init__`.
@@ -1 +0,0 @@
-Remove support for calling the `/register` endpoint with an unspecced `user` property for application services.
@@ -1 +0,0 @@
-Fix bug with read/write lock implementation. This is currently unused so has no observable effects.
@@ -1 +0,0 @@
-Improve the documentation for the login as a user admin API.
@@ -1 +0,0 @@
-Unbreak the nix development environment by pinning the Rust version to 1.70.0.
@@ -1 +0,0 @@
-Update presence metrics to differentiate remote vs local users.
@@ -1 +0,0 @@
-Fix bug with read/write lock implementation. This is currently unused so has no observable effects.
@@ -1 +0,0 @@
-Ensure a long state res does not starve CPU by occasionally yielding to the reactor.
@@ -1 +0,0 @@
-Reduce the amount of state we pull out.
@@ -1 +0,0 @@
-Allow configuring the set of workers to proxy outbound federation traffic through via `outbound_federation_restricted_to`.
```
```diff
@@ -769,7 +769,7 @@ def main(server_url, identity_server_url, username, token, config_path):
     global CONFIG_JSON
     CONFIG_JSON = config_path  # bit cheeky, but just overwrite the global
     try:
-        with open(config_path, "r") as config:
+        with open(config_path) as config:
             syn_cmd.config = json.load(config)
             try:
                 http_client.verbose = "on" == syn_cmd.config["verbose"]
```
```diff
@@ -63,7 +63,7 @@
         "uid": "${DS_PROMETHEUS}"
       },
       "enable": true,
-      "expr": "changes(process_start_time_seconds{instance=\"matrix.org\",job=~\"synapse\"}[$bucket_size]) * on (instance, job) group_left(version) synapse_build_info{instance=\"matrix.org\",job=\"synapse\"}",
+      "expr": "changes(process_start_time_seconds{instance=\"$instance\",job=~\"synapse\"}[$bucket_size]) * on (instance, job) group_left(version) synapse_build_info{instance=\"$instance\",job=\"synapse\"}",
       "iconColor": "purple",
       "name": "deploys",
       "titleFormat": "Deployed {{version}}"
```
**debian/changelog** (48 changes, vendored)
```diff
@@ -1,3 +1,51 @@
+matrix-synapse-py3 (1.91.2) stable; urgency=medium
+
+  * New synapse release 1.91.2.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 06 Sep 2023 14:59:30 +0000
+
+matrix-synapse-py3 (1.91.1) stable; urgency=medium
+
+  * New Synapse release 1.91.1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Mon, 04 Sep 2023 14:03:18 +0100
+
+matrix-synapse-py3 (1.91.0) stable; urgency=medium
+
+  * New Synapse release 1.91.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 30 Aug 2023 11:18:10 +0100
+
+matrix-synapse-py3 (1.91.0~rc1) stable; urgency=medium
+
+  * New Synapse release 1.91.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 23 Aug 2023 09:47:18 -0700
+
+matrix-synapse-py3 (1.90.0) stable; urgency=medium
+
+  * New Synapse release 1.90.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 15 Aug 2023 11:17:34 +0100
+
+matrix-synapse-py3 (1.90.0~rc1) stable; urgency=medium
+
+  * New Synapse release 1.90.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 08 Aug 2023 15:29:34 +0100
+
+matrix-synapse-py3 (1.89.0) stable; urgency=medium
+
+  * New Synapse release 1.89.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 01 Aug 2023 11:07:15 +0100
+
+matrix-synapse-py3 (1.89.0~rc1) stable; urgency=medium
+
+  * New Synapse release 1.89.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 25 Jul 2023 14:31:07 +0200
+
 matrix-synapse-py3 (1.88.0) stable; urgency=medium
 
   * New Synapse release 1.88.0.
```
```diff
@@ -861,7 +861,7 @@ def generate_worker_files(
         # Then a worker config file
         convert(
             "/conf/worker.yaml.j2",
-            "/conf/workers/{name}.yaml".format(name=worker_name),
+            f"/conf/workers/{worker_name}.yaml",
             **worker_config,
             worker_log_config_filepath=log_config_filepath,
             using_unix_sockets=using_unix_sockets,
```
```diff
@@ -82,7 +82,7 @@ def generate_config_from_template(
             with open(filename) as handle:
                 value = handle.read()
         else:
-            log("Generating a random secret for {}".format(secret))
+            log(f"Generating a random secret for {secret}")
             value = codecs.encode(os.urandom(32), "hex").decode()
             with open(filename, "w") as handle:
                 handle.write(value)
```
```diff
@@ -97,6 +97,7 @@
         - [Cancellation](development/synapse_architecture/cancellation.md)
     - [Log Contexts](log_contexts.md)
     - [Replication](replication.md)
+    - [Streams](development/synapse_architecture/streams.md)
     - [TCP Replication](tcp_replication.md)
 - [Faster remote joins](development/synapse_architecture/faster_joins.md)
 - [Internal Documentation](development/internal_documentation/README.md)
```
```diff
@@ -146,6 +146,7 @@ Body parameters:
 - `admin` - **bool**, optional, defaults to `false`. Whether the user is a homeserver administrator,
   granting them access to the Admin API, among other things.
 - `deactivated` - **bool**, optional. If unspecified, deactivation state will be left unchanged.
+- `locked` - **bool**, optional. If unspecified, locked state will be left unchanged.
 
 Note: the `password` field must also be set if both of the following are true:
 - `deactivated` is set to `false` and the user was previously deactivated (you are reactivating this user)
@@ -218,6 +219,8 @@ The following parameters should be set in the URL:
   **or** displaynames that contain this value.
 - `guests` - string representing a bool - Is optional and if `false` will **exclude** guest users.
   Defaults to `true` to include guest users.
+- `admins` - Optional flag to filter admins. If `true`, only admins are queried. If `false`, admins are excluded from
+  the query. When the flag is absent (the default), **both** admins and non-admins are included in the search results.
 - `deactivated` - string representing a bool - Is optional and if `true` will **include** deactivated users.
   Defaults to `false` to exclude deactivated users.
 - `limit` - string representing a positive integer - Is optional but is used for pagination,
```
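For illustration, querying the List Accounts endpoint with the new `admins` flag might look like this (a sketch; the base URL and token are placeholders, and the `requests` library is assumed available):

```python
import requests

BASE_URL = "https://homeserver.example.com"  # placeholder
ADMIN_TOKEN = "<admin access token>"         # placeholder

# admins=true -> only admins; admins=false -> exclude admins;
# omit the parameter entirely to include both admins and non-admins.
response = requests.get(
    f"{BASE_URL}/_synapse/admin/v2/users",
    params={"admins": "true", "limit": "10"},
    headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
)
response.raise_for_status()
for user in response.json()["users"]:
    print(user["name"])
```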
**docs/development/synapse_architecture/streams.md** (157 changes, new file)
@@ -0,0 +1,157 @@
## Streams

Synapse has a concept of "streams", which are roughly described in [`id_generators.py`](https://github.com/matrix-org/synapse/blob/develop/synapse/storage/util/id_generators.py).
Generally speaking, streams are a series of notifications that something in Synapse's database has changed that the application might need to respond to.
For example:

- The events stream reports new events (PDUs) that Synapse creates, or that Synapse accepts from another homeserver.
- The account data stream reports changes to users' [account data](https://spec.matrix.org/v1.7/client-server-api/#client-config).
- The to-device stream reports when a device has a new [to-device message](https://spec.matrix.org/v1.7/client-server-api/#send-to-device-messaging).

See [`synapse.replication.tcp.streams`](https://github.com/matrix-org/synapse/blob/develop/synapse/replication/tcp/streams/__init__.py) for the full list of streams.

It is very helpful to understand the streams mechanism when working on any part of Synapse that needs to respond to changes, especially if those changes are made by different workers.
To that end, let's describe streams formally, paraphrasing from the docstring of [`AbstractStreamIdGenerator`](https://github.com/matrix-org/synapse/blob/a719b703d9bd0dade2565ddcad0e2f3a7a9d4c37/synapse/storage/util/id_generators.py#L96).

### Definition

A stream is an append-only log `T1, T2, ..., Tn, ...` of facts[^1] which grows over time.
Only "writers" can add facts to a stream, and there may be multiple writers.

Each fact has an ID, called its "stream ID".
Readers should only process facts in ascending stream ID order.

Roughly speaking, each stream is backed by a database table.
It should have a `stream_id` (or similar) bigint column holding stream IDs, plus additional columns as necessary to describe the fact.
Typically, a fact is expressed with a single row in its backing table.[^2]
Within a stream, no two facts may have the same `stream_id`.

> _Aside_. Some additional notes on streams' backing tables.
>
> 1. Rich would like to [ditch the backing tables](https://github.com/matrix-org/synapse/issues/13456).
> 2. The backing tables may have other uses.
>    For example, the events table backs the events stream, and is read when processing new events.
>    But old rows are read from the table all the time, whenever Synapse needs to look up some facts about an event.
> 3. Rich suspects that sometimes the stream is backed by multiple tables, so the stream proper is the union of those tables.

Stream writers can "reserve" a stream ID, and then later mark it as having been completed.
Stream writers need to track the completion of each stream fact.
In the happy case, completion means a fact has been written to the stream table.
But unhappy cases (e.g. transaction rollback due to an error) also count as completion.
Once completed, the rows written with that stream ID are fixed, and no new rows will be inserted with that ID.
### Current stream ID

For any given stream reader (including writers themselves), we may define a per-writer current stream ID:

> The current stream ID _for a writer W_ is the largest stream ID such that
> all transactions added by W with equal or smaller ID have completed.

Similarly, there is a "linear" notion of current stream ID:

> The "linear" current stream ID is the largest stream ID such that
> all facts (added by any writer) with equal or smaller ID have completed.

Because different stream readers A and B learn about new facts at different times, A and B may disagree about current stream IDs.
Put differently: we should think of stream readers as being independent of each other, proceeding through a stream of facts at different rates.

**NB.** For both senses of "current": if a writer opens a transaction that never completes, the current stream ID will never advance beyond that writer's last written stream ID.

For single-writer streams, the per-writer current ID and the linear current ID are the same.
Both senses of current ID are monotonic, but they may "skip" or jump over IDs because facts complete out of order.

_Example_.
Consider a single-writer stream which is initially at ID 1.

| Action     | Current stream ID | Notes                                             |
|------------|-------------------|---------------------------------------------------|
|            | 1                 |                                                   |
| Reserve 2  | 1                 |                                                   |
| Reserve 3  | 1                 |                                                   |
| Complete 3 | 1                 | current ID unchanged, waiting for 2 to complete   |
| Complete 2 | 3                 | current ID jumps from 1 -> 3                      |
| Reserve 4  | 3                 |                                                   |
| Reserve 5  | 3                 |                                                   |
| Reserve 6  | 3                 |                                                   |
| Complete 5 | 3                 |                                                   |
| Complete 4 | 5                 | current ID jumps 3 -> 5, even though 6 is pending |
| Complete 6 | 6                 |                                                   |
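The jumps in the table follow directly from the definition: the current ID advances past a reserved ID only once every ID up to and including it has completed. A toy sketch of that bookkeeping (illustrative only, not Synapse's `id_generators.py`):

```python
class SingleWriterStream:
    """Toy model of a single writer's stream-position bookkeeping."""

    def __init__(self, current: int = 1) -> None:
        self.current = current          # largest ID with all <= it completed
        self.pending: set[int] = set()  # reserved but not yet completed
        self.completed: set[int] = set()

    def reserve(self, stream_id: int) -> None:
        self.pending.add(stream_id)

    def complete(self, stream_id: int) -> None:
        self.pending.discard(stream_id)
        self.completed.add(stream_id)
        # Advance while the next ID has completed; stop at the first gap.
        while self.current + 1 in self.completed:
            self.current += 1

stream = SingleWriterStream()
stream.reserve(2); stream.reserve(3)
stream.complete(3); assert stream.current == 1  # still waiting for 2
stream.complete(2); assert stream.current == 3  # jumps 1 -> 3
```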
### Multi-writer streams

There are two ways to view a multi-writer stream.

1. Treat it as a collection of distinct single-writer streams, one for each writer.
2. Treat it as a single stream.

The single stream (option 2) is conceptually simpler, and easier to represent (a single stream ID).
However, it requires each reader to know about the entire set of writers, to ensure that readers don't erroneously advance their current stream position too early and miss a fact from an unknown writer.
In contrast, multiple parallel streams (option 1) are more complex, requiring more state to represent (a map from writer to stream ID).
The payoff is that readers can "peek" ahead to facts that have completed on one writer no matter the state of the others, reducing latency.

Note that a multi-writer stream can be viewed in both ways.
For example, the events stream is treated as multiple single-writer streams (option 1) by the sync handler, so that events are sent to clients as soon as possible.
But the background process that works through events treats them as a single linear stream.

Another useful example is the cache invalidation stream.
The facts this stream holds are instructions to "you should now invalidate these cache entries".
We only ever treat this as multiple single-writer streams, as there is no important ordering between cache invalidations.
(Invalidations are self-contained facts, and the invalidations commute/are idempotent.)

### Writing to streams

Writers need to track:
- their current position (i.e. their own per-writer stream ID); and
- their facts currently awaiting completion.

At startup,
- the current position of that writer can be found by querying the database (which suggests that facts need to be written to the database atomically, in a transaction); and
- there are no facts awaiting completion.

To reserve a stream ID, call [`nextval`](https://www.postgresql.org/docs/current/functions-sequence.html) on the appropriate postgres sequence.

To write a fact to the stream: insert the appropriate rows into the appropriate backing table.

To complete a fact, first remove it from your map of facts currently awaiting completion.
Then, if no earlier fact is awaiting completion, the writer can advance its current position in that stream.
Upon doing so it should emit an `RDATA` message[^3], once for every fact between the old and the new stream ID.
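Putting the write path together, one fact's lifecycle might be sketched as follows (pseudo-Python; `db`, the sequence name, and the table name are hypothetical stand-ins, not Synapse's actual API):

```python
def write_fact(db, awaiting_completion: set[int], data: bytes) -> int:
    # 1. Reserve a stream ID from the postgres sequence (name is a stand-in).
    stream_id = db.execute("SELECT nextval('events_stream_seq')").fetchone()[0]
    awaiting_completion.add(stream_id)
    try:
        # 2. Write the fact: insert the appropriate row(s) into the backing
        #    table (table and columns are stand-ins), inside a transaction.
        db.execute(
            "INSERT INTO events_backing (stream_id, data) VALUES (%s, %s)",
            (stream_id, data),
        )
    finally:
        # 3. Mark the fact complete even on rollback: unhappy cases count too.
        awaiting_completion.discard(stream_id)
        # 4. If no earlier fact is still awaiting completion, advance the
        #    writer's current position and emit one RDATA message per fact
        #    between the old and the new position.
    return stream_id
```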
### Subscribing to streams

Readers need to track the current position of every writer.

At startup, they can find this by contacting each writer with a `REPLICATE` message,
requesting that all writers reply describing their current position in their streams.
Writers reply with a `POSITION` message.

To learn about new facts, readers should listen for `RDATA` messages and process them to respond to the new fact.
The `RDATA` itself is not a self-contained representation of the fact;
readers will have to query the stream tables for the full details.
Readers must also advance their record of the writer's current position for that stream.

### Summary

In a nutshell: we have an append-only log with a "buffer/scratchpad" at the end where we have to wait for the sequence to be linear and contiguous.

---

[^1]: We use the word _fact_ here for two reasons.
    Firstly, the word "event" is already heavily overloaded (PDUs, EDUs, account data, ...) and we don't need to make that worse.
    Secondly, "fact" emphasises that the things we append to a stream cannot change after the fact.

[^2]: A fact might be expressed with 0 rows, e.g. if we opened a transaction to persist an event, but failed and rolled the transaction back before marking the fact as completed.
    In principle a fact might be expressed with 2 or more rows; if so, each of those rows should share the fact's stream ID.

[^3]: This communication used to happen directly with the writers [over TCP](../../tcp_replication.md);
    nowadays it's done via Redis's Pubsub.
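On the read side, the bookkeeping described above can be sketched like so (illustrative only; the handler names are hypothetical stand-ins for Synapse's replication command handling):

```python
class StreamReader:
    """Toy reader tracking each writer's current position on one stream."""

    def __init__(self) -> None:
        self.positions: dict[str, int] = {}  # writer name -> current stream ID

    def on_position(self, writer: str, stream_id: int) -> None:
        # A POSITION reply to our startup REPLICATE request.
        self.positions[writer] = stream_id

    def on_rdata(self, writer: str, stream_id: int) -> None:
        # RDATA is not self-contained: fetch the full details from the
        # stream's backing table before reacting, then advance our record
        # of this writer's position.
        self.process_fact_from_database(stream_id)
        self.positions[writer] = stream_id

    def process_fact_from_database(self, stream_id: int) -> None:
        ...  # query the backing table for the row(s) with this stream ID
```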
````diff
@@ -95,7 +95,7 @@ matrix.example.com {
 }
 
 example.com:8448 {
-    reverse_proxy localhost:8008
+    reverse_proxy /_matrix/* localhost:8008
 }
 ```
````
@@ -135,8 +135,8 @@ Unofficial package are built for SLES 15 in the openSUSE:Backports:SLE-15 reposi

#### ArchLinux

-The quickest way to get up and running with ArchLinux is probably with the community package
-<https://archlinux.org/packages/community/x86_64/matrix-synapse/>, which should pull in most of
+The quickest way to get up and running with ArchLinux is probably with the package provided by ArchLinux
+<https://archlinux.org/packages/extra/x86_64/matrix-synapse/>, which should pull in most of
the necessary dependencies.

pip may be outdated (6.0.7-1 and needs to be upgraded to 6.0.8-1):

@@ -3,7 +3,7 @@
A structured logging system can be useful when your logs are destined for a
machine to parse and process. By maintaining its machine-readable characteristics,
it enables more efficient searching and aggregations when consumed by software
-such as the "ELK stack".
+such as the [ELK stack](https://opensource.com/article/18/9/open-source-log-aggregation-tools).

Synapse's structured logging system is configured via the file that Synapse's
`log_config` config option points to. The file should include a formatter which
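As a minimal sketch of such a log config (the file path is illustrative), using Synapse's JSON formatter:

```yaml
version: 1
formatters:
    structured:
        class: synapse.logging.TerseJsonFormatter
handlers:
    file:
        class: logging.handlers.TimedRotatingFileHandler
        formatter: structured
        filename: /var/log/synapse/homeserver.log
        when: midnight
root:
    level: INFO
    handlers: [file]
```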
@@ -88,6 +88,21 @@ process, for example:
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
```

# Upgrading to v1.90.0

## App service query parameter authorization is now a configuration option

Synapse v1.81.0 deprecated application service authorization via query parameters as this is
considered insecure - and from Synapse v1.71.0 forwards the application service token has also been sent via
[the `Authorization` header](https://spec.matrix.org/v1.6/application-service-api/#authorization), making the insecure
query parameter authorization redundant. Since removing the ability to continue to use query parameters could break
backwards compatibility, it has now been put behind a configuration option, `use_appservice_legacy_authorization`.
This option defaults to false, but can be activated by adding
```yaml
use_appservice_legacy_authorization: true
```
to your configuration.

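For illustration, a homeserver-to-appservice request looks like this under each scheme (endpoint and token values are made up):

```
# Legacy behaviour, re-enabled by use_appservice_legacy_authorization: true
PUT /_matrix/app/v1/transactions/42?access_token=hs_token_abc123 HTTP/1.1

# Current behaviour: the token travels in a header instead
PUT /_matrix/app/v1/transactions/42 HTTP/1.1
Authorization: Bearer hs_token_abc123
```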
# Upgrading to v1.89.0

## Removal of unspecced `user` property for `/register`
@@ -97,7 +112,6 @@ The standard `username` property should be used instead. See the
[Application Service specification](https://spec.matrix.org/v1.7/application-service-api/#server-admin-style-permissions)
for more information.


# Upgrading to v1.88.0

## Minimum supported Python version

@@ -1242,6 +1242,14 @@ like sending a federation transaction.
* `max_short_retries`: maximum number of retries for the short retry algo. Defaults to 3 attempts.
* `max_long_retries`: maximum number of retries for the long retry algo. Defaults to 10 attempts.

The following options control the retry logic when communicating with a specific homeserver destination.
Unlike the previous configuration options, these values apply across all requests
for a given destination, and the state of the backoff is stored in the database.

* `destination_min_retry_interval`: the initial backoff, after the first request fails. Defaults to 10m.
* `destination_retry_multiplier`: how much we multiply the backoff by after each subsequent fail. Defaults to 2.
* `destination_max_retry_interval`: a cap on the backoff. Defaults to a week.

Example configuration:
```yaml
federation:
@@ -1250,6 +1258,9 @@ federation:
  max_long_retry_delay: 100s
  max_short_retries: 5
  max_long_retries: 20
  destination_min_retry_interval: 30s
  destination_retry_multiplier: 5
  destination_max_retry_interval: 12h
```
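To see how these three options interact, here is a sketch (not Synapse's actual code) of the retry schedule produced by the example values above:

```python
from itertools import islice

def destination_backoff(min_interval=30, multiplier=5, max_interval=12 * 3600):
    """Yield successive retry intervals, in seconds, for a failing destination."""
    interval = min_interval
    while True:
        yield interval
        interval = min(interval * multiplier, max_interval)

print(list(islice(destination_backoff(), 6)))
# [30, 150, 750, 3750, 18750, 43200] -- growth is capped at 12h
```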
---
## Caching
@@ -2837,6 +2848,20 @@ Example configuration:
```yaml
track_appservice_user_ips: true
```
---
### `use_appservice_legacy_authorization`

Whether to send the application service access tokens via the `access_token` query parameter
per older versions of the Matrix specification. Defaults to false. Set to true to enable sending
access tokens via a query parameter.

**Enabling this option is considered insecure and is not recommended.**

Example configuration:
```yaml
use_appservice_legacy_authorization: true
```

---
### `macaroon_secret_key`

@@ -3000,6 +3025,16 @@ enable SAML login. You can either put your entire pysaml config inline using the
option, or you can specify a path to a pysaml2 config file with the sub-option `config_path`.
This setting has the following sub-options:

* `idp_name`: A user-facing name for this identity provider, which is used to
  offer the user a choice of login mechanisms.
* `idp_icon`: An optional icon for this identity provider, which is presented
  by clients and Synapse's own IdP picker page. If given, must be an
  MXC URI of the format `mxc://<server-name>/<media-id>`. (An easy way to
  obtain such an MXC URI is to upload an image to an (unencrypted) room
  and then copy the "url" from the source of the event.)
* `idp_brand`: An optional brand for this identity provider, allowing clients
  to style the login flow according to the identity provider in question.
  See the [spec](https://spec.matrix.org/latest/) for possible options here.
* `sp_config`: the configuration for the pysaml2 Service Provider. See pysaml2 docs for format of config.
  Default values will be used for the `entityid` and `service` settings,
  so it is not normally necessary to specify them unless you need to
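As an illustrative sketch only (the IdP URL and name below are made up), an inline `sp_config` might look like:

```yaml
saml2_config:
  sp_config:
    metadata:
      remote:
        - url: https://idp.example.com/idp/metadata.xml
  idp_name: "Example IdP"
```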
@@ -3151,7 +3186,7 @@ Options for each entry include:

* `idp_icon`: An optional icon for this identity provider, which is presented
  by clients and Synapse's own IdP picker page. If given, must be an
- MXC URI of the format mxc://<server-name>/<media-id>. (An easy way to
+ MXC URI of the format `mxc://<server-name>/<media-id>`. (An easy way to
  obtain such an MXC URI is to upload an image to an (unencrypted) room
  and then copy the "url" from the source of the event.)

@@ -3169,6 +3204,14 @@ Options for each entry include:

* `client_secret`: oauth2 client secret to use. May be omitted if
  `client_secret_jwt_key` is given, or if `client_auth_method` is 'none'.
  Must be omitted if `client_secret_path` is specified.

* `client_secret_path`: path to the oauth2 client secret to use. This avoids
  leaking the secret into the config file itself.
  Mutually exclusive with `client_secret`. Can be omitted if
  `client_secret_jwt_key` is specified.

  *Added in Synapse 1.91.0.*

* `client_secret_jwt_key`: Alternative to `client_secret`: details of a key used
  to create a JSON Web Token to be used as an OAuth2 client secret. If
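A hedged sketch of `client_secret_path` in use (the provider details and path are made up):

```yaml
oidc_providers:
  - idp_id: example
    idp_name: Example
    issuer: "https://accounts.example.com/"
    client_id: "synapse"
    # The secret lives outside the config file:
    client_secret_path: /run/secrets/oidc_client_secret
```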
@@ -3366,6 +3409,16 @@ Enable Central Authentication Service (CAS) for registration and login.
Has the following sub-options:
* `enabled`: Set this to true to enable authorization against a CAS server.
  Defaults to false.
* `idp_name`: A user-facing name for this identity provider, which is used to
  offer the user a choice of login mechanisms.
* `idp_icon`: An optional icon for this identity provider, which is presented
  by clients and Synapse's own IdP picker page. If given, must be an
  MXC URI of the format `mxc://<server-name>/<media-id>`. (An easy way to
  obtain such an MXC URI is to upload an image to an (unencrypted) room
  and then copy the "url" from the source of the event.)
* `idp_brand`: An optional brand for this identity provider, allowing clients
  to style the login flow according to the identity provider in question.
  See the [spec](https://spec.matrix.org/latest/) for possible options here.
* `server_url`: The URL of the CAS authorization endpoint.
* `displayname_attribute`: The attribute of the CAS response to use as the display name.
  If no name is given here, no displayname will be set.
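For example (the server URL is illustrative):

```yaml
cas_config:
  enabled: true
  server_url: "https://cas.example.com/cas"
  idp_name: "Example CAS"
  displayname_attribute: name
```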
@@ -3606,6 +3659,7 @@ This option has the following sub-options:
* `prefer_local_users`: Defines whether to prefer local users in search query results.
  If set to true, local users are more likely to appear above remote users when searching the
  user directory. Defaults to false.
* `show_locked_users`: Defines whether to show locked users in search query results. Defaults to false.

Example configuration:
```yaml
@@ -3613,6 +3667,7 @@ user_directory:
  enabled: false
  search_all_users: true
  prefer_local_users: true
  show_locked_users: true
```
---
### `user_consent`

@@ -24,6 +24,11 @@ Finally, we also stylise the chapter titles in the left sidebar by indenting the
slightly so that they are more visually distinguishable from the section headers
(the bold titles). This is done through the `indent-section-headers.css` file.

In addition to these modifications, we have added a version picker to the documentation.
Users can switch between the documentation for different versions of Synapse.
This functionality was implemented through the `version-picker.js` and
`version-picker.css` files.

More information can be found in mdbook's official documentation for
[injecting page JS/CSS](https://rust-lang.github.io/mdBook/format/config.html)
and
@@ -131,6 +131,18 @@
<i class="fa fa-search"></i>
</button>
{{/if}}
<div class="version-picker">
    <div class="dropdown">
        <div class="select">
            <span></span>
            <i class="fa fa-chevron-down"></i>
        </div>
        <input type="hidden" name="version">
        <ul class="dropdown-menu">
            <!-- Versions will be added dynamically in version-picker.js -->
        </ul>
    </div>
</div>
</div>

<h1 class="menu-title">{{ book_title }}</h1>
@@ -309,4 +321,4 @@
{{/if}}

</body>
-</html>
+</html>
78 docs/website_files/version-picker.css Normal file
@@ -0,0 +1,78 @@
.version-picker {
    display: flex;
    align-items: center;
}

.version-picker .dropdown {
    width: 130px;
    max-height: 29px;
    margin-left: 10px;
    display: inline-block;
    border-radius: 4px;
    border: 1px solid var(--theme-popup-border);
    position: relative;
    font-size: 13px;
    color: var(--fg);
    height: 100%;
    text-align: left;
}
.version-picker .dropdown .select {
    cursor: pointer;
    display: block;
    padding: 5px 2px 5px 15px;
}
.version-picker .dropdown .select > i {
    font-size: 10px;
    color: var(--fg);
    cursor: pointer;
    float: right;
    line-height: 20px !important;
}
.version-picker .dropdown:hover {
    border: 1px solid var(--theme-popup-border);
}
.version-picker .dropdown:active {
    background-color: var(--theme-popup-bg);
}
.version-picker .dropdown.active:hover,
.version-picker .dropdown.active {
    border: 1px solid var(--theme-popup-border);
    border-radius: 2px 2px 0 0;
    background-color: var(--theme-popup-bg);
}
.version-picker .dropdown.active .select > i {
    transform: rotate(-180deg);
}
.version-picker .dropdown .dropdown-menu {
    position: absolute;
    background-color: var(--theme-popup-bg);
    width: 100%;
    left: -1px;
    right: 1px;
    margin-top: 1px;
    border: 1px solid var(--theme-popup-border);
    border-radius: 0 0 4px 4px;
    overflow: hidden;
    display: none;
    max-height: 300px;
    overflow-y: auto;
    z-index: 9;
}
.version-picker .dropdown .dropdown-menu li {
    font-size: 12px;
    padding: 6px 20px;
    cursor: pointer;
}
.version-picker .dropdown .dropdown-menu {
    padding: 0;
    list-style: none;
}
.version-picker .dropdown .dropdown-menu li:hover {
    background-color: var(--theme-hover);
}
.version-picker .dropdown .dropdown-menu li.active::before {
    display: inline-block;
    content: "✓";
    margin-inline-start: -14px;
    width: 14px;
}
127 docs/website_files/version-picker.js Normal file
@@ -0,0 +1,127 @@

const dropdown = document.querySelector('.version-picker .dropdown');
const dropdownMenu = dropdown.querySelector('.dropdown-menu');

fetchVersions(dropdown, dropdownMenu).then(() => {
    initializeVersionDropdown(dropdown, dropdownMenu);
});

/**
 * Initialize the dropdown functionality for version selection.
 *
 * @param {Element} dropdown - The dropdown element.
 * @param {Element} dropdownMenu - The dropdown menu element.
 */
function initializeVersionDropdown(dropdown, dropdownMenu) {
    // Toggle the dropdown menu on click
    dropdown.addEventListener('click', function () {
        this.setAttribute('tabindex', 1);
        this.classList.toggle('active');
        dropdownMenu.style.display = (dropdownMenu.style.display === 'block') ? 'none' : 'block';
    });

    // Remove the 'active' class and hide the dropdown menu on focusout
    dropdown.addEventListener('focusout', function () {
        this.classList.remove('active');
        dropdownMenu.style.display = 'none';
    });

    // Handle item selection within the dropdown menu
    const dropdownMenuItems = dropdownMenu.querySelectorAll('li');
    dropdownMenuItems.forEach(function (item) {
        item.addEventListener('click', function () {
            dropdownMenuItems.forEach(function (item) {
                item.classList.remove('active');
            });
            this.classList.add('active');
            dropdown.querySelector('span').textContent = this.textContent;
            dropdown.querySelector('input').value = this.getAttribute('id');

            window.location.href = changeVersion(window.location.href, this.textContent);
        });
    });
};

/**
 * This function fetches the available versions from a GitHub repository
 * and inserts them into the version picker.
 *
 * @param {Element} dropdown - The dropdown element.
 * @param {Element} dropdownMenu - The dropdown menu element.
 * @returns {Promise<Array<string>>} A promise that resolves with an array of available versions.
 */
function fetchVersions(dropdown, dropdownMenu) {
    return new Promise((resolve, reject) => {
        window.addEventListener("load", () => {

            fetch("https://api.github.com/repos/matrix-org/synapse/git/trees/gh-pages", {
                cache: "force-cache",
            }).then(res =>
                res.json()
            ).then(resObject => {
                const excluded = ['dev-docs', 'v1.91.0', 'v1.80.0', 'v1.69.0'];
                const tree = resObject.tree.filter(item => item.type === "tree" && !excluded.includes(item.path));
                const versions = tree.map(item => item.path).sort(sortVersions);

                // Create a list of <li> items for versions
                versions.forEach((version) => {
                    const li = document.createElement("li");
                    li.textContent = version;
                    li.id = version;

                    if (window.SYNAPSE_VERSION === version) {
                        li.classList.add('active');
                        dropdown.querySelector('span').textContent = version;
                        dropdown.querySelector('input').value = version;
                    }

                    dropdownMenu.appendChild(li);
                });

                resolve(versions);

            }).catch(ex => {
                console.error("Failed to fetch version data", ex);
                reject(ex);
            })
        });
    });
}

/**
 * Custom sorting function to sort an array of version strings.
 *
 * @param {string} a - The first version string to compare.
 * @param {string} b - The second version string to compare.
 * @returns {number} - A negative number if a should come before b, a positive number if b should come before a, or 0 if they are equal.
 */
function sortVersions(a, b) {
    // Put 'develop' and 'latest' at the top
    if (a === 'develop' || a === 'latest') return -1;
    if (b === 'develop' || b === 'latest') return 1;

    const versionA = (a.match(/v\d+(\.\d+)+/) || [])[0];
    const versionB = (b.match(/v\d+(\.\d+)+/) || [])[0];

    return versionB.localeCompare(versionA);
}

/**
 * Change the version in a URL path.
 *
 * @param {string} url - The original URL to be modified.
 * @param {string} newVersion - The new version to replace the existing version in the URL.
 * @returns {string} The updated URL with the new version.
 */
function changeVersion(url, newVersion) {
    const parsedURL = new URL(url);
    const pathSegments = parsedURL.pathname.split('/');

    // Modify the version
    pathSegments[2] = newVersion;

    // Reconstruct the URL
    parsedURL.pathname = pathSegments.join('/');

    return parsedURL.href;
}
1 docs/website_files/version.js Normal file
@@ -0,0 +1 @@
window.SYNAPSE_VERSION = 'v1.91';

62 flake.lock generated
@@ -8,16 +8,16 @@
        "pre-commit-hooks": "pre-commit-hooks"
      },
      "locked": {
-       "lastModified": 1683102061,
-       "narHash": "sha256-kOphT6V0uQUlFNBP3GBjs7DAU7fyZGGqCs9ue1gNY6E=",
+       "lastModified": 1688058187,
+       "narHash": "sha256-ipDcc7qrucpJ0+0eYNlwnE+ISTcq4m03qW+CWUshRXI=",
        "owner": "cachix",
        "repo": "devenv",
-       "rev": "ff1f29e41756553174d596cafe3a9fa77595100b",
+       "rev": "c8778e3dc30eb9043e218aaa3861d42d4992de77",
        "type": "github"
      },
      "original": {
        "owner": "cachix",
-       "ref": "main",
+       "ref": "v0.6.3",
        "repo": "devenv",
        "type": "github"
      }
@@ -39,12 +39,15 @@
      }
    },
    "flake-utils": {
+     "inputs": {
+       "systems": "systems"
+     },
      "locked": {
-       "lastModified": 1667395993,
-       "narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=",
+       "lastModified": 1685518550,
+       "narHash": "sha256-o2d0KcvaXzTrPRIo0kOLV0/QXHhDQ5DTi+OxcjO8xqY=",
        "owner": "numtide",
        "repo": "flake-utils",
-       "rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f",
+       "rev": "a1720a10a6cfe8234c0e93907ffe81be440f4cef",
        "type": "github"
      },
      "original": {
@@ -55,7 +58,7 @@
    },
    "flake-utils_2": {
      "inputs": {
-       "systems": "systems"
+       "systems": "systems_2"
      },
      "locked": {
        "lastModified": 1681202837,
@@ -167,27 +170,27 @@
    },
    "nixpkgs-stable": {
      "locked": {
-       "lastModified": 1673800717,
-       "narHash": "sha256-SFHraUqLSu5cC6IxTprex/nTsI81ZQAtDvlBvGDWfnA=",
+       "lastModified": 1685801374,
+       "narHash": "sha256-otaSUoFEMM+LjBI1XL/xGB5ao6IwnZOXc47qhIgJe8U=",
        "owner": "NixOS",
        "repo": "nixpkgs",
-       "rev": "2f9fd351ec37f5d479556cd48be4ca340da59b8f",
+       "rev": "c37ca420157f4abc31e26f436c1145f8951ff373",
        "type": "github"
      },
      "original": {
        "owner": "NixOS",
-       "ref": "nixos-22.11",
+       "ref": "nixos-23.05",
        "repo": "nixpkgs",
        "type": "github"
      }
    },
    "nixpkgs_2": {
      "locked": {
-       "lastModified": 1682519441,
-       "narHash": "sha256-Vsq/8NOtvW1AoC6shCBxRxZyMQ+LhvPuJT6ltbzuv+Y=",
+       "lastModified": 1690535733,
+       "narHash": "sha256-WgjUPscQOw3cB8yySDGlyzo6cZNihnRzUwE9kadv/5I=",
        "owner": "NixOS",
        "repo": "nixpkgs",
-       "rev": "7a32a141db568abde9bc389845949dc2a454dfd3",
+       "rev": "8cacc05fbfffeaab910e8c2c9e2a7c6b32ce881a",
        "type": "github"
      },
      "original": {
@@ -228,11 +231,11 @@
        "nixpkgs-stable": "nixpkgs-stable"
      },
      "locked": {
-       "lastModified": 1678376203,
-       "narHash": "sha256-3tyYGyC8h7fBwncLZy5nCUjTJPrHbmNwp47LlNLOHSM=",
+       "lastModified": 1688056373,
+       "narHash": "sha256-2+SDlNRTKsgo3LBRiMUcoEUb6sDViRNQhzJquZ4koOI=",
        "owner": "cachix",
        "repo": "pre-commit-hooks.nix",
-       "rev": "1a20b9708962096ec2481eeb2ddca29ed747770a",
+       "rev": "5843cf069272d92b60c3ed9e55b7a8989c01d4c7",
        "type": "github"
      },
      "original": {
@@ -246,7 +249,7 @@
        "devenv": "devenv",
        "nixpkgs": "nixpkgs_2",
        "rust-overlay": "rust-overlay",
-       "systems": "systems_2"
+       "systems": "systems_3"
      }
    },
    "rust-overlay": {
@@ -255,11 +258,11 @@
        "nixpkgs": "nixpkgs_3"
      },
      "locked": {
-       "lastModified": 1689302058,
-       "narHash": "sha256-yD74lcHTrw4niXcE9goJLbzsgyce48rQQoy5jK5ZK40=",
+       "lastModified": 1690510705,
+       "narHash": "sha256-6mjs3Gl9/xrseFh9iNcNq1u5yJ/MIoAmjoaG7SXZDIE=",
        "owner": "oxalica",
        "repo": "rust-overlay",
-       "rev": "7b8dbbf4c67ed05a9bf3d9e658c12d4108bc24c8",
+       "rev": "851ae4c128905a62834d53ce7704ebc1ba481bea",
        "type": "github"
      },
      "original": {
@@ -297,6 +300,21 @@
        "repo": "default",
        "type": "github"
      }
    },
+   "systems_3": {
+     "locked": {
+       "lastModified": 1681028828,
+       "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
+       "owner": "nix-systems",
+       "repo": "default",
+       "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
+       "type": "github"
+     },
+     "original": {
+       "owner": "nix-systems",
+       "repo": "default",
+       "type": "github"
+     }
+   },
    "root": "root",

@@ -39,13 +39,13 @@

{
  inputs = {
-   # Use the master/unstable branch of nixpkgs. The latest stable, 22.11,
-   # does not contain 'perl536Packages.NetAsyncHTTP', needed by Sytest.
+   # Use the master/unstable branch of nixpkgs. Used to fetch the latest
+   # available versions of packages.
    nixpkgs.url = "github:NixOS/nixpkgs/master";
    # Output a development shell for x86_64/aarch64 Linux/Darwin (MacOS).
    systems.url = "github:nix-systems/default";
    # A development environment manager built on Nix. See https://devenv.sh.
-   devenv.url = "github:cachix/devenv/main";
+   devenv.url = "github:cachix/devenv/v0.6.3";
    # Rust toolchain.
    rust-overlay.url = "github:oxalica/rust-overlay";
  };

7 mypy.ini
@@ -45,6 +45,13 @@ warn_unused_ignores = False
disallow_untyped_defs = False
disallow_incomplete_defs = False

[mypy-synapse.util.manhole]
# This module imports something from Twisted which has a bad annotation in Twisted trunk,
# but is unannotated in Twisted's latest release. We want to type-ignore the problem
# in the twisted trunk job, even though it has no effect on normal mypy runs.
warn_unused_ignores = False


;; Dependencies without annotations
;; Before ignoring a module, check to see if type stubs are available.
;; The `typeshed` project maintains stubs here:

575 poetry.lock generated
@@ -226,13 +226,13 @@ files = [

[[package]]
name = "certifi"
-version = "2022.12.7"
+version = "2023.7.22"
description = "Python package for providing Mozilla's CA Bundle."
optional = false
python-versions = ">=3.6"
files = [
-    {file = "certifi-2022.12.7-py3-none-any.whl", hash = "sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18"},
-    {file = "certifi-2022.12.7.tar.gz", hash = "sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3"},
+    {file = "certifi-2023.7.22-py3-none-any.whl", hash = "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9"},
+    {file = "certifi-2023.7.22.tar.gz", hash = "sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082"},
]

[[package]]
@@ -397,13 +397,13 @@ files = [

[[package]]
name = "click"
-version = "8.1.3"
+version = "8.1.7"
description = "Composable command line interface toolkit"
optional = false
python-versions = ">=3.7"
files = [
-    {file = "click-8.1.3-py3-none-any.whl", hash = "sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48"},
-    {file = "click-8.1.3.tar.gz", hash = "sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e"},
+    {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"},
+    {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"},
]

[package.dependencies]
@@ -460,34 +460,34 @@ files = [

[[package]]
name = "cryptography"
-version = "41.0.2"
+version = "41.0.3"
description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
optional = false
python-versions = ">=3.7"
files = [
-    {file = "cryptography-41.0.2-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:01f1d9e537f9a15b037d5d9ee442b8c22e3ae11ce65ea1f3316a41c78756b711"},
-    {file = "cryptography-41.0.2-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:079347de771f9282fbfe0e0236c716686950c19dee1b76240ab09ce1624d76d7"},
-    {file = "cryptography-41.0.2-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:439c3cc4c0d42fa999b83ded80a9a1fb54d53c58d6e59234cfe97f241e6c781d"},
-    {file = "cryptography-41.0.2-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f14ad275364c8b4e525d018f6716537ae7b6d369c094805cae45300847e0894f"},
-    {file = "cryptography-41.0.2-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:84609ade00a6ec59a89729e87a503c6e36af98ddcd566d5f3be52e29ba993182"},
-    {file = "cryptography-41.0.2-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:49c3222bb8f8e800aead2e376cbef687bc9e3cb9b58b29a261210456a7783d83"},
-    {file = "cryptography-41.0.2-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:d73f419a56d74fef257955f51b18d046f3506270a5fd2ac5febbfa259d6c0fa5"},
-    {file = "cryptography-41.0.2-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:2a034bf7d9ca894720f2ec1d8b7b5832d7e363571828037f9e0c4f18c1b58a58"},
-    {file = "cryptography-41.0.2-cp37-abi3-win32.whl", hash = "sha256:d124682c7a23c9764e54ca9ab5b308b14b18eba02722b8659fb238546de83a76"},
-    {file = "cryptography-41.0.2-cp37-abi3-win_amd64.whl", hash = "sha256:9c3fe6534d59d071ee82081ca3d71eed3210f76ebd0361798c74abc2bcf347d4"},
-    {file = "cryptography-41.0.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a719399b99377b218dac6cf547b6ec54e6ef20207b6165126a280b0ce97e0d2a"},
-    {file = "cryptography-41.0.2-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:182be4171f9332b6741ee818ec27daff9fb00349f706629f5cbf417bd50e66fd"},
-    {file = "cryptography-41.0.2-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:7a9a3bced53b7f09da251685224d6a260c3cb291768f54954e28f03ef14e3766"},
-    {file = "cryptography-41.0.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:f0dc40e6f7aa37af01aba07277d3d64d5a03dc66d682097541ec4da03cc140ee"},
-    {file = "cryptography-41.0.2-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:674b669d5daa64206c38e507808aae49904c988fa0a71c935e7006a3e1e83831"},
-    {file = "cryptography-41.0.2-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:7af244b012711a26196450d34f483357e42aeddb04128885d95a69bd8b14b69b"},
-    {file = "cryptography-41.0.2-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:9b6d717393dbae53d4e52684ef4f022444fc1cce3c48c38cb74fca29e1f08eaa"},
-    {file = "cryptography-41.0.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:192255f539d7a89f2102d07d7375b1e0a81f7478925b3bc2e0549ebf739dae0e"},
-    {file = "cryptography-41.0.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f772610fe364372de33d76edcd313636a25684edb94cee53fd790195f5989d14"},
-    {file = "cryptography-41.0.2-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:b332cba64d99a70c1e0836902720887fb4529ea49ea7f5462cf6640e095e11d2"},
-    {file = "cryptography-41.0.2-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:9a6673c1828db6270b76b22cc696f40cde9043eb90373da5c2f8f2158957f42f"},
-    {file = "cryptography-41.0.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:342f3767e25876751e14f8459ad85e77e660537ca0a066e10e75df9c9e9099f0"},
-    {file = "cryptography-41.0.2.tar.gz", hash = "sha256:7d230bf856164de164ecb615ccc14c7fc6de6906ddd5b491f3af90d3514c925c"},
+    {file = "cryptography-41.0.3-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:652627a055cb52a84f8c448185922241dd5217443ca194d5739b44612c5e6507"},
+    {file = "cryptography-41.0.3-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:8f09daa483aedea50d249ef98ed500569841d6498aa9c9f4b0531b9964658922"},
+    {file = "cryptography-41.0.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4fd871184321100fb400d759ad0cddddf284c4b696568204d281c902fc7b0d81"},
+    {file = "cryptography-41.0.3-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:84537453d57f55a50a5b6835622ee405816999a7113267739a1b4581f83535bd"},
+    {file = "cryptography-41.0.3-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:3fb248989b6363906827284cd20cca63bb1a757e0a2864d4c1682a985e3dca47"},
+    {file = "cryptography-41.0.3-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:42cb413e01a5d36da9929baa9d70ca90d90b969269e5a12d39c1e0d475010116"},
+    {file = "cryptography-41.0.3-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:aeb57c421b34af8f9fe830e1955bf493a86a7996cc1338fe41b30047d16e962c"},
+    {file = "cryptography-41.0.3-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:6af1c6387c531cd364b72c28daa29232162010d952ceb7e5ca8e2827526aceae"},
+    {file = "cryptography-41.0.3-cp37-abi3-win32.whl", hash = "sha256:0d09fb5356f975974dbcb595ad2d178305e5050656affb7890a1583f5e02a306"},
+    {file = "cryptography-41.0.3-cp37-abi3-win_amd64.whl", hash = "sha256:a983e441a00a9d57a4d7c91b3116a37ae602907a7618b882c8013b5762e80574"},
+    {file = "cryptography-41.0.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5259cb659aa43005eb55a0e4ff2c825ca111a0da1814202c64d28a985d33b087"},
+    {file = "cryptography-41.0.3-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:67e120e9a577c64fe1f611e53b30b3e69744e5910ff3b6e97e935aeb96005858"},
+    {file = "cryptography-41.0.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:7efe8041897fe7a50863e51b77789b657a133c75c3b094e51b5e4b5cec7bf906"},
+    {file = "cryptography-41.0.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ce785cf81a7bdade534297ef9e490ddff800d956625020ab2ec2780a556c313e"},
+    {file = "cryptography-41.0.3-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:57a51b89f954f216a81c9d057bf1a24e2f36e764a1ca9a501a6964eb4a6800dd"},
+    {file = "cryptography-41.0.3-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:4c2f0d35703d61002a2bbdcf15548ebb701cfdd83cdc12471d2bae80878a4207"},
+    {file = "cryptography-41.0.3-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:23c2d778cf829f7d0ae180600b17e9fceea3c2ef8b31a99e3c694cbbf3a24b84"},
+    {file = "cryptography-41.0.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:95dd7f261bb76948b52a5330ba5202b91a26fbac13ad0e9fc8a3ac04752058c7"},
+    {file = "cryptography-41.0.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:41d7aa7cdfded09b3d73a47f429c298e80796c8e825ddfadc84c8a7f12df212d"},
+    {file = "cryptography-41.0.3-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:d0d651aa754ef58d75cec6edfbd21259d93810b73f6ec246436a21b7841908de"},
+    {file = "cryptography-41.0.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:ab8de0d091acbf778f74286f4989cf3d1528336af1b59f3e5d2ebca8b5fe49e1"},
+    {file = "cryptography-41.0.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a74fbcdb2a0d46fe00504f571a2a540532f4c188e6ccf26f1f178480117b33c4"},
+    {file = "cryptography-41.0.3.tar.gz", hash = "sha256:6d192741113ef5e30d89dcb5b956ef4e1578f304708701b8b73d38e3e1461f34"},
]

[package.dependencies]
@@ -558,13 +558,13 @@ dev = ["Sphinx", "coverage", "flake8", "lxml", "lxml-stubs", "memory-profiler",

[[package]]
name = "furo"
-version = "2023.5.20"
+version = "2023.7.26"
description = "A clean customisable Sphinx documentation theme."
optional = false
python-versions = ">=3.7"
files = [
-    {file = "furo-2023.5.20-py3-none-any.whl", hash = "sha256:594a8436ddfe0c071f3a9e9a209c314a219d8341f3f1af33fdf7c69544fab9e6"},
-    {file = "furo-2023.5.20.tar.gz", hash = "sha256:40e09fa17c6f4b22419d122e933089226dcdb59747b5b6c79363089827dea16f"},
+    {file = "furo-2023.7.26-py3-none-any.whl", hash = "sha256:1c7936929ec57c5ddecc7c85f07fa8b2ce536b5c89137764cca508be90e11efd"},
+    {file = "furo-2023.7.26.tar.gz", hash = "sha256:257f63bab97aa85213a1fa24303837a3c3f30be92901ec732fea74290800f59e"},
]

[package.dependencies]
@@ -589,13 +589,13 @@ smmap = ">=3.0.1,<6"

[[package]]
name = "gitpython"
-version = "3.1.31"
+version = "3.1.32"
description = "GitPython is a Python library used to interact with Git repositories"
optional = false
python-versions = ">=3.7"
files = [
-    {file = "GitPython-3.1.31-py3-none-any.whl", hash = "sha256:f04893614f6aa713a60cbbe1e6a97403ef633103cdd0ef5eb6efe0deb98dbe8d"},
-    {file = "GitPython-3.1.31.tar.gz", hash = "sha256:8ce3bcf69adfdf7c7d503e78fd3b1c492af782d58893b650adb2ac8912ddd573"},
+    {file = "GitPython-3.1.32-py3-none-any.whl", hash = "sha256:e3d59b1c2c6ebb9dfa7a184daf3b6dd4914237e7488a1730a6d8f6f5d0b4187f"},
+    {file = "GitPython-3.1.32.tar.gz", hash = "sha256:8d9b8cb1e80b9735e8717c9362079d3ce4c6e5ddeebedd0361b228c3a67a62f6"},
]

[package.dependencies]
@@ -726,89 +726,89 @@ files = [

[[package]]
name = "ijson"
-version = "3.2.1"
+version = "3.2.3"
description = "Iterative JSON parser with standard Python iterator interfaces"
optional = false
python-versions = "*"
files = [
-    {file = "ijson-3.2.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:6f827f6961f093e1055a2be0c3137f0e7d667979da455ac9648f72d4a2bb8970"},
-    {file = "ijson-3.2.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b6e51f4497065cd0d09f5e906cd538a8d22609eab716e3c883769acf147ab1b6"},
-    {file = "ijson-3.2.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f022686c40bff3e340627a5a0c9212718d529e787ada3b76ba546d47a9ecdbbd"},
-    {file = "ijson-3.2.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e4105c15a13fa1dc24ebd3bf2e679fa14dcbfcc48bc39138a0fa3f4ddf6cc09b"},
-    {file = "ijson-3.2.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:404423e666f185dfb753ddc92705c84dffdc4cc872aaf825bbe0607893cb5b02"},
-    {file = "ijson-3.2.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:39e71f32830827cf21d0233a814092e5a23668e18f52eca5cac4f670d9df1240"},
-    {file = "ijson-3.2.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:43af7ed5292caa1452747e2b62485b6c0ece4bcbc5bf6f2758abd547e4124a14"},
-    {file = "ijson-3.2.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e805aa6897a11b0f73f1f6bca078981df8960aeeccf527a214f240409c742bab"},
-    {file = "ijson-3.2.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5b2df0bd84889e9017a670328fe3e82ec509fd6744c7ac2c99c7ee2300d76afa"},
-    {file = "ijson-3.2.1-cp310-cp310-win32.whl", hash = "sha256:675259c7ea7f51ffaf8cb9e79bf875e28bb09622892943f4f415588fd7ab7bec"},
-    {file = "ijson-3.2.1-cp310-cp310-win_amd64.whl", hash = "sha256:90d4b2eb771a3585c8186820fe50e3282ef62477b865e765a50a8295674abeac"},
-    {file = "ijson-3.2.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:fc581a61e210bf6013c1fa6536566e51127be1cfbd69539b63d8b813206d2fe0"},
-    {file = "ijson-3.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:75cdf7ad4c00a8f5ac94ff27e3b7c1bf5ac463f125bca2be1744c5bc9600db5c"},
-    {file = "ijson-3.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:85a2bf4636ace4d92e7c5d857a1c5694f42407c868953cf2927f18127bcd0d58"},
-    {file = "ijson-3.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9fe0cb66e7dd4aa11da5fff60bdf5ee04819a5e6a57acf7ca12c65f7fc009afc"},
-    {file = "ijson-3.2.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c6f7957ad38cb714378944032f2c2ee9c6531b5b0b38c5ccd08cedbb0ceddd02"},
-    {file = "ijson-3.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13283d264cca8a63e5bad91e82eec39711e95893e7e8d4a419799a8c5f85203a"},
-    {file = "ijson-3.2.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:12c24cde850fe79bc806be0e9fc38b47dd5ac0a223070ccb12e9b695425e2936"},
-    {file = "ijson-3.2.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:2ce8eed838e5a0791cb5948117b5453f2b3b3c28d93d06ee2bbf2c198c47881c"},
-    {file = "ijson-3.2.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:b81c2589f191b0dc741f532be00b4bea617297dd9698431c8053e2d28272d4db"},
-    {file = "ijson-3.2.1-cp311-cp311-win32.whl", hash = "sha256:ba2beac56ac96f728d0f2430e4c667c66819a423d321bb9db9ebdebd803e1b5b"},
-    {file = "ijson-3.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:c71614ed4bbc6a32ff1e42d7ce92a176fb67d658913343792d2c4567aa130817"},
-    {file = "ijson-3.2.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:683fc8b0ea085e75ea34044fdc70649b37367d494f132a2bd1e59d7135054d89"},
-    {file = "ijson-3.2.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:deeaecec2f4e20e8bec20b0a5cdc34daebe7903f2e700f7dcaef68b5925d35ea"},
-    {file = "ijson-3.2.1-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:11923ac3188877f19dbb7051f7345202701cc39bf8e5ac44f8ae536c9eca8c82"},
-    {file = "ijson-3.2.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:400deefcdae21e90fc39c1dcfc6ba2df24537e8c65bd57b763ed5256b73ba64d"},
-    {file = "ijson-3.2.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:56bc4bad53770710a3a91944fe640fdeb269987a14352b74ebbad2aa55801c00"},
-    {file = "ijson-3.2.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:f5a179523e085126844c6161aabcd193dbb5747bd01fadb68e92abf048f32ec9"},
-    {file = "ijson-3.2.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:ee24655986e4415fbb7a0cf51445fff3072ceac0e219f4bbbd5c53535a3c5680"},
-    {file = "ijson-3.2.1-cp36-cp36m-win32.whl", hash = "sha256:4a5c672b0540005c1bb0bba97aa559a87a2e4ee409fc68e2f5ba5b30f009ac99"},
-    {file = "ijson-3.2.1-cp36-cp36m-win_amd64.whl", hash = "sha256:cfaf1d89b0e122e69c87a15db6d6f44feb9db96d2af7fe88cdc464177a257b5d"},
-    {file = "ijson-3.2.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:1cbd052eb67c1b3611f25974ba967886e89391faaf55afec93808c19f06ca612"},
-    {file = "ijson-3.2.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f13ffc491886e5d7bde7d68712d168bce0141b2a918db1164bc8599c0123e293"},
-    {file = "ijson-3.2.1-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bc4c4fc6bafc777f8422fe36edb1cbd72a13cb29695893a064c9c95776a4bdf9"},
-    {file = "ijson-3.2.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a42fcb2bf9748c26f004690b2feb6e13e4875bb7c9d83535f887c21e0a982a7c"},
-    {file = "ijson-3.2.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:0c92f7bc2f3a947c2ba7f7aa48382c36079f8259c930e81d9164341f9b853c45"},
-    {file = "ijson-3.2.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:fd497042db562379339660e787bc8679ed3abaa740768d39bc3746e769e7c7a5"},
-    {file = "ijson-3.2.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:7d61c7cd8ddd75dcef818ff5a111a31b902a6a0e410ee0c2b2ecaa6dac92658a"},
-    {file = "ijson-3.2.1-cp37-cp37m-win32.whl", hash = "sha256:36caf624d263fc40e7e805d759d09ea368d8cf497aecb3241ac2f0a286ad8eca"},
-    {file = "ijson-3.2.1-cp37-cp37m-win_amd64.whl", hash = "sha256:32f9ed25ff80942e433119600bca13b86a8f9b8b0966edbc1d91a48ccbdd4d54"},
-    {file = "ijson-3.2.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:e89bbd747140eac3a3c9e7e5835b90d85c4a02763fc5134861bfc1ea03b66ae7"},
-    {file = "ijson-3.2.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d69b4b1d509de36ec42a0e4af30ede39fb754e4039b2928ef7282ebc2125ffdd"},
-    {file = "ijson-3.2.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e7feb0771f50deabe6ce85b210fa9e005843d3d3c60fb3315d69e1f9d0d75e0c"},
-    {file = "ijson-3.2.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5fd8148a363888054ff06eaaa1103f2f98720ab39666084a214e4fedfc13cf64"},
-    {file = "ijson-3.2.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:598638dcc5141e9ae269903901877103f5362e0db4443e34721df8f8d34577b4"},
-    {file = "ijson-3.2.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e979190b7d0fabca20d6b7224ab1c1aa461ad1ab72ba94f1bb1e5894cd59f342"},
-    {file = "ijson-3.2.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:bc810eb80b4f486c7957201ba2a53f53ddc9b3233af67e4359e29371bf04883b"},
-    {file = "ijson-3.2.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:26e758584611dfe826dd18ffd94dc0d8a062ce56e41674ad3bfa371c7b78c4b5"},
-    {file = "ijson-3.2.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:24e9ae5b35b85ea094b6c36495bc856089254aed6a48bada8d7eec5a04f74439"},
-    {file = "ijson-3.2.1-cp38-cp38-win32.whl", hash = "sha256:4b5dc7b5b4b8cb3087d188f37911cd67e26672d33d3571e73440de3f0a86f7e6"},
-    {file = "ijson-3.2.1-cp38-cp38-win_amd64.whl", hash = "sha256:1af94ff40609270bbb3eac47e072582bb578f5023fac8408cccd80fe5892d221"},
-    {file = "ijson-3.2.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:2dda67affceebc52c8bc5fe72c3a4a1e338e4d4b0497dbac5089c2d3862df214"},
-    {file = "ijson-3.2.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bd780303ddfedc8d57cdb9f2d53a8cea2f2f4a6fb857bf8fe5a0c3ab1d4ca901"},
-    {file = "ijson-3.2.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4fbab6af1bab88a8e46beda08cf44610eed0adb8d157a1a60b4bb6c3a121c6de"},
-    {file = "ijson-3.2.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c97a07988a1e0ce2bc8e8a62eb5f25195a3bd58a939ac353cbc6018a548cc08d"},
-    {file = "ijson-3.2.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a65671a6826ae723837143914c823ad7bcc0d1a3e38d87c71df897a2556fb48f"},
-    {file = "ijson-3.2.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a1806372008bbed9ee92db5747e38c047fa1c4ee89cb2dd5daaa57feb46ce50a"},
-    {file = "ijson-3.2.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:91e5a8e96f78a59e2520078c227a4fec5bf91c13adeded9e33fb13981cb823c3"},
-    {file = "ijson-3.2.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:1f820fce8ef093718f2319ff6f1322390664659b783775919dadccb1b470153d"},
-    {file = "ijson-3.2.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:bca3e8c91a1076a20620dbaa6a2848772b0e8a4055e86d42d3fa39221b53ed1a"},
-    {file = "ijson-3.2.1-cp39-cp39-win32.whl", hash = "sha256:de87f137b7438d43840f4339a37d4e6a58c987f4bb2a70609969f854f8ae20f3"},
-    {file = "ijson-3.2.1-cp39-cp39-win_amd64.whl", hash = "sha256:0caebb8350b47266a58b766ec08e1de441d6d160702c428b5cf7504d93c832c4"},
-    {file = "ijson-3.2.1-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:37389785c1abd27fcc24800fcfa9a6b1022743413e4056507fd32356b623ff33"},
-    {file = "ijson-3.2.1-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4b364b82231d51cbeae52468c3b27e8a042e544ab764c8f3975e912cf010603f"},
-    {file = "ijson-3.2.1-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a5999d0ec28a8ec47cf20c736fd4f895dc077bf6441bf237b00b074315a295d"},
-    {file = "ijson-3.2.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8bd481857a39305517fb6f1313d558c2dc4e78c9e9384cc5bc1c3e28f1afbedf"},
-    {file = "ijson-3.2.1-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:545f62f12f89350d4d73f2a779cb269198ae578fac080085a1927148b803e602"},
-    {file = "ijson-3.2.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:4d5622505d01c2f3d7b9638c1eb8c747eb550936b505225893704289ff28576f"},
-    {file = "ijson-3.2.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20293bb36423b129fad3753858ccf7b2ccb5b2c0d3759efe810d0b9d79633a7e"},
-    {file = "ijson-3.2.1-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7cd8a4921b852fd2cb5b0c985540c97ff6893139a57fe7121d510ec5d1c0ca44"},
-    {file = "ijson-3.2.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dc902ff1ae1efed7d526294d7a9dd3df66d29b2cdc05fb5479838fef1327a534"},
-    {file = "ijson-3.2.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:2925a7978d8170146a9cb49a15a982b71fbbf21980bf2e16cd90c528545b7c02"},
-    {file = "ijson-3.2.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:c21c6509f6944939399f3630c5dc424d30d71d375f6cd58f9af56158fdf7251c"},
-    {file = "ijson-3.2.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f5729fc7648bc972d70922d7dad15459cca3a9e5ed0328eb9ae3ffa004066194"},
-    {file = "ijson-3.2.1-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:805a2d5ed5a15d60327bc9347f2d4125ab621fb18071db98b1c598f1ee99e8f1"},
-    {file = "ijson-3.2.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d0220a4b6c63f44589e429157174e3f4b8d1e534d5fb82bdb43a7f8dd77ae4b"},
-    {file = "ijson-3.2.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:271d9b7c083f65c58ff0afd9dbb5d2f3d445f734632aebfef4a261b0a337abdb"},
-    {file = "ijson-3.2.1.tar.gz", hash = "sha256:8574bf19f31fab870488769ad919a80f130825236ac8bde9a733f69c2961d7a7"},
+    {file = "ijson-3.2.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0a4ae076bf97b0430e4e16c9cb635a6b773904aec45ed8dcbc9b17211b8569ba"},
+    {file = "ijson-3.2.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:cfced0a6ec85916eb8c8e22415b7267ae118eaff2a860c42d2cc1261711d0d31"},
+    {file = "ijson-3.2.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0b9d1141cfd1e6d6643aa0b4876730d0d28371815ce846d2e4e84a2d4f471cf3"},
+    {file = "ijson-3.2.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9e0a27db6454edd6013d40a956d008361aac5bff375a9c04ab11fc8c214250b5"},
+    {file = "ijson-3.2.3-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c0d526ccb335c3c13063c273637d8611f32970603dfb182177b232d01f14c23"},
+    {file = "ijson-3.2.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:545a30b3659df2a3481593d30d60491d1594bc8005f99600e1bba647bb44cbb5"},
+    {file = "ijson-3.2.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9680e37a10fedb3eab24a4a7e749d8a73f26f1a4c901430e7aa81b5da15f7307"},
+    {file = "ijson-3.2.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:2a80c0bb1053055d1599e44dc1396f713e8b3407000e6390add72d49633ff3bb"},
+    {file = "ijson-3.2.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f05ed49f434ce396ddcf99e9fd98245328e99f991283850c309f5e3182211a79"},
+    {file = "ijson-3.2.3-cp310-cp310-win32.whl", hash = "sha256:b4eb2304573c9fdf448d3fa4a4fdcb727b93002b5c5c56c14a5ffbbc39f64ae4"},
+    {file = "ijson-3.2.3-cp310-cp310-win_amd64.whl", hash = "sha256:923131f5153c70936e8bd2dd9dcfcff43c67a3d1c789e9c96724747423c173eb"},
+    {file = "ijson-3.2.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:904f77dd3d87736ff668884fe5197a184748eb0c3e302ded61706501d0327465"},
+    {file = "ijson-3.2.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0974444c1f416e19de1e9f567a4560890095e71e81623c509feff642114c1e53"},
+    {file = "ijson-3.2.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c1a4b8eb69b6d7b4e94170aa991efad75ba156b05f0de2a6cd84f991def12ff9"},
+    {file = "ijson-3.2.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d052417fd7ce2221114f8d3b58f05a83c1a2b6b99cafe0b86ac9ed5e2fc889df"},
+    {file = "ijson-3.2.3-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7b8064a85ec1b0beda7dd028e887f7112670d574db606f68006c72dd0bb0e0e2"},
+    {file = "ijson-3.2.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eaac293853f1342a8d2a45ac1f723c860f700860e7743fb97f7b76356df883a8"},
+    {file = "ijson-3.2.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:6c32c18a934c1dc8917455b0ce478fd7a26c50c364bd52c5a4fb0fc6bb516af7"},
+    {file = "ijson-3.2.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:713a919e0220ac44dab12b5fed74f9130f3480e55e90f9d80f58de129ea24f83"},
+    {file = "ijson-3.2.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4a3a6a2fbbe7550ffe52d151cf76065e6b89cfb3e9d0463e49a7e322a25d0426"},
+    {file = "ijson-3.2.3-cp311-cp311-win32.whl", hash = "sha256:6a4db2f7fb9acfb855c9ae1aae602e4648dd1f88804a0d5cfb78c3639bcf156c"},
+    {file = "ijson-3.2.3-cp311-cp311-win_amd64.whl", hash = "sha256:ccd6be56335cbb845f3d3021b1766299c056c70c4c9165fb2fbe2d62258bae3f"},
+    {file = "ijson-3.2.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:eeb286639649fb6bed37997a5e30eefcacddac79476d24128348ec890b2a0ccb"},
+    {file = "ijson-3.2.3-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:396338a655fb9af4ac59dd09c189885b51fa0eefc84d35408662031023c110d1"},
+    {file = "ijson-3.2.3-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0e0243d166d11a2a47c17c7e885debf3b19ed136be2af1f5d1c34212850236ac"},
+    {file = "ijson-3.2.3-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85afdb3f3a5d0011584d4fa8e6dccc5936be51c27e84cd2882fe904ca3bd04c5"},
+    {file = "ijson-3.2.3-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:4fc35d569eff3afa76bfecf533f818ecb9390105be257f3f83c03204661ace70"},
+    {file = "ijson-3.2.3-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:455d7d3b7a6aacfb8ab1ebcaf697eedf5be66e044eac32508fccdc633d995f0e"},
+    {file = "ijson-3.2.3-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:c63f3d57dbbac56cead05b12b81e8e1e259f14ce7f233a8cbe7fa0996733b628"},
+    {file = "ijson-3.2.3-cp36-cp36m-win32.whl", hash = "sha256:a4d7fe3629de3ecb088bff6dfe25f77be3e8261ed53d5e244717e266f8544305"},
+    {file = "ijson-3.2.3-cp36-cp36m-win_amd64.whl", hash = "sha256:96190d59f015b5a2af388a98446e411f58ecc6a93934e036daa75f75d02386a0"},
+    {file = "ijson-3.2.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:35194e0b8a2bda12b4096e2e792efa5d4801a0abb950c48ade351d479cd22ba5"},
+    {file = "ijson-3.2.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1053fb5f0b010ee76ca515e6af36b50d26c1728ad46be12f1f147a835341083"},
+    {file = "ijson-3.2.3-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:211124cff9d9d139dd0dfced356f1472860352c055d2481459038b8205d7d742"},
+    {file = "ijson-3.2.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:92dc4d48e9f6a271292d6079e9fcdce33c83d1acf11e6e12696fb05c5889fe74"},
+    {file = "ijson-3.2.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:3dcc33ee56f92a77f48776014ddb47af67c33dda361e84371153c4f1ed4434e1"},
+    {file = "ijson-3.2.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:98c6799925a5d1988da4cd68879b8eeab52c6e029acc45e03abb7921a4715c4b"},
+    {file = "ijson-3.2.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:4252e48c95cd8ceefc2caade310559ab61c37d82dfa045928ed05328eb5b5f65"},
+    {file = "ijson-3.2.3-cp37-cp37m-win32.whl", hash = "sha256:644f4f03349ff2731fd515afd1c91b9e439e90c9f8c28292251834154edbffca"},
+    {file = "ijson-3.2.3-cp37-cp37m-win_amd64.whl", hash = "sha256:ba33c764afa9ecef62801ba7ac0319268a7526f50f7601370d9f8f04e77fc02b"},
+    {file = "ijson-3.2.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:4b2ec8c2a3f1742cbd5f36b65e192028e541b5fd8c7fd97c1fc0ca6c427c704a"},
+    {file = "ijson-3.2.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7dc357da4b4ebd8903e77dbcc3ce0555ee29ebe0747c3c7f56adda423df8ec89"},
+    {file = "ijson-3.2.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:bcc51c84bb220ac330122468fe526a7777faa6464e3b04c15b476761beea424f"},
+    {file = "ijson-3.2.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8d54b624629f9903005c58d9321a036c72f5c212701bbb93d1a520ecd15e370"},
+    {file = "ijson-3.2.3-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6ea7c7e3ec44742e867c72fd750c6a1e35b112f88a917615332c4476e718d40"},
+    {file = "ijson-3.2.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:916acdc5e504f8b66c3e287ada5d4b39a3275fc1f2013c4b05d1ab9933671a6c"},
+    {file = "ijson-3.2.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:81815b4184b85ce124bfc4c446d5f5e5e643fc119771c5916f035220ada29974"},
+    {file = "ijson-3.2.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:b49fd5fe1cd9c1c8caf6c59f82b08117dd6bea2ec45b641594e25948f48f4169"},
+    {file = "ijson-3.2.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:86b3c91fdcb8ffb30556c9669930f02b7642de58ca2987845b04f0d7fe46d9a8"},
+    {file = "ijson-3.2.3-cp38-cp38-win32.whl", hash = "sha256:a729b0c8fb935481afe3cf7e0dadd0da3a69cc7f145dbab8502e2f1e01d85a7c"},
+    {file = "ijson-3.2.3-cp38-cp38-win_amd64.whl", hash = "sha256:d34e049992d8a46922f96483e96b32ac4c9cffd01a5c33a928e70a283710cd58"},
+    {file = "ijson-3.2.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:9c2a12dcdb6fa28f333bf10b3a0f80ec70bc45280d8435be7e19696fab2bc706"},
+    {file = "ijson-3.2.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1844c5b57da21466f255a0aeddf89049e730d7f3dfc4d750f0e65c36e6a61a7c"},
+    {file = "ijson-3.2.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2ec3e5ff2515f1c40ef6a94983158e172f004cd643b9e4b5302017139b6c96e4"},
+    {file = "ijson-3.2.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:46bafb1b9959872a1f946f8dd9c6f1a30a970fc05b7bfae8579da3f1f988e598"},
+    {file = "ijson-3.2.3-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ab4db9fee0138b60e31b3c02fff8a4c28d7b152040553b6a91b60354aebd4b02"},
+    {file = "ijson-3.2.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f4bc87e69d1997c6a55fff5ee2af878720801ff6ab1fb3b7f94adda050651e37"},
+    {file = "ijson-3.2.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e9fd906f0c38e9f0bfd5365e1bed98d649f506721f76bb1a9baa5d7374f26f19"},
+    {file = "ijson-3.2.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:e84d27d1acb60d9102728d06b9650e5b7e5cb0631bd6e3dfadba8fb6a80d6c2f"},
+    {file = "ijson-3.2.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2cc04fc0a22bb945cd179f614845c8b5106c0b3939ee0d84ce67c7a61ac1a936"},
+    {file = "ijson-3.2.3-cp39-cp39-win32.whl", hash = "sha256:e641814793a037175f7ec1b717ebb68f26d89d82cfd66f36e588f32d7e488d5f"},
+    {file = "ijson-3.2.3-cp39-cp39-win_amd64.whl", hash = "sha256:6bd3e7e91d031f1e8cea7ce53f704ab74e61e505e8072467e092172422728b22"},
+    {file = "ijson-3.2.3-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:06f9707da06a19b01013f8c65bf67db523662a9b4a4ff027e946e66c261f17f0"},
+    {file = "ijson-3.2.3-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:be8495f7c13fa1f622a2c6b64e79ac63965b89caf664cc4e701c335c652d15f2"},
+    {file = "ijson-3.2.3-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7596b42f38c3dcf9d434dddd50f46aeb28e96f891444c2b4b1266304a19a2c09"},
+    {file = "ijson-3.2.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fbac4e9609a1086bbad075beb2ceec486a3b138604e12d2059a33ce2cba93051"},
+    {file = "ijson-3.2.3-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:db2d6341f9cb538253e7fe23311d59252f124f47165221d3c06a7ed667ecd595"},
+    {file = "ijson-3.2.3-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:fa8b98be298efbb2588f883f9953113d8a0023ab39abe77fe734b71b46b1220a"},
+    {file = "ijson-3.2.3-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:674e585361c702fad050ab4c153fd168dc30f5980ef42b64400bc84d194e662d"},
+    {file = "ijson-3.2.3-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fd12e42b9cb9c0166559a3ffa276b4f9fc9d5b4c304e5a13668642d34b48b634"},
+    {file = "ijson-3.2.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d31e0d771d82def80cd4663a66de277c3b44ba82cd48f630526b52f74663c639"},
+    {file = "ijson-3.2.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:7ce4c70c23521179d6da842bb9bc2e36bb9fad1e0187e35423ff0f282890c9ca"},
+    {file = "ijson-3.2.3-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:39f551a6fbeed4433c85269c7c8778e2aaea2501d7ebcb65b38f556030642c17"},
+    {file = "ijson-3.2.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3b14d322fec0de7af16f3ef920bf282f0dd747200b69e0b9628117f381b7775b"},
+    {file = "ijson-3.2.3-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7851a341429b12d4527ca507097c959659baf5106c7074d15c17c387719ffbcd"},
+    {file = "ijson-3.2.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db3bf1b42191b5cc9b6441552fdcb3b583594cb6b19e90d1578b7cbcf80d0fae"},
+    {file = "ijson-3.2.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:6f662dc44362a53af3084d3765bb01cd7b4734d1f484a6095cad4cb0cbfe5374"},
+    {file = "ijson-3.2.3.tar.gz", hash = "sha256:10294e9bf89cb713da05bc4790bdff616610432db561964827074898e174f917"},
]

[[package]]
@@ -824,13 +824,13 @@ files = [

[[package]]
name = "immutabledict"
version = "2.2.4"
version = "3.0.0"
description = "Immutable wrapper around dictionaries (a fork of frozendict)"
optional = false
python-versions = ">=3.7,<4.0"
python-versions = ">=3.8,<4.0"
files = [
    {file = "immutabledict-2.2.4-py3-none-any.whl", hash = "sha256:c827715c147d2364522f9a7709cc424c7001015274a3c705250e673605bde64b"},
    {file = "immutabledict-2.2.4.tar.gz", hash = "sha256:3bedc0741faaa2846f6edf5c29183f993da3abaff6a5961bb70a5659bb9e68ab"},
    {file = "immutabledict-3.0.0-py3-none-any.whl", hash = "sha256:034bacc6c6872707c4ec0ea9515de6bbe0dcf0fcabd97ae19fd4e4c338f05798"},
    {file = "immutabledict-3.0.0.tar.gz", hash = "sha256:5a23cd369a6187f76a8c29d7d687980b092538eb9800e58964603f1b973c56fe"},
]

[[package]]
@@ -887,17 +887,17 @@ scripts = ["click (>=6.0)", "twisted (>=16.4.0)"]

[[package]]
name = "isort"
version = "5.11.5"
version = "5.12.0"
description = "A Python utility / library to sort Python imports."
optional = false
python-versions = ">=3.7.0"
python-versions = ">=3.8.0"
files = [
    {file = "isort-5.11.5-py3-none-any.whl", hash = "sha256:ba1d72fb2595a01c7895a5128f9585a5cc4b6d395f1c8d514989b9a7eb2a8746"},
    {file = "isort-5.11.5.tar.gz", hash = "sha256:6be1f76a507cb2ecf16c7cf14a37e41609ca082330be4e3436a18ef74add55db"},
    {file = "isort-5.12.0-py3-none-any.whl", hash = "sha256:f84c2818376e66cf843d497486ea8fed8700b340f308f076c6fb1229dff318b6"},
    {file = "isort-5.12.0.tar.gz", hash = "sha256:8bef7dde241278824a6d83f44a544709b065191b95b6e50894bdc722fcba0504"},
]

[package.extras]
colors = ["colorama (>=0.4.3,<0.5.0)"]
colors = ["colorama (>=0.4.3)"]
pipfile-deprecated-finder = ["pip-shims (>=0.5.2)", "pipreqs", "requirementslib"]
plugins = ["setuptools"]
requirements-deprecated-finder = ["pip-api", "pipreqs"]
@@ -973,13 +973,13 @@ i18n = ["Babel (>=2.7)"]

[[package]]
name = "jsonschema"
version = "4.18.3"
version = "4.19.0"
description = "An implementation of JSON Schema validation for Python"
optional = false
python-versions = ">=3.8"
files = [
    {file = "jsonschema-4.18.3-py3-none-any.whl", hash = "sha256:aab78b34c2de001c6b692232f08c21a97b436fe18e0b817bf0511046924fceef"},
    {file = "jsonschema-4.18.3.tar.gz", hash = "sha256:64b7104d72efe856bea49ca4af37a14a9eba31b40bb7238179f3803130fd34d9"},
    {file = "jsonschema-4.19.0-py3-none-any.whl", hash = "sha256:043dc26a3845ff09d20e4420d6012a9c91c9aa8999fa184e7efcfeccb41e32cb"},
    {file = "jsonschema-4.19.0.tar.gz", hash = "sha256:6e1e7569ac13be8139b2dd2c21a55d350066ee3f80df06c608b398cdc6f30e8f"},
]

[package.dependencies]
@@ -1610,103 +1610,82 @@ files = [

[[package]]
name = "phonenumbers"
version = "8.13.14"
version = "8.13.18"
description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers."
optional = false
python-versions = "*"
files = [
    {file = "phonenumbers-8.13.14-py2.py3-none-any.whl", hash = "sha256:a4b20b6ba7dd402728f5cc8e86e1f29b1a873af45f5381dbee7e3083af497ff6"},
    {file = "phonenumbers-8.13.14.tar.gz", hash = "sha256:5fa952b4abf9fccdaf1f130d96114a520c48890d4091b50a064e22c0fdc12dec"},
    {file = "phonenumbers-8.13.18-py2.py3-none-any.whl", hash = "sha256:3d802739a22592e4127139349937753dee9b6a20bdd5d56847cd885bdc766b1f"},
    {file = "phonenumbers-8.13.18.tar.gz", hash = "sha256:b360c756252805d44b447b5bca6d250cf6bd6c69b6f0f4258f3bfe5ab81bef69"},
]

[[package]]
name = "pillow"
version = "9.4.0"
version = "10.0.0"
description = "Python Imaging Library (Fork)"
optional = false
python-versions = ">=3.7"
python-versions = ">=3.8"
files = [
    {file = "Pillow-9.4.0-1-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1b4b4e9dda4f4e4c4e6896f93e84a8f0bcca3b059de9ddf67dac3c334b1195e1"},
    {file = "Pillow-9.4.0-1-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:fb5c1ad6bad98c57482236a21bf985ab0ef42bd51f7ad4e4538e89a997624e12"},
    {file = "Pillow-9.4.0-1-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:f0caf4a5dcf610d96c3bd32932bfac8aee61c96e60481c2a0ea58da435e25acd"},
    {file = "Pillow-9.4.0-1-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:3f4cc516e0b264c8d4ccd6b6cbc69a07c6d582d8337df79be1e15a5056b258c9"},
    {file = "Pillow-9.4.0-1-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:b8c2f6eb0df979ee99433d8b3f6d193d9590f735cf12274c108bd954e30ca858"},
    {file = "Pillow-9.4.0-1-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b70756ec9417c34e097f987b4d8c510975216ad26ba6e57ccb53bc758f490dab"},
    {file = "Pillow-9.4.0-1-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:43521ce2c4b865d385e78579a082b6ad1166ebed2b1a2293c3be1d68dd7ca3b9"},
    {file = "Pillow-9.4.0-2-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:9d9a62576b68cd90f7075876f4e8444487db5eeea0e4df3ba298ee38a8d067b0"},
    {file = "Pillow-9.4.0-2-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:87708d78a14d56a990fbf4f9cb350b7d89ee8988705e58e39bdf4d82c149210f"},
    {file = "Pillow-9.4.0-2-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:8a2b5874d17e72dfb80d917213abd55d7e1ed2479f38f001f264f7ce7bae757c"},
    {file = "Pillow-9.4.0-2-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:83125753a60cfc8c412de5896d10a0a405e0bd88d0470ad82e0869ddf0cb3848"},
    {file = "Pillow-9.4.0-2-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:9e5f94742033898bfe84c93c831a6f552bb629448d4072dd312306bab3bd96f1"},
    {file = "Pillow-9.4.0-2-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:013016af6b3a12a2f40b704677f8b51f72cb007dac785a9933d5c86a72a7fe33"},
    {file = "Pillow-9.4.0-2-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:99d92d148dd03fd19d16175b6d355cc1b01faf80dae93c6c3eb4163709edc0a9"},
    {file = "Pillow-9.4.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:2968c58feca624bb6c8502f9564dd187d0e1389964898f5e9e1fbc8533169157"},
    {file = "Pillow-9.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c5c1362c14aee73f50143d74389b2c158707b4abce2cb055b7ad37ce60738d47"},
    {file = "Pillow-9.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd752c5ff1b4a870b7661234694f24b1d2b9076b8bf337321a814c612665f343"},
    {file = "Pillow-9.4.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9a3049a10261d7f2b6514d35bbb7a4dfc3ece4c4de14ef5876c4b7a23a0e566d"},
    {file = "Pillow-9.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:16a8df99701f9095bea8a6c4b3197da105df6f74e6176c5b410bc2df2fd29a57"},
    {file = "Pillow-9.4.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:94cdff45173b1919350601f82d61365e792895e3c3a3443cf99819e6fbf717a5"},
    {file = "Pillow-9.4.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:ed3e4b4e1e6de75fdc16d3259098de7c6571b1a6cc863b1a49e7d3d53e036070"},
    {file = "Pillow-9.4.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d5b2f8a31bd43e0f18172d8ac82347c8f37ef3e0b414431157718aa234991b28"},
    {file = "Pillow-9.4.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:09b89ddc95c248ee788328528e6a2996e09eaccddeeb82a5356e92645733be35"},
    {file = "Pillow-9.4.0-cp310-cp310-win32.whl", hash = "sha256:f09598b416ba39a8f489c124447b007fe865f786a89dbfa48bb5cf395693132a"},
    {file = "Pillow-9.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:f6e78171be3fb7941f9910ea15b4b14ec27725865a73c15277bc39f5ca4f8391"},
    {file = "Pillow-9.4.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:3fa1284762aacca6dc97474ee9c16f83990b8eeb6697f2ba17140d54b453e133"},
    {file = "Pillow-9.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:eaef5d2de3c7e9b21f1e762f289d17b726c2239a42b11e25446abf82b26ac132"},
    {file = "Pillow-9.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a4dfdae195335abb4e89cc9762b2edc524f3c6e80d647a9a81bf81e17e3fb6f0"},
    {file = "Pillow-9.4.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6abfb51a82e919e3933eb137e17c4ae9c0475a25508ea88993bb59faf82f3b35"},
    {file = "Pillow-9.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:451f10ef963918e65b8869e17d67db5e2f4ab40e716ee6ce7129b0cde2876eab"},
    {file = "Pillow-9.4.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:6663977496d616b618b6cfa43ec86e479ee62b942e1da76a2c3daa1c75933ef4"},
    {file = "Pillow-9.4.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:60e7da3a3ad1812c128750fc1bc14a7ceeb8d29f77e0a2356a8fb2aa8925287d"},
    {file = "Pillow-9.4.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:19005a8e58b7c1796bc0167862b1f54a64d3b44ee5d48152b06bb861458bc0f8"},
    {file = "Pillow-9.4.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f715c32e774a60a337b2bb8ad9839b4abf75b267a0f18806f6f4f5f1688c4b5a"},
    {file = "Pillow-9.4.0-cp311-cp311-win32.whl", hash = "sha256:b222090c455d6d1a64e6b7bb5f4035c4dff479e22455c9eaa1bdd4c75b52c80c"},
    {file = "Pillow-9.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:ba6612b6548220ff5e9df85261bddc811a057b0b465a1226b39bfb8550616aee"},
    {file = "Pillow-9.4.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:5f532a2ad4d174eb73494e7397988e22bf427f91acc8e6ebf5bb10597b49c493"},
    {file = "Pillow-9.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5dd5a9c3091a0f414a963d427f920368e2b6a4c2f7527fdd82cde8ef0bc7a327"},
    {file = "Pillow-9.4.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ef21af928e807f10bf4141cad4746eee692a0dd3ff56cfb25fce076ec3cc8abe"},
    {file = "Pillow-9.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:847b114580c5cc9ebaf216dd8c8dbc6b00a3b7ab0131e173d7120e6deade1f57"},
    {file = "Pillow-9.4.0-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:653d7fb2df65efefbcbf81ef5fe5e5be931f1ee4332c2893ca638c9b11a409c4"},
    {file = "Pillow-9.4.0-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:46f39cab8bbf4a384ba7cb0bc8bae7b7062b6a11cfac1ca4bc144dea90d4a9f5"},
    {file = "Pillow-9.4.0-cp37-cp37m-win32.whl", hash = "sha256:7ac7594397698f77bce84382929747130765f66406dc2cd8b4ab4da68ade4c6e"},
    {file = "Pillow-9.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:46c259e87199041583658457372a183636ae8cd56dbf3f0755e0f376a7f9d0e6"},
    {file = "Pillow-9.4.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:0e51f608da093e5d9038c592b5b575cadc12fd748af1479b5e858045fff955a9"},
    {file = "Pillow-9.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:765cb54c0b8724a7c12c55146ae4647e0274a839fb6de7bcba841e04298e1011"},
    {file = "Pillow-9.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:519e14e2c49fcf7616d6d2cfc5c70adae95682ae20f0395e9280db85e8d6c4df"},
    {file = "Pillow-9.4.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d197df5489004db87d90b918033edbeee0bd6df3848a204bca3ff0a903bef837"},
    {file = "Pillow-9.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0845adc64fe9886db00f5ab68c4a8cd933ab749a87747555cec1c95acea64b0b"},
    {file = "Pillow-9.4.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:e1339790c083c5a4de48f688b4841f18df839eb3c9584a770cbd818b33e26d5d"},
    {file = "Pillow-9.4.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:a96e6e23f2b79433390273eaf8cc94fec9c6370842e577ab10dabdcc7ea0a66b"},
    {file = "Pillow-9.4.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:7cfc287da09f9d2a7ec146ee4d72d6ea1342e770d975e49a8621bf54eaa8f30f"},
    {file = "Pillow-9.4.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d7081c084ceb58278dd3cf81f836bc818978c0ccc770cbbb202125ddabec6628"},
    {file = "Pillow-9.4.0-cp38-cp38-win32.whl", hash = "sha256:df41112ccce5d47770a0c13651479fbcd8793f34232a2dd9faeccb75eb5d0d0d"},
    {file = "Pillow-9.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:7a21222644ab69ddd9967cfe6f2bb420b460dae4289c9d40ff9a4896e7c35c9a"},
    {file = "Pillow-9.4.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:0f3269304c1a7ce82f1759c12ce731ef9b6e95b6df829dccd9fe42912cc48569"},
    {file = "Pillow-9.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cb362e3b0976dc994857391b776ddaa8c13c28a16f80ac6522c23d5257156bed"},
    {file = "Pillow-9.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a2e0f87144fcbbe54297cae708c5e7f9da21a4646523456b00cc956bd4c65815"},
    {file = "Pillow-9.4.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:28676836c7796805914b76b1837a40f76827ee0d5398f72f7dcc634bae7c6264"},
    {file = "Pillow-9.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0884ba7b515163a1a05440a138adeb722b8a6ae2c2b33aea93ea3118dd3a899e"},
    {file = "Pillow-9.4.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:53dcb50fbdc3fb2c55431a9b30caeb2f7027fcd2aeb501459464f0214200a503"},
    {file = "Pillow-9.4.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:e8c5cf126889a4de385c02a2c3d3aba4b00f70234bfddae82a5eaa3ee6d5e3e6"},
    {file = "Pillow-9.4.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:6c6b1389ed66cdd174d040105123a5a1bc91d0aa7059c7261d20e583b6d8cbd2"},
    {file = "Pillow-9.4.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0dd4c681b82214b36273c18ca7ee87065a50e013112eea7d78c7a1b89a739153"},
    {file = "Pillow-9.4.0-cp39-cp39-win32.whl", hash = "sha256:6d9dfb9959a3b0039ee06c1a1a90dc23bac3b430842dcb97908ddde05870601c"},
    {file = "Pillow-9.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:54614444887e0d3043557d9dbc697dbb16cfb5a35d672b7a0fcc1ed0cf1c600b"},
    {file = "Pillow-9.4.0-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b9b752ab91e78234941e44abdecc07f1f0d8f51fb62941d32995b8161f68cfe5"},
    {file = "Pillow-9.4.0-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d3b56206244dc8711f7e8b7d6cad4663917cd5b2d950799425076681e8766286"},
    {file = "Pillow-9.4.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aabdab8ec1e7ca7f1434d042bf8b1e92056245fb179790dc97ed040361f16bfd"},
    {file = "Pillow-9.4.0-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:db74f5562c09953b2c5f8ec4b7dfd3f5421f31811e97d1dbc0a7c93d6e3a24df"},
    {file = "Pillow-9.4.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:e9d7747847c53a16a729b6ee5e737cf170f7a16611c143d95aa60a109a59c336"},
    {file = "Pillow-9.4.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b52ff4f4e002f828ea6483faf4c4e8deea8d743cf801b74910243c58acc6eda3"},
    {file = "Pillow-9.4.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:575d8912dca808edd9acd6f7795199332696d3469665ef26163cd090fa1f8bfa"},
    {file = "Pillow-9.4.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3c4ed2ff6760e98d262e0cc9c9a7f7b8a9f61aa4d47c58835cdaf7b0b8811bb"},
    {file = "Pillow-9.4.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e621b0246192d3b9cb1dc62c78cfa4c6f6d2ddc0ec207d43c0dedecb914f152a"},
    {file = "Pillow-9.4.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:8f127e7b028900421cad64f51f75c051b628db17fb00e099eb148761eed598c9"},
    {file = "Pillow-9.4.0.tar.gz", hash = "sha256:a1c2d7780448eb93fbcc3789bf3916aa5720d942e37945f4056680317f1cd23e"},
    {file = "Pillow-10.0.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1f62406a884ae75fb2f818694469519fb685cc7eaff05d3451a9ebe55c646891"},
    {file = "Pillow-10.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d5db32e2a6ccbb3d34d87c87b432959e0db29755727afb37290e10f6e8e62614"},
    {file = "Pillow-10.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edf4392b77bdc81f36e92d3a07a5cd072f90253197f4a52a55a8cec48a12483b"},
    {file = "Pillow-10.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:520f2a520dc040512699f20fa1c363eed506e94248d71f85412b625026f6142c"},
    {file = "Pillow-10.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:8c11160913e3dd06c8ffdb5f233a4f254cb449f4dfc0f8f4549eda9e542c93d1"},
    {file = "Pillow-10.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:a74ba0c356aaa3bb8e3eb79606a87669e7ec6444be352870623025d75a14a2bf"},
    {file = "Pillow-10.0.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d5d0dae4cfd56969d23d94dc8e89fb6a217be461c69090768227beb8ed28c0a3"},
    {file = "Pillow-10.0.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:22c10cc517668d44b211717fd9775799ccec4124b9a7f7b3635fc5386e584992"},
    {file = "Pillow-10.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:dffe31a7f47b603318c609f378ebcd57f1554a3a6a8effbc59c3c69f804296de"},
    {file = "Pillow-10.0.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:9fb218c8a12e51d7ead2a7c9e101a04982237d4855716af2e9499306728fb485"},
    {file = "Pillow-10.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d35e3c8d9b1268cbf5d3670285feb3528f6680420eafe35cccc686b73c1e330f"},
    {file = "Pillow-10.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ed64f9ca2f0a95411e88a4efbd7a29e5ce2cea36072c53dd9d26d9c76f753b3"},
    {file = "Pillow-10.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b6eb5502f45a60a3f411c63187db83a3d3107887ad0d036c13ce836f8a36f1d"},
    {file = "Pillow-10.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:c1fbe7621c167ecaa38ad29643d77a9ce7311583761abf7836e1510c580bf3dd"},
    {file = "Pillow-10.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:cd25d2a9d2b36fcb318882481367956d2cf91329f6892fe5d385c346c0649629"},
    {file = "Pillow-10.0.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:3b08d4cc24f471b2c8ca24ec060abf4bebc6b144cb89cba638c720546b1cf538"},
    {file = "Pillow-10.0.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d737a602fbd82afd892ca746392401b634e278cb65d55c4b7a8f48e9ef8d008d"},
    {file = "Pillow-10.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:3a82c40d706d9aa9734289740ce26460a11aeec2d9c79b7af87bb35f0073c12f"},
    {file = "Pillow-10.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:bc2ec7c7b5d66b8ec9ce9f720dbb5fa4bace0f545acd34870eff4a369b44bf37"},
    {file = "Pillow-10.0.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:d80cf684b541685fccdd84c485b31ce73fc5c9b5d7523bf1394ce134a60c6883"},
    {file = "Pillow-10.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:76de421f9c326da8f43d690110f0e79fe3ad1e54be811545d7d91898b4c8493e"},
    {file = "Pillow-10.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:81ff539a12457809666fef6624684c008e00ff6bf455b4b89fd00a140eecd640"},
    {file = "Pillow-10.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce543ed15570eedbb85df19b0a1a7314a9c8141a36ce089c0a894adbfccb4568"},
    {file = "Pillow-10.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:685ac03cc4ed5ebc15ad5c23bc555d68a87777586d970c2c3e216619a5476223"},
    {file = "Pillow-10.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:d72e2ecc68a942e8cf9739619b7f408cc7b272b279b56b2c83c6123fcfa5cdff"},
    {file = "Pillow-10.0.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d50b6aec14bc737742ca96e85d6d0a5f9bfbded018264b3b70ff9d8c33485551"},
    {file = "Pillow-10.0.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:00e65f5e822decd501e374b0650146063fbb30a7264b4d2744bdd7b913e0cab5"},
    {file = "Pillow-10.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:f31f9fdbfecb042d046f9d91270a0ba28368a723302786c0009ee9b9f1f60199"},
    {file = "Pillow-10.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:1ce91b6ec08d866b14413d3f0bbdea7e24dfdc8e59f562bb77bc3fe60b6144ca"},
    {file = "Pillow-10.0.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:349930d6e9c685c089284b013478d6f76e3a534e36ddfa912cde493f235372f3"},
    {file = "Pillow-10.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3a684105f7c32488f7153905a4e3015a3b6c7182e106fe3c37fbb5ef3e6994c3"},
    {file = "Pillow-10.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b4f69b3700201b80bb82c3a97d5e9254084f6dd5fb5b16fc1a7b974260f89f43"},
    {file = "Pillow-10.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3f07ea8d2f827d7d2a49ecf1639ec02d75ffd1b88dcc5b3a61bbb37a8759ad8d"},
    {file = "Pillow-10.0.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:040586f7d37b34547153fa383f7f9aed68b738992380ac911447bb78f2abe530"},
    {file = "Pillow-10.0.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:f88a0b92277de8e3ca715a0d79d68dc82807457dae3ab8699c758f07c20b3c51"},
    {file = "Pillow-10.0.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:c7cf14a27b0d6adfaebb3ae4153f1e516df54e47e42dcc073d7b3d76111a8d86"},
    {file = "Pillow-10.0.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:3400aae60685b06bb96f99a21e1ada7bc7a413d5f49bce739828ecd9391bb8f7"},
    {file = "Pillow-10.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:dbc02381779d412145331789b40cc7b11fdf449e5d94f6bc0b080db0a56ea3f0"},
    {file = "Pillow-10.0.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:9211e7ad69d7c9401cfc0e23d49b69ca65ddd898976d660a2fa5904e3d7a9baa"},
    {file = "Pillow-10.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:faaf07ea35355b01a35cb442dd950d8f1bb5b040a7787791a535de13db15ed90"},
    {file = "Pillow-10.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9f72a021fbb792ce98306ffb0c348b3c9cb967dce0f12a49aa4c3d3fdefa967"},
    {file = "Pillow-10.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f7c16705f44e0504a3a2a14197c1f0b32a95731d251777dcb060aa83022cb2d"},
    {file = "Pillow-10.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:76edb0a1fa2b4745fb0c99fb9fb98f8b180a1bbceb8be49b087e0b21867e77d3"},
    {file = "Pillow-10.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:368ab3dfb5f49e312231b6f27b8820c823652b7cd29cfbd34090565a015e99ba"},
    {file = "Pillow-10.0.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:608bfdee0d57cf297d32bcbb3c728dc1da0907519d1784962c5f0c68bb93e5a3"},
    {file = "Pillow-10.0.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5c6e3df6bdd396749bafd45314871b3d0af81ff935b2d188385e970052091017"},
    {file = "Pillow-10.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:7be600823e4c8631b74e4a0d38384c73f680e6105a7d3c6824fcf226c178c7e6"},
    {file = "Pillow-10.0.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:92be919bbc9f7d09f7ae343c38f5bb21c973d2576c1d45600fce4b74bafa7ac0"},
    {file = "Pillow-10.0.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f8182b523b2289f7c415f589118228d30ac8c355baa2f3194ced084dac2dbba"},
    {file = "Pillow-10.0.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:38250a349b6b390ee6047a62c086d3817ac69022c127f8a5dc058c31ccef17f3"},
    {file = "Pillow-10.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:88af2003543cc40c80f6fca01411892ec52b11021b3dc22ec3bc9d5afd1c5334"},
    {file = "Pillow-10.0.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:c189af0545965fa8d3b9613cfdb0cd37f9d71349e0f7750e1fd704648d475ed2"},
    {file = "Pillow-10.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce7b031a6fc11365970e6a5686d7ba8c63e4c1cf1ea143811acbb524295eabed"},
    {file = "Pillow-10.0.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:db24668940f82321e746773a4bc617bfac06ec831e5c88b643f91f122a785684"},
    {file = "Pillow-10.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:efe8c0681042536e0d06c11f48cebe759707c9e9abf880ee213541c5b46c5bf3"},
    {file = "Pillow-10.0.0.tar.gz", hash = "sha256:9c82b5b3e043c7af0d95792d0d20ccf68f61a1fec6b3530e718b688422727396"},
]

[package.extras]
docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-issues (>=3.0.1)", "sphinx-removed-in", "sphinxext-opengraph"]
docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-removed-in", "sphinxext-opengraph"]
tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"]

[[package]]
@@ -1902,13 +1881,13 @@ email = ["email-validator (>=1.0.3)"]

[[package]]
name = "pygithub"
version = "1.58.2"
version = "1.59.1"
description = "Use the full Github API v3"
optional = false
python-versions = ">=3.7"
files = [
    {file = "PyGithub-1.58.2-py3-none-any.whl", hash = "sha256:f435884af617c6debaa76cbc355372d1027445a56fbc39972a3b9ed4968badc8"},
    {file = "PyGithub-1.58.2.tar.gz", hash = "sha256:1e6b1b7afe31f75151fb81f7ab6b984a7188a852bdb123dbb9ae90023c3ce60f"},
    {file = "PyGithub-1.59.1-py3-none-any.whl", hash = "sha256:3d87a822e6c868142f0c2c4bf16cce4696b5a7a4d142a7bd160e1bdf75bc54a9"},
    {file = "PyGithub-1.59.1.tar.gz", hash = "sha256:c44e3a121c15bf9d3a5cc98d94c9a047a5132a9b01d22264627f58ade9ddc217"},
]

[package.dependencies]
@@ -1919,13 +1898,13 @@ requests = ">=2.14.0"

[[package]]
name = "pygments"
version = "2.14.0"
version = "2.15.1"
description = "Pygments is a syntax highlighting package written in Python."
optional = false
python-versions = ">=3.6"
python-versions = ">=3.7"
files = [
    {file = "Pygments-2.14.0-py3-none-any.whl", hash = "sha256:fa7bd7bd2771287c0de303af8bfdfc731f51bd2c6a47ab69d117138893b82717"},
    {file = "Pygments-2.14.0.tar.gz", hash = "sha256:b3ed06a9e8ac9a9aae5a6f5dbe78a8a58655d17b43b93c078f094ddc476ae297"},
    {file = "Pygments-2.15.1-py3-none-any.whl", hash = "sha256:db2db3deb4b4179f399a09054b023b6a586b76499d36965813c71aa8ed7b5fd1"},
    {file = "Pygments-2.15.1.tar.gz", hash = "sha256:8ace4d3c1dd481894b2005f560ead0f9f19ee64fe983366be1a21e171d12775c"},
]

[package.extras]
@@ -2093,51 +2072,51 @@ files = [

[[package]]
name = "pyyaml"
version = "6.0"
version = "6.0.1"
description = "YAML parser and emitter for Python"
optional = false
python-versions = ">=3.6"
files = [
    {file = "PyYAML-6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53"},
    {file = "PyYAML-6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c"},
    {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77f396e6ef4c73fdc33a9157446466f1cff553d979bd00ecb64385760c6babdc"},
    {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a80a78046a72361de73f8f395f1f1e49f956c6be882eed58505a15f3e430962b"},
    {file = "PyYAML-6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5"},
    {file = "PyYAML-6.0-cp310-cp310-win32.whl", hash = "sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513"},
    {file = "PyYAML-6.0-cp310-cp310-win_amd64.whl", hash = "sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a"},
    {file = "PyYAML-6.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d4b0ba9512519522b118090257be113b9468d804b19d63c71dbcf4a48fa32358"},
    {file = "PyYAML-6.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:81957921f441d50af23654aa6c5e5eaf9b06aba7f0a19c18a538dc7ef291c5a1"},
    {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:afa17f5bc4d1b10afd4466fd3a44dc0e245382deca5b3c353d8b757f9e3ecb8d"},
    {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dbad0e9d368bb989f4515da330b88a057617d16b6a8245084f1b05400f24609f"},
    {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:432557aa2c09802be39460360ddffd48156e30721f5e8d917f01d31694216782"},
    {file = "PyYAML-6.0-cp311-cp311-win32.whl", hash = "sha256:bfaef573a63ba8923503d27530362590ff4f576c626d86a9fed95822a8255fd7"},
    {file = "PyYAML-6.0-cp311-cp311-win_amd64.whl", hash = "sha256:01b45c0191e6d66c470b6cf1b9531a771a83c1c4208272ead47a3ae4f2f603bf"},
    {file = "PyYAML-6.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86"},
    {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f"},
    {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92"},
    {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:98c4d36e99714e55cfbaaee6dd5badbc9a1ec339ebfc3b1f52e293aee6bb71a4"},
    {file = "PyYAML-6.0-cp36-cp36m-win32.whl", hash = "sha256:0283c35a6a9fbf047493e3a0ce8d79ef5030852c51e9d911a27badfde0605293"},
    {file = "PyYAML-6.0-cp36-cp36m-win_amd64.whl", hash = "sha256:07751360502caac1c067a8132d150cf3d61339af5691fe9e87803040dbc5db57"},
    {file = "PyYAML-6.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:819b3830a1543db06c4d4b865e70ded25be52a2e0631ccd2f6a47a2822f2fd7c"},
    {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:473f9edb243cb1935ab5a084eb238d842fb8f404ed2193a915d1784b5a6b5fc0"},
    {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0ce82d761c532fe4ec3f87fc45688bdd3a4c1dc5e0b4a19814b9009a29baefd4"},
    {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:231710d57adfd809ef5d34183b8ed1eeae3f76459c18fb4a0b373ad56bedcdd9"},
    {file = "PyYAML-6.0-cp37-cp37m-win32.whl", hash = "sha256:c5687b8d43cf58545ade1fe3e055f70eac7a5a1a0bf42824308d868289a95737"},
    {file = "PyYAML-6.0-cp37-cp37m-win_amd64.whl", hash = "sha256:d15a181d1ecd0d4270dc32edb46f7cb7733c7c508857278d3d378d14d606db2d"},
    {file = "PyYAML-6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0b4624f379dab24d3725ffde76559cff63d9ec94e1736b556dacdfebe5ab6d4b"},
    {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:213c60cd50106436cc818accf5baa1aba61c0189ff610f64f4a3e8c6726218ba"},
    {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9fa600030013c4de8165339db93d182b9431076eb98eb40ee068700c9c813e34"},
    {file = "PyYAML-6.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:277a0ef2981ca40581a47093e9e2d13b3f1fbbeffae064c1d21bfceba2030287"},
    {file = "PyYAML-6.0-cp38-cp38-win32.whl", hash = "sha256:d4eccecf9adf6fbcc6861a38015c2a64f38b9d94838ac1810a9023a0609e1b78"},
    {file = "PyYAML-6.0-cp38-cp38-win_amd64.whl", hash = "sha256:1e4747bc279b4f613a09eb64bba2ba602d8a6664c6ce6396a4d0cd413a50ce07"},
    {file = "PyYAML-6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:055d937d65826939cb044fc8c9b08889e8c743fdc6a32b33e2390f66013e449b"},
    {file = "PyYAML-6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e61ceaab6f49fb8bdfaa0f92c4b57bcfbea54c09277b1b4f7ac376bfb7a7c174"},
    {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d67d839ede4ed1b28a4e8909735fc992a923cdb84e618544973d7dfc71540803"},
    {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cba8c411ef271aa037d7357a2bc8f9ee8b58b9965831d9e51baf703280dc73d3"},
    {file = "PyYAML-6.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:40527857252b61eacd1d9af500c3337ba8deb8fc298940291486c465c8b46ec0"},
    {file = "PyYAML-6.0-cp39-cp39-win32.whl", hash = "sha256:b5b9eccad747aabaaffbc6064800670f0c297e52c12754eb1d976c57e4f74dcb"},
    {file = "PyYAML-6.0-cp39-cp39-win_amd64.whl", hash = "sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c"},
    {file = "PyYAML-6.0.tar.gz", hash = "sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2"},
    {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"},
    {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"},
    {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"},
    {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"},
    {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"},
    {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"},
    {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"},
    {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"},
    {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"},
    {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"},
    {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"},
    {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"},
    {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"},
    {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"},
    {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"},
    {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"},
    {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"},
    {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"},
    {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"},
    {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"},
    {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"},
    {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"},
    {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"},
    {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"},
    {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"},
    {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"},
    {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"},
    {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"},
    {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"},
    {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"},
    {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"},
    {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"},
    {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"},
    {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"},
    {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"},
    {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"},
    {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"},
    {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"},
    {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"},
    {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"},
]

[[package]]
@@ -2406,13 +2385,13 @@ doc = ["Sphinx", "sphinx-rtd-theme"]

[[package]]
name = "sentry-sdk"
version = "1.26.0"
version = "1.29.2"
description = "Python client for Sentry (https://sentry.io)"
optional = true
python-versions = "*"
files = [
    {file = "sentry-sdk-1.26.0.tar.gz", hash = "sha256:760e4fb6d01c994110507133e08ecd4bdf4d75ee4be77f296a3579796cf73134"},
    {file = "sentry_sdk-1.26.0-py2.py3-none-any.whl", hash = "sha256:0c9f858337ec3781cf4851972ef42bba8c9828aea116b0dbed8f38c5f9a1896c"},
    {file = "sentry-sdk-1.29.2.tar.gz", hash = "sha256:a99ee105384788c3f228726a88baf515fe7b5f1d2d0f215a03d194369f158df7"},
    {file = "sentry_sdk-1.29.2-py2.py3-none-any.whl", hash = "sha256:3e17215d8006612e2df02b0e73115eb8376c37e3f586d8436fa41644e605074d"},
]

[package.dependencies]
@@ -2448,13 +2427,13 @@ tornado = ["tornado (>=5)"]

[[package]]
name = "service-identity"
version = "21.1.0"
version = "23.1.0"
description = "Service identity verification for pyOpenSSL & cryptography."
optional = false
python-versions = "*"
python-versions = ">=3.8"
files = [
    {file = "service-identity-21.1.0.tar.gz", hash = "sha256:6e6c6086ca271dc11b033d17c3a8bea9f24ebff920c587da090afc9519419d34"},
    {file = "service_identity-21.1.0-py2.py3-none-any.whl", hash = "sha256:f0b0caac3d40627c3c04d7a51b6e06721857a0e10a8775f2d1d7e72901b3a7db"},
    {file = "service_identity-23.1.0-py3-none-any.whl", hash = "sha256:87415a691d52fcad954a500cb81f424d0273f8e7e3ee7d766128f4575080f383"},
    {file = "service_identity-23.1.0.tar.gz", hash = "sha256:ecb33cd96307755041e978ab14f8b14e13b40f1fbd525a4dc78f46d2b986431d"},
]

[package.dependencies]
@@ -2462,12 +2441,12 @@ attrs = ">=19.1.0"
cryptography = "*"
pyasn1 = "*"
pyasn1-modules = "*"
six = "*"

[package.extras]
dev = ["coverage[toml] (>=5.0.2)", "furo", "idna", "pyOpenSSL", "pytest", "sphinx"]
docs = ["furo", "sphinx"]
dev = ["pyopenssl", "service-identity[docs,idna,mypy,tests]"]
docs = ["furo", "myst-parser", "pyopenssl", "sphinx", "sphinx-notfound-page"]
idna = ["idna"]
mypy = ["idna", "mypy", "types-pyopenssl"]
tests = ["coverage[toml] (>=5.0.2)", "pytest"]

[[package]]
@@ -2942,13 +2921,13 @@ files = [

[[package]]
name = "txredisapi"
version = "1.4.9"
version = "1.4.10"
description = "non-blocking redis client for python"
optional = true
python-versions = "*"
files = [
    {file = "txredisapi-1.4.9-py3-none-any.whl", hash = "sha256:72e6ad09cc5fffe3bec2e55e5bfb74407bd357565fc212e6003f7e26ef7d8f78"},
    {file = "txredisapi-1.4.9.tar.gz", hash = "sha256:c9607062d05e4d0b8ef84719eb76a3fe7d5ccd606a2acf024429da51d6e84559"},
    {file = "txredisapi-1.4.10-py3-none-any.whl", hash = "sha256:0a6ea77f27f8cf092f907654f08302a97b48fa35f24e0ad99dfb74115f018161"},
    {file = "txredisapi-1.4.10.tar.gz", hash = "sha256:7609a6af6ff4619a3189c0adfb86aeda789afba69eb59fc1e19ac0199e725395"},
]

[package.dependencies]
@@ -2957,68 +2936,68 @@ twisted = "*"

[[package]]
name = "types-bleach"
version = "6.0.0.3"
version = "6.0.0.4"
description = "Typing stubs for bleach"
optional = false
python-versions = "*"
files = [
    {file = "types-bleach-6.0.0.3.tar.gz", hash = "sha256:8ce7896d4f658c562768674ffcf07492c7730e128018f03edd163ff912bfadee"},
    {file = "types_bleach-6.0.0.3-py3-none-any.whl", hash = "sha256:d43eaf30a643ca824e16e2dcdb0c87ef9226237e2fa3ac4732a50cb3f32e145f"},
    {file = "types-bleach-6.0.0.4.tar.gz", hash = "sha256:357b0226f65c4f20ab3b13ca8d78a6b91c78aad256d8ec168d4e90fc3303ebd4"},
    {file = "types_bleach-6.0.0.4-py3-none-any.whl", hash = "sha256:2b8767eb407c286b7f02803678732e522e04db8d56cbc9f1270bee49627eae92"},
]

[[package]]
name = "types-commonmark"
version = "0.9.2.3"
version = "0.9.2.4"
description = "Typing stubs for commonmark"
optional = false
python-versions = "*"
files = [
    {file = "types-commonmark-0.9.2.3.tar.gz", hash = "sha256:42769a2c194fd5b49fd9eedfd4a83cd1d2514c6d0a36f00f5c5ffe0b6a2d2fcf"},
    {file = "types_commonmark-0.9.2.3-py3-none-any.whl", hash = "sha256:b575156e1b8a292d43acb36f861110b85c4bc7aa53bbfb5ac64addec15d18cfa"},
    {file = "types-commonmark-0.9.2.4.tar.gz", hash = "sha256:2c6486f65735cf18215cca3e962b17787fa545be279306f79b801f64a5319959"},
    {file = "types_commonmark-0.9.2.4-py3-none-any.whl", hash = "sha256:d5090fa685c3e3c0ec3a5973ff842000baef6d86f762d52209b3c5e9fbd0b555"},
]

[[package]]
name = "types-jsonschema"
version = "4.17.0.8"
version = "4.17.0.10"
description = "Typing stubs for jsonschema"
optional = false
python-versions = "*"
files = [
    {file = "types-jsonschema-4.17.0.8.tar.gz", hash = "sha256:96a56990910f405e62de58862c0bbb3ac29ee6dba6d3d99aa0ba7f874cc547de"},
    {file = "types_jsonschema-4.17.0.8-py3-none-any.whl", hash = "sha256:f5958eb7b53217dfb5125f0412aeaef226a8a9013eac95816c95b5b523f6796b"},
    {file = "types-jsonschema-4.17.0.10.tar.gz", hash = "sha256:8e979db34d69bc9f9b3d6e8b89bdbc60b3a41cfce4e1fb87bf191d205c7f5098"},
    {file = "types_jsonschema-4.17.0.10-py3-none-any.whl", hash = "sha256:3aa2a89afbd9eaa6ce0c15618b36f02692a621433889ce73014656f7d8caf971"},
]

[[package]]
name = "types-netaddr"
version = "0.8.0.8"
version = "0.8.0.9"
description = "Typing stubs for netaddr"
optional = false
python-versions = "*"
files = [
    {file = "types-netaddr-0.8.0.8.tar.gz", hash = "sha256:db7e8cd16b1244e7c4541edd0df99d1039fc05fd5387c21840f0b958fc52aabc"},
    {file = "types_netaddr-0.8.0.8-py3-none-any.whl", hash = "sha256:6741b3824e2ec3f7a74842b394439b71107c7675f8ae42bb2b5e7a8ebfe8cf18"},
    {file = "types-netaddr-0.8.0.9.tar.gz", hash = "sha256:68900c267fd31627c1721c5c52b32a257657ac2777457dca49b6b096ba2faf74"},
    {file = "types_netaddr-0.8.0.9-py3-none-any.whl", hash = "sha256:63e871f064cd59473cec1177f372526f0fa3d565050247d5305bdc325be5c3f6"},
]

[[package]]
name = "types-opentracing"
version = "2.4.10.5"
version = "2.4.10.6"
description = "Typing stubs for opentracing"
optional = false
python-versions = "*"
files = [
    {file = "types-opentracing-2.4.10.5.tar.gz", hash = "sha256:852d13ab1324832835d50c00cfd58b9267f0e79ec3189e5664c2a90c26880fd4"},
    {file = "types_opentracing-2.4.10.5-py3-none-any.whl", hash = "sha256:8f12ab4dce3e298a8e6655da9a6d52171e7a275357eae4cec22a1663d94023a7"},
    {file = "types-opentracing-2.4.10.6.tar.gz", hash = "sha256:87a1bdfce9de5e555e30497663583b9b9c3bb494d029ef9806aa1f137c19e744"},
    {file = "types_opentracing-2.4.10.6-py3-none-any.whl", hash = "sha256:25914c834db033a4a38fc322df0b5e5e14503b0ac97f78304ae180d721555e97"},
]

[[package]]
name = "types-pillow"
version = "10.0.0.1"
version = "10.0.0.2"
description = "Typing stubs for Pillow"
optional = false
python-versions = "*"
files = [
    {file = "types-Pillow-10.0.0.1.tar.gz", hash = "sha256:834a07a04504f8bf37936679bc6a5802945e7644d0727460c0c4d4307967e2a3"},
    {file = "types_Pillow-10.0.0.1-py3-none-any.whl", hash = "sha256:be576b67418f1cb3b93794cf7946581be1009a33a10085b3c132eb0875a819b4"},
    {file = "types-Pillow-10.0.0.2.tar.gz", hash = "sha256:fe09380ab22d412ced989a067e9ee4af719fa3a47ba1b53b232b46514a871042"},
    {file = "types_Pillow-10.0.0.2-py3-none-any.whl", hash = "sha256:29d51a3ce6ef51fabf728a504d33b4836187ff14256b2e86996d55c91ab214b1"},
]

[[package]]
@@ -3034,13 +3013,13 @@ files = [

[[package]]
name = "types-pyopenssl"
version = "23.2.0.1"
version = "23.2.0.2"
description = "Typing stubs for pyOpenSSL"
optional = false
python-versions = "*"
files = [
    {file = "types-pyOpenSSL-23.2.0.1.tar.gz", hash = "sha256:beeb5d22704c625a1e4b6dc756355c5b4af0b980138b702a9d9f932acf020903"},
    {file = "types_pyOpenSSL-23.2.0.1-py3-none-any.whl", hash = "sha256:0568553f104466f1b8e0db3360fbe6770137d02e21a1a45c209bf2b1b03d90d4"},
    {file = "types-pyOpenSSL-23.2.0.2.tar.gz", hash = "sha256:6a010dac9ecd42b582d7dd2cc3e9e40486b79b3b64bb2fffba1474ff96af906d"},
    {file = "types_pyOpenSSL-23.2.0.2-py3-none-any.whl", hash = "sha256:19536aa3debfbe25a918cf0d898e9f5fbbe6f3594a429da7914bf331deb1b342"},
]

[package.dependencies]
@@ -3059,13 +3038,13 @@ files = [

[[package]]
name = "types-requests"
version = "2.31.0.1"
version = "2.31.0.2"
description = "Typing stubs for requests"
optional = false
python-versions = "*"
files = [
    {file = "types-requests-2.31.0.1.tar.gz", hash = "sha256:3de667cffa123ce698591de0ad7db034a5317457a596eb0b4944e5a9d9e8d1ac"},
    {file = "types_requests-2.31.0.1-py3-none-any.whl", hash = "sha256:afb06ef8f25ba83d59a1d424bd7a5a939082f94b94e90ab5e6116bd2559deaa3"},
    {file = "types-requests-2.31.0.2.tar.gz", hash = "sha256:6aa3f7faf0ea52d728bb18c0a0d1522d9bfd8c72d26ff6f61bfc3d06a411cf40"},
    {file = "types_requests-2.31.0.2-py3-none-any.whl", hash = "sha256:56d181c85b5925cbc59f4489a57e72a8b2166f18273fd8ba7b6fe0c0b986f12a"},
]

[package.dependencies]
@@ -3073,13 +3052,13 @@ types-urllib3 = "*"

[[package]]
name = "types-setuptools"
version = "68.0.0.0"
version = "68.0.0.3"
description = "Typing stubs for setuptools"
optional = false
python-versions = "*"
files = [
    {file = "types-setuptools-68.0.0.0.tar.gz", hash = "sha256:fc958b4123b155ffc069a66d3af5fe6c1f9d0600c35c0c8444b2ab4147112641"},
    {file = "types_setuptools-68.0.0.0-py3-none-any.whl", hash = "sha256:cc00e09ba8f535362cbe1ea8b8407d15d14b59c57f4190cceaf61a9e57616446"},
    {file = "types-setuptools-68.0.0.3.tar.gz", hash = "sha256:d57ae6076100b5704b3cc869fdefc671e1baf4c2cd6643f84265dfc0b955bf05"},
    {file = "types_setuptools-68.0.0.3-py3-none-any.whl", hash = "sha256:fec09e5c18264c5c09351c00be01a34456fb7a88e457abe97401325f84ad9d36"},
]

[[package]]

@@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml"

[tool.poetry]
name = "matrix-synapse"
version = "1.88.0"
version = "1.91.2"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "Apache-2.0"
@@ -367,7 +367,7 @@ furo = ">=2022.12.7,<2024.0.0"
# system changes.
# We are happy to raise these upper bounds upon request,
# provided we check that it's safe to do so (i.e. that CI passes).
requires = ["poetry-core>=1.1.0,<=1.6.0", "setuptools_rust>=1.3,<=1.6.0"]
requires = ["poetry-core>=1.1.0,<=1.7.0", "setuptools_rust>=1.3,<=1.6.0"]
build-backend = "poetry.core.masonry.api"

@@ -13,6 +13,9 @@
// limitations under the License.

#![feature(test)]

use std::borrow::Cow;

use synapse::push::{
    evaluator::PushRuleEvaluator, Condition, EventMatchCondition, FilteredPushRules, JsonValue,
    PushRules, SimpleJsonValue,
@@ -26,15 +29,15 @@ fn bench_match_exact(b: &mut Bencher) {
    let flattened_keys = [
        (
            "type".to_string(),
            JsonValue::Value(SimpleJsonValue::Str("m.text".to_string())),
            JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("m.text"))),
        ),
        (
            "room_id".to_string(),
            JsonValue::Value(SimpleJsonValue::Str("!room:server".to_string())),
            JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("!room:server"))),
        ),
        (
            "content.body".to_string(),
            JsonValue::Value(SimpleJsonValue::Str("test message".to_string())),
            JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("test message"))),
        ),
    ]
    .into_iter()
@@ -71,15 +74,15 @@ fn bench_match_word(b: &mut Bencher) {
    let flattened_keys = [
        (
            "type".to_string(),
            JsonValue::Value(SimpleJsonValue::Str("m.text".to_string())),
            JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("m.text"))),
        ),
        (
            "room_id".to_string(),
            JsonValue::Value(SimpleJsonValue::Str("!room:server".to_string())),
            JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("!room:server"))),
        ),
        (
            "content.body".to_string(),
            JsonValue::Value(SimpleJsonValue::Str("test message".to_string())),
            JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("test message"))),
        ),
    ]
    .into_iter()
@@ -116,15 +119,15 @@ fn bench_match_word_miss(b: &mut Bencher) {
    let flattened_keys = [
        (
            "type".to_string(),
            JsonValue::Value(SimpleJsonValue::Str("m.text".to_string())),
            JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("m.text"))),
        ),
        (
            "room_id".to_string(),
            JsonValue::Value(SimpleJsonValue::Str("!room:server".to_string())),
            JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("!room:server"))),
        ),
        (
            "content.body".to_string(),
            JsonValue::Value(SimpleJsonValue::Str("test message".to_string())),
            JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("test message"))),
        ),
    ]
    .into_iter()
@@ -161,15 +164,15 @@ fn bench_eval_message(b: &mut Bencher) {
    let flattened_keys = [
        (
            "type".to_string(),
            JsonValue::Value(SimpleJsonValue::Str("m.text".to_string())),
            JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("m.text"))),
        ),
        (
            "room_id".to_string(),
            JsonValue::Value(SimpleJsonValue::Str("!room:server".to_string())),
            JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("!room:server"))),
        ),
        (
            "content.body".to_string(),
            JsonValue::Value(SimpleJsonValue::Str("test message".to_string())),
            JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("test message"))),
        ),
    ]
    .into_iter()
|
||||
|
||||
@@ -63,22 +63,6 @@ pub const BASE_PREPEND_OVERRIDE_RULES: &[PushRule] = &[PushRule {
 }];

 pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
-    // We don't want to notify on edits. Not only can this be confusing in real
-    // time (2 notifications, one message) but it's especially confusing
-    // if a bridge needs to edit a previously backfilled message.
-    PushRule {
-        rule_id: Cow::Borrowed("global/override/.com.beeper.suppress_edits"),
-        priority_class: 5,
-        conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
-            EventMatchCondition {
-                key: Cow::Borrowed("content.m\\.relates_to.rel_type"),
-                pattern: Cow::Borrowed("m.replace"),
-            },
-        ))]),
-        actions: Cow::Borrowed(&[]),
-        default: true,
-        default_enabled: true,
-    },
     PushRule {
         rule_id: Cow::Borrowed("global/override/.m.rule.suppress_notices"),
         priority_class: 5,
@@ -146,7 +130,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
         priority_class: 5,
         conditions: Cow::Borrowed(&[Condition::Known(
             KnownCondition::ExactEventPropertyContainsType(EventPropertyIsTypeCondition {
-                key: Cow::Borrowed("content.m\\.mentions.user_ids"),
+                key: Cow::Borrowed(r"content.m\.mentions.user_ids"),
                 value_type: Cow::Borrowed(&EventMatchPatternType::UserId),
             }),
         )]),
@@ -167,8 +151,8 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
         priority_class: 5,
         conditions: Cow::Borrowed(&[
             Condition::Known(KnownCondition::EventPropertyIs(EventPropertyIsCondition {
-                key: Cow::Borrowed("content.m\\.mentions.room"),
-                value: Cow::Borrowed(&SimpleJsonValue::Bool(true)),
+                key: Cow::Borrowed(r"content.m\.mentions.room"),
+                value: Cow::Owned(SimpleJsonValue::Bool(true)),
             })),
             Condition::Known(KnownCondition::SenderNotificationPermission {
                 key: Cow::Borrowed("room"),
@@ -241,6 +225,21 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
         default: true,
         default_enabled: true,
     },
+    // We don't want to notify on edits *unless* the edit directly mentions a
+    // user, which is handled above.
+    PushRule {
+        rule_id: Cow::Borrowed("global/override/.org.matrix.msc3958.suppress_edits"),
+        priority_class: 5,
+        conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventPropertyIs(
+            EventPropertyIsCondition {
+                key: Cow::Borrowed(r"content.m\.relates_to.rel_type"),
+                value: Cow::Owned(SimpleJsonValue::Str(Cow::Borrowed("m.replace"))),
+            },
+        ))]),
+        actions: Cow::Borrowed(&[]),
+        default: true,
+        default_enabled: true,
+    },
     PushRule {
         rule_id: Cow::Borrowed("global/override/.org.matrix.msc3930.rule.poll_response"),
         priority_class: 5,

@@ -117,7 +117,7 @@ impl PushRuleEvaluator {
         msc3931_enabled: bool,
     ) -> Result<Self, Error> {
         let body = match flattened_keys.get("content.body") {
-            Some(JsonValue::Value(SimpleJsonValue::Str(s))) => s.clone(),
+            Some(JsonValue::Value(SimpleJsonValue::Str(s))) => s.clone().into_owned(),
             _ => String::new(),
         };

@@ -313,13 +313,15 @@ impl PushRuleEvaluator {
                 };

                 let pattern = match &*exact_event_match.value_type {
-                    EventMatchPatternType::UserId => user_id,
-                    EventMatchPatternType::UserLocalpart => get_localpart_from_id(user_id)?,
+                    EventMatchPatternType::UserId => user_id.to_owned(),
+                    EventMatchPatternType::UserLocalpart => {
+                        get_localpart_from_id(user_id)?.to_owned()
+                    }
                 };

                 self.match_event_property_contains(
                     exact_event_match.key.clone(),
-                    Cow::Borrowed(&SimpleJsonValue::Str(pattern.to_string())),
+                    Cow::Borrowed(&SimpleJsonValue::Str(Cow::Owned(pattern))),
                 )?
             }
             KnownCondition::ContainsDisplayName => {
@@ -494,7 +496,7 @@ fn push_rule_evaluator() {
     let mut flattened_keys = BTreeMap::new();
     flattened_keys.insert(
         "content.body".to_string(),
-        JsonValue::Value(SimpleJsonValue::Str("foo bar bob hello".to_string())),
+        JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("foo bar bob hello"))),
     );
     let evaluator = PushRuleEvaluator::py_new(
         flattened_keys,
@@ -522,7 +524,7 @@ fn test_requires_room_version_supports_condition() {
     let mut flattened_keys = BTreeMap::new();
     flattened_keys.insert(
         "content.body".to_string(),
-        JsonValue::Value(SimpleJsonValue::Str("foo bar bob hello".to_string())),
+        JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("foo bar bob hello"))),
     );
     let flags = vec![RoomVersionFeatures::ExtensibleEvents.as_str().to_string()];
     let evaluator = PushRuleEvaluator::py_new(

@@ -256,7 +256,7 @@ impl<'de> Deserialize<'de> for Action {
 #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
 #[serde(untagged)]
 pub enum SimpleJsonValue {
-    Str(String),
+    Str(Cow<'static, str>),
     Int(i64),
     Bool(bool),
     Null,
@@ -265,7 +265,7 @@ pub enum SimpleJsonValue {
 impl<'source> FromPyObject<'source> for SimpleJsonValue {
     fn extract(ob: &'source PyAny) -> PyResult<Self> {
         if let Ok(s) = <PyString as pyo3::PyTryFrom>::try_from(ob) {
-            Ok(SimpleJsonValue::Str(s.to_string()))
+            Ok(SimpleJsonValue::Str(Cow::Owned(s.to_string())))
         // A bool *is* an int, ensure we try bool first.
         } else if let Ok(b) = <PyBool as pyo3::PyTryFrom>::try_from(ob) {
             Ok(SimpleJsonValue::Bool(b.extract()?))
@@ -585,7 +585,7 @@ impl FilteredPushRules {
         }

         if !self.msc3958_suppress_edits_enabled
-            && rule.rule_id == "global/override/.com.beeper.suppress_edits"
+            && rule.rule_id == "global/override/.org.matrix.msc3958.suppress_edits"
         {
             return false;
         }

@@ -34,6 +34,7 @@ DISTS = (
     "ubuntu:jammy",  # 22.04 LTS (EOL 2027-04) (our EOL forced by Python 3.10 is 2026-10-04)
     "ubuntu:kinetic",  # 22.10 (EOL 2023-07-20) (our EOL forced by Python 3.10 is 2026-10-04)
     "ubuntu:lunar",  # 23.04 (EOL 2024-01) (our EOL forced by Python 3.11 is 2027-10-24)
+    "debian:trixie",  # (EOL not specified yet)
 )

 DESC = """\
@@ -46,7 +47,7 @@ can be passed on the commandline for debugging.
 projdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))


-class Builder(object):
+class Builder:
     def __init__(
         self,
         redirect_stdout: bool = False,

@@ -43,7 +43,7 @@ def main(force_colors: bool) -> None:
     diffs: List[git.Diff] = repo.remote().refs.develop.commit.diff(None)

     # Get the schema version of the local file to check against current schema on develop
-    with open("synapse/storage/schema/__init__.py", "r") as file:
+    with open("synapse/storage/schema/__init__.py") as file:
         local_schema = file.read()
     new_locals: Dict[str, Any] = {}
     exec(local_schema, new_locals)

@@ -247,7 +247,7 @@ def main() -> None:


 def read_args_from_config(args: argparse.Namespace) -> None:
-    with open(args.config, "r") as fh:
+    with open(args.config) as fh:
         config = yaml.safe_load(fh)

     if not args.server_name:

@@ -1,5 +1,4 @@
 #!/usr/bin/env python
-# -*- coding: utf-8 -*-
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -145,7 +145,7 @@ Example usage:


 def read_args_from_config(args: argparse.Namespace) -> None:
-    with open(args.config, "r") as fh:
+    with open(args.config) as fh:
         config = yaml.safe_load(fh)
     if not args.server_name:
         args.server_name = config["server_name"]

@@ -25,7 +25,11 @@ from synapse.util.rust import check_rust_lib_up_to_date
 from synapse.util.stringutils import strtobool

 # Check that we're not running on an unsupported Python version.
-if sys.version_info < (3, 8):
+#
+# Note that we use an (unneeded) variable here so that pyupgrade doesn't nuke the
+# if-statement completely.
+py_version = sys.version_info
+if py_version < (3, 8):
     print("Synapse requires Python 3.8 or above.")
     sys.exit(1)

@@ -78,7 +82,7 @@ try:
 except ImportError:
     pass

-import synapse.util
+import synapse.util  # noqa: E402

 __version__ = synapse.util.SYNAPSE_VERSION

@@ -123,7 +123,7 @@ BOOLEAN_COLUMNS = {
     "redactions": ["have_censored"],
     "room_stats_state": ["is_federatable"],
     "rooms": ["is_public", "has_auth_chain_index"],
-    "users": ["shadow_banned", "approved"],
+    "users": ["shadow_banned", "approved", "locked"],
     "un_partial_stated_event_stream": ["rejection_status_changed"],
     "users_who_share_rooms": ["share_private"],
     "per_user_experimental_features": ["enabled"],
@@ -761,7 +761,7 @@ class Porter:

         # Step 2. Set up sequences
         #
-        # We do this before porting the tables so that event if we fail half
+        # We do this before porting the tables so that even if we fail half
         # way through the postgres DB always have sequences that are greater
         # than their respective tables. If we don't then creating the
         # `DataStore` object will fail due to the inconsistency.
@@ -769,6 +769,10 @@ class Porter:
         await self._setup_state_group_id_seq()
         await self._setup_user_id_seq()
         await self._setup_events_stream_seqs()
+        await self._setup_sequence(
+            "un_partial_stated_event_stream_sequence",
+            ("un_partial_stated_event_stream",),
+        )
         await self._setup_sequence(
             "device_inbox_sequence", ("device_inbox", "device_federation_outbox")
         )
@@ -779,6 +783,11 @@ class Porter:
         await self._setup_sequence("receipts_sequence", ("receipts_linearized",))
         await self._setup_sequence("presence_stream_sequence", ("presence_stream",))
         await self._setup_auth_chain_sequence()
+        await self._setup_sequence(
+            "application_services_txn_id_seq",
+            ("application_services_txns",),
+            "txn_id",
+        )

         # Step 3. Get tables.
         self.progress.set_state("Fetching tables")
@@ -1083,7 +1092,10 @@ class Porter:
         )

     async def _setup_sequence(
-        self, sequence_name: str, stream_id_tables: Iterable[str]
+        self,
+        sequence_name: str,
+        stream_id_tables: Iterable[str],
+        column_name: str = "stream_id",
     ) -> None:
         """Set a sequence to the correct value."""
         current_stream_ids = []
@@ -1093,7 +1105,7 @@ class Porter:
                 await self.sqlite_store.db_pool.simple_select_one_onecol(
                     table=stream_id_table,
                     keyvalues={},
-                    retcol="COALESCE(MAX(stream_id), 1)",
+                    retcol=f"COALESCE(MAX({column_name}), 1)",
                     allow_none=True,
                 ),
             )
@@ -1193,10 +1205,10 @@ class CursesProgress(Progress):
         self.total_processed = 0
         self.total_remaining = 0

-        super(CursesProgress, self).__init__()
+        super().__init__()

     def update(self, table: str, num_done: int) -> None:
-        super(CursesProgress, self).update(table, num_done)
+        super().update(table, num_done)

         self.total_processed = 0
         self.total_remaining = 0
@@ -1292,7 +1304,7 @@ class TerminalProgress(Progress):
     """Just prints progress to the terminal"""

     def update(self, table: str, num_done: int) -> None:
-        super(TerminalProgress, self).update(table, num_done)
+        super().update(table, num_done)

         data = self.tables[table]

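The port-script hunks above generalise `_setup_sequence` so that sequences keyed on a column other than `stream_id` (here `txn_id` for `application_services_txns`) can be ported too. A minimal sketch of the underlying idea, using a hypothetical psycopg2-style `conn` and illustrative table names (not the script's actual helpers):

# Sketch: position a Postgres sequence above the max of a column across tables.
def setup_sequence(conn, sequence_name, tables, column_name="stream_id"):
    max_ids = []
    for table in tables:
        with conn.cursor() as cur:
            # COALESCE to 1 so empty tables still yield a sane floor.
            cur.execute(f"SELECT COALESCE(MAX({column_name}), 1) FROM {table}")
            max_ids.append(cur.fetchone()[0])
    next_id = max(max_ids) + 1
    with conn.cursor() as cur:
        # setval places the sequence so the next nextval() is above next_id,
        # keeping sequences ahead of their tables even after a partial port.
        cur.execute("SELECT setval(%s, %s)", (sequence_name, next_id))
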
@@ -38,7 +38,7 @@ class MockHomeserver(HomeServer):
     DATASTORE_CLASS = DataStore  # type: ignore [assignment]

     def __init__(self, config: HomeServerConfig):
-        super(MockHomeserver, self).__init__(
+        super().__init__(
             hostname=config.server.server_name,
             config=config,
             reactor=reactor,

@@ -60,6 +60,7 @@ class Auth(Protocol):
         request: SynapseRequest,
         allow_guest: bool = False,
         allow_expired: bool = False,
+        allow_locked: bool = False,
     ) -> Requester:
         """Get a registered user's ID.

@@ -58,6 +58,7 @@ class InternalAuth(BaseAuth):
         request: SynapseRequest,
         allow_guest: bool = False,
         allow_expired: bool = False,
+        allow_locked: bool = False,
     ) -> Requester:
         """Get a registered user's ID.

@@ -79,7 +80,7 @@ class InternalAuth(BaseAuth):
         parent_span = active_span()
         with start_active_span("get_user_by_req"):
             requester = await self._wrapped_get_user_by_req(
-                request, allow_guest, allow_expired
+                request, allow_guest, allow_expired, allow_locked
             )

             if parent_span:
@@ -107,6 +108,7 @@ class InternalAuth(BaseAuth):
         request: SynapseRequest,
         allow_guest: bool,
         allow_expired: bool,
+        allow_locked: bool,
     ) -> Requester:
         """Helper for get_user_by_req

@@ -126,6 +128,17 @@ class InternalAuth(BaseAuth):
                 access_token, allow_expired=allow_expired
             )

+            # Deny the request if the user account is locked.
+            if not allow_locked and await self.store.get_user_locked_status(
+                requester.user.to_string()
+            ):
+                raise AuthError(
+                    401,
+                    "User account has been locked",
+                    errcode=Codes.USER_LOCKED,
+                    additional_fields={"soft_logout": True},
+                )
+
             # Deny the request if the user account has expired.
             # This check is only done for regular users, not appservice ones.
             if not allow_expired:

@@ -20,6 +20,7 @@ from authlib.oauth2.auth import encode_client_secret_basic, encode_client_secret
 from authlib.oauth2.rfc7523 import ClientSecretJWT, PrivateKeyJWT, private_key_jwt_sign
 from authlib.oauth2.rfc7662 import IntrospectionToken
 from authlib.oidc.discovery import OpenIDProviderMetadata, get_well_known_url
+from prometheus_client import Histogram

 from twisted.web.client import readBody
 from twisted.web.http_headers import Headers
@@ -44,6 +45,13 @@ if TYPE_CHECKING:

 logger = logging.getLogger(__name__)

+introspection_response_timer = Histogram(
+    "synapse_api_auth_delegated_introspection_response",
+    "Time taken to get a response for an introspection request",
+    ["code"],
+)
+
+
 # Scope as defined by MSC2967
 # https://github.com/matrix-org/matrix-spec-proposals/pull/2967
 SCOPE_MATRIX_API = "urn:matrix:org.matrix.msc2967.client:api:*"
@@ -99,6 +107,7 @@ class MSC3861DelegatedAuth(BaseAuth):
         assert self._config.client_id, "No client_id provided"
         assert auth_method is not None, "Invalid client_auth_method provided"

+        self._clock = hs.get_clock()
         self._http_client = hs.get_proxied_http_client()
         self._hostname = hs.hostname
         self._admin_token = self._config.admin_token
@@ -163,14 +172,26 @@ class MSC3861DelegatedAuth(BaseAuth):
         # Do the actual request
         # We're not using the SimpleHttpClient util methods as we don't want to
         # check the HTTP status code, and we do the body encoding ourselves.
-        response = await self._http_client.request(
-            method="POST",
-            uri=uri,
-            data=body.encode("utf-8"),
-            headers=headers,
-        )
-
-        resp_body = await make_deferred_yieldable(readBody(response))
+        start_time = self._clock.time()
+        try:
+            response = await self._http_client.request(
+                method="POST",
+                uri=uri,
+                data=body.encode("utf-8"),
+                headers=headers,
+            )
+
+            resp_body = await make_deferred_yieldable(readBody(response))
+        except Exception:
+            end_time = self._clock.time()
+            introspection_response_timer.labels("ERR").observe(end_time - start_time)
+            raise
+
+        end_time = self._clock.time()
+        introspection_response_timer.labels(response.code).observe(
+            end_time - start_time
+        )

         if response.code < 200 or response.code >= 300:
             raise HttpResponseException(
@@ -196,6 +217,7 @@ class MSC3861DelegatedAuth(BaseAuth):
         request: SynapseRequest,
         allow_guest: bool = False,
         allow_expired: bool = False,
+        allow_locked: bool = False,
     ) -> Requester:
         access_token = self.get_access_token_from_request(request)

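The pattern in the hunk above (time the request, record an `ERR` label on exception, otherwise label by status code) is a standard way to get per-outcome latency histograms out of prometheus_client. A self-contained sketch of the same pattern, with a stubbed `do_request` standing in for the real HTTP call:

import time

from prometheus_client import Histogram

# Same shape as the metric added above, under an illustrative name.
request_timer = Histogram(
    "example_introspection_response",
    "Time taken to get a response for an introspection request",
    ["code"],
)

def do_request() -> int:
    return 200  # stub standing in for the real introspection call

def timed_request() -> int:
    start = time.monotonic()
    try:
        code = do_request()
    except Exception:
        # Failures get their own label so error latency is visible too.
        request_timer.labels("ERR").observe(time.monotonic() - start)
        raise
    request_timer.labels(code).observe(time.monotonic() - start)
    return code
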
@@ -18,8 +18,7 @@
 """Contains constants from the specification."""

 import enum
-
-from typing_extensions import Final
+from typing import Final

 # the max size of a (canonical-json-encoded) event
 MAX_PDU_SIZE = 65536

@@ -80,6 +80,8 @@ class Codes(str, Enum):
     WEAK_PASSWORD = "M_WEAK_PASSWORD"
     INVALID_SIGNATURE = "M_INVALID_SIGNATURE"
     USER_DEACTIVATED = "M_USER_DEACTIVATED"
+    # USER_LOCKED = "M_USER_LOCKED"
+    USER_LOCKED = "ORG_MATRIX_MSC3939_USER_LOCKED"

     # Part of MSC3848
     # https://github.com/matrix-org/matrix-spec-proposals/pull/3848

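For reference, a client hitting the locked-account check introduced earlier should see a 401 carrying this new code. The exact JSON below is an assumption inferred from the `AuthError` fields in that hunk (message, errcode, and the `soft_logout` additional field), shown as a Python literal:

# Assumed response body for a request from a locked account.
locked_response = {
    "errcode": "ORG_MATRIX_MSC3939_USER_LOCKED",
    "error": "User account has been locked",
    "soft_logout": True,  # tells clients to soft-log-out rather than wipe state
}
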
@@ -91,6 +91,7 @@ from synapse.storage.databases.main.state import StateGroupWorkerStore
 from synapse.storage.databases.main.stats import StatsStore
 from synapse.storage.databases.main.stream import StreamWorkerStore
 from synapse.storage.databases.main.tags import TagsWorkerStore
+from synapse.storage.databases.main.task_scheduler import TaskSchedulerWorkerStore
 from synapse.storage.databases.main.transactions import TransactionWorkerStore
 from synapse.storage.databases.main.ui_auth import UIAuthWorkerStore
 from synapse.storage.databases.main.user_directory import UserDirectoryStore
@@ -144,6 +145,7 @@ class GenericWorkerStore(
     TransactionWorkerStore,
     LockStore,
     SessionStore,
+    TaskSchedulerWorkerStore,
 ):
     # Properties that multiple storage classes define. Tell mypy what the
     # expected type is.

@@ -16,9 +16,6 @@ import logging
 import urllib.parse
 from typing import (
     TYPE_CHECKING,
-    Any,
-    Awaitable,
-    Callable,
     Dict,
     Iterable,
     List,
@@ -27,10 +24,11 @@ from typing import (
     Sequence,
     Tuple,
     TypeVar,
+    Union,
 )

 from prometheus_client import Counter
-from typing_extensions import Concatenate, ParamSpec, TypeGuard
+from typing_extensions import ParamSpec, TypeGuard

 from synapse.api.constants import EventTypes, Membership, ThirdPartyEntityKind
 from synapse.api.errors import CodeMessageException, HttpResponseException
@@ -80,9 +78,7 @@ sent_todevice_counter = Counter(

 HOUR_IN_MS = 60 * 60 * 1000

-
 APP_SERVICE_PREFIX = "/_matrix/app/v1"
-APP_SERVICE_UNSTABLE_PREFIX = "/_matrix/app/unstable"

 P = ParamSpec("P")
 R = TypeVar("R")
@@ -123,52 +119,12 @@ class ApplicationServiceApi(SimpleHttpClient):
     def __init__(self, hs: "HomeServer"):
         super().__init__(hs)
         self.clock = hs.get_clock()
+        self.config = hs.config.appservice

         self.protocol_meta_cache: ResponseCache[Tuple[str, str]] = ResponseCache(
             hs.get_clock(), "as_protocol_meta", timeout_ms=HOUR_IN_MS
         )

-    async def _send_with_fallbacks(
-        self,
-        service: "ApplicationService",
-        prefixes: List[str],
-        path: str,
-        func: Callable[Concatenate[str, P], Awaitable[R]],
-        *args: P.args,
-        **kwargs: P.kwargs,
-    ) -> R:
-        """
-        Attempt to call an application service with multiple paths, falling back
-        until one succeeds.
-
-        Args:
-            service: The appliacation service, this provides the base URL.
-            prefixes: A last of paths to try in order for the requests.
-            path: A suffix to append to each prefix.
-            func: The function to call, the first argument will be the full
-                endpoint to fetch. Other arguments are provided by args/kwargs.
-
-        Returns:
-            The return value of func.
-        """
-        for i, prefix in enumerate(prefixes, start=1):
-            uri = f"{service.url}{prefix}{path}"
-            try:
-                return await func(uri, *args, **kwargs)
-            except HttpResponseException as e:
-                # If an error is received that is due to an unrecognised path,
-                # fallback to next path (if one exists). Otherwise, consider it
-                # a legitimate error and raise.
-                if i < len(prefixes) and is_unknown_endpoint(e):
-                    continue
-                raise
-            except Exception:
-                # Unexpected exceptions get sent to the caller.
-                raise
-
-        # The function should always exit via the return or raise above this.
-        raise RuntimeError("Unexpected fallback behaviour. This should never be seen.")
-
     async def query_user(self, service: "ApplicationService", user_id: str) -> bool:
         if service.url is None:
             return False
@@ -177,12 +133,12 @@ class ApplicationServiceApi(SimpleHttpClient):
         assert service.hs_token is not None

         try:
-            response = await self._send_with_fallbacks(
-                service,
-                [APP_SERVICE_PREFIX, ""],
-                f"/users/{urllib.parse.quote(user_id)}",
-                self.get_json,
-                {"access_token": service.hs_token},
+            args = None
+            if self.config.use_appservice_legacy_authorization:
+                args = {"access_token": service.hs_token}
+            response = await self.get_json(
+                f"{service.url}{APP_SERVICE_PREFIX}/users/{urllib.parse.quote(user_id)}",
+                args,
                 headers={"Authorization": [f"Bearer {service.hs_token}"]},
             )
             if response is not None:  # just an empty json object
@@ -203,12 +159,12 @@ class ApplicationServiceApi(SimpleHttpClient):
         assert service.hs_token is not None

         try:
-            response = await self._send_with_fallbacks(
-                service,
-                [APP_SERVICE_PREFIX, ""],
-                f"/rooms/{urllib.parse.quote(alias)}",
-                self.get_json,
-                {"access_token": service.hs_token},
+            args = None
+            if self.config.use_appservice_legacy_authorization:
+                args = {"access_token": service.hs_token}
+            response = await self.get_json(
+                f"{service.url}{APP_SERVICE_PREFIX}/rooms/{urllib.parse.quote(alias)}",
+                args,
                 headers={"Authorization": [f"Bearer {service.hs_token}"]},
             )
             if response is not None:  # just an empty json object
@@ -241,15 +197,14 @@ class ApplicationServiceApi(SimpleHttpClient):
         assert service.hs_token is not None

         try:
-            args: Mapping[Any, Any] = {
-                **fields,
-                b"access_token": service.hs_token,
-            }
-            response = await self._send_with_fallbacks(
-                service,
-                [APP_SERVICE_PREFIX, APP_SERVICE_UNSTABLE_PREFIX],
-                f"/thirdparty/{kind}/{urllib.parse.quote(protocol)}",
-                self.get_json,
+            args: Mapping[bytes, Union[List[bytes], str]] = fields
+            if self.config.use_appservice_legacy_authorization:
+                args = {
+                    **fields,
+                    b"access_token": service.hs_token,
+                }
+            response = await self.get_json(
+                f"{service.url}{APP_SERVICE_PREFIX}/thirdparty/{kind}/{urllib.parse.quote(protocol)}",
                 args=args,
                 headers={"Authorization": [f"Bearer {service.hs_token}"]},
             )
@@ -285,12 +240,12 @@ class ApplicationServiceApi(SimpleHttpClient):
         # This is required by the configuration.
         assert service.hs_token is not None
         try:
-            info = await self._send_with_fallbacks(
-                service,
-                [APP_SERVICE_PREFIX, APP_SERVICE_UNSTABLE_PREFIX],
-                f"/thirdparty/protocol/{urllib.parse.quote(protocol)}",
-                self.get_json,
-                {"access_token": service.hs_token},
+            args = None
+            if self.config.use_appservice_legacy_authorization:
+                args = {"access_token": service.hs_token}
+            info = await self.get_json(
+                f"{service.url}{APP_SERVICE_PREFIX}/thirdparty/protocol/{urllib.parse.quote(protocol)}",
+                args,
                 headers={"Authorization": [f"Bearer {service.hs_token}"]},
             )

@@ -401,13 +356,14 @@ class ApplicationServiceApi(SimpleHttpClient):
         }

         try:
-            await self._send_with_fallbacks(
-                service,
-                [APP_SERVICE_PREFIX, ""],
-                f"/transactions/{urllib.parse.quote(str(txn_id))}",
-                self.put_json,
+            args = None
+            if self.config.use_appservice_legacy_authorization:
+                args = {"access_token": service.hs_token}
+
+            await self.put_json(
+                f"{service.url}{APP_SERVICE_PREFIX}/transactions/{urllib.parse.quote(str(txn_id))}",
                 json_body=body,
-                args={"access_token": service.hs_token},
+                args=args,
                 headers={"Authorization": [f"Bearer {service.hs_token}"]},
             )
             if logger.isEnabledFor(logging.DEBUG):

@@ -43,6 +43,14 @@ class AppServiceConfig(Config):
         )

         self.track_appservice_user_ips = config.get("track_appservice_user_ips", False)
+        self.use_appservice_legacy_authorization = config.get(
+            "use_appservice_legacy_authorization", False
+        )
+        if self.use_appservice_legacy_authorization:
+            logger.warning(
+                "The use of appservice legacy authorization via query params is deprecated"
+                " and should be considered insecure."
+            )


 def load_appservices(

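Putting the application-service API and config hunks together: the homeserver now always sends the `Authorization` header, and only adds the legacy `access_token` query parameter when `use_appservice_legacy_authorization` is set. A sketch of the two resulting request shapes, with a placeholder URL and token:

# Sketch of the two appservice auth styles; values are placeholders.
hs_token = "HS_TOKEN"
url = "https://as.example.com/_matrix/app/v1/users/%40alice%3Aexample.com"

headers = {"Authorization": [f"Bearer {hs_token}"]}  # always sent now

use_appservice_legacy_authorization = False  # homeserver option, default False
args = None
if use_appservice_legacy_authorization:
    # Deprecated: the token leaks into access logs and proxies via the query string.
    args = {"access_token": hs_token}
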
@@ -47,6 +47,10 @@ class CasConfig(Config):
                 required_attributes
             )

+            self.idp_name = cas_config.get("idp_name", "CAS")
+            self.idp_icon = cas_config.get("idp_icon")
+            self.idp_brand = cas_config.get("idp_brand")
+
         else:
             self.cas_server_url = None
             self.cas_service_url = None

@@ -173,6 +173,13 @@ class MSC3861:
                 ("enable_registration",),
             )

+        # We only need to test the user consent version, as if it must be set if the user_consent section was present in the config
+        if root.consent.user_consent_version is not None:
+            raise ConfigError(
+                "User consent cannot be enabled when OAuth delegation is enabled",
+                ("user_consent",),
+            )
+
         if (
             root.oidc.oidc_enabled
             or root.saml2.saml2_enabled
@@ -216,10 +223,10 @@ class MSC3861:
                 ("session_lifetime",),
             )

-        if not root.experimental.msc3970_enabled:
+        if root.registration.enable_3pid_changes:
             raise ConfigError(
-                "experimental_features.msc3970_enabled must be 'true' when OAuth delegation is enabled",
-                ("experimental_features", "msc3970_enabled"),
+                "enable_3pid_changes cannot be enabled when OAuth delegation is enabled",
+                ("enable_3pid_changes",),
             )

@@ -247,6 +254,27 @@ class ExperimentalConfig(Config):
         # MSC3026 (busy presence state)
         self.msc3026_enabled: bool = experimental.get("msc3026_enabled", False)

+        # MSC2697 (device dehydration)
+        # Enabled by default since this option was added after adding the feature.
+        # It is not recommended that both MSC2697 and MSC3814 both be enabled at
+        # once.
+        self.msc2697_enabled: bool = experimental.get("msc2697_enabled", True)
+
+        # MSC3814 (dehydrated devices with SSSS)
+        # This is an alternative method to achieve the same goals as MSC2697.
+        # It is not recommended that both MSC2697 and MSC3814 both be enabled at
+        # once.
+        self.msc3814_enabled: bool = experimental.get("msc3814_enabled", False)
+
+        if self.msc2697_enabled and self.msc3814_enabled:
+            raise ConfigError(
+                "MSC2697 and MSC3814 should not both be enabled.",
+                (
+                    "experimental_features",
+                    "msc3814_enabled",
+                ),
+            )
+
         # MSC3244 (room version capabilities)
         self.msc3244_enabled: bool = experimental.get("msc3244_enabled", True)

@@ -376,9 +404,6 @@ class ExperimentalConfig(Config):
                 "Invalid MSC3861 configuration", ("experimental", "msc3861")
             ) from exc

-        # MSC3970: Scope transaction IDs to devices
-        self.msc3970_enabled = experimental.get("msc3970_enabled", self.msc3861.enabled)
-
         # Check that none of the other config options conflict with MSC3861 when enabled
         self.msc3861.check_config_conflicts(self.root)

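The mutual-exclusion check above means a deployment can opt into MSC3814 only by explicitly turning MSC2697 off, since MSC2697 defaults to enabled. Expressed as a Python literal mirroring the `experimental_features` section of the config (key names taken from the hunk above):

# Sketch: the only combination that enables MSC3814 without tripping the
# ConfigError added above (msc2697_enabled defaults to True).
experimental_features = {
    "msc2697_enabled": False,  # must be disabled explicitly...
    "msc3814_enabled": True,   # ...before SSSS-based dehydration can be enabled
}
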
@@ -65,5 +65,23 @@ class FederationConfig(Config):
         self.max_long_retries = federation_config.get("max_long_retries", 10)
         self.max_short_retries = federation_config.get("max_short_retries", 3)

+        # Allow for the configuration of the backoff algorithm used
+        # when trying to reach an unavailable destination.
+        # Unlike previous configuration those values applies across
+        # multiple requests and the state of the backoff is stored on DB.
+        self.destination_min_retry_interval_ms = Config.parse_duration(
+            federation_config.get("destination_min_retry_interval", "10m")
+        )
+        self.destination_retry_multiplier = federation_config.get(
+            "destination_retry_multiplier", 2
+        )
+        self.destination_max_retry_interval_ms = min(
+            Config.parse_duration(
+                federation_config.get("destination_max_retry_interval", "7d")
+            ),
+            # Set a hard-limit to not overflow the database column.
+            2**62,
+        )
+

 _METRICS_FOR_DOMAINS_SCHEMA = {"type": "array", "items": {"type": "string"}}

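To make the new knobs concrete, the sketch below walks the per-destination retry interval from `destination_min_retry_interval` up to the capped `destination_max_retry_interval` using `destination_retry_multiplier`; the millisecond values mirror the defaults parsed above.

# Sketch: progression of the per-destination backoff with the defaults above.
MIN_RETRY_MS = 10 * 60 * 1000           # "10m"
MAX_RETRY_MS = 7 * 24 * 60 * 60 * 1000  # "7d" (also hard-capped at 2**62)
MULTIPLIER = 2

def next_interval(current_ms: int) -> int:
    return min(current_ms * MULTIPLIER, MAX_RETRY_MS)

interval = MIN_RETRY_MS
while interval < MAX_RETRY_MS:
    print(interval // 60000, "minutes")
    interval = next_interval(interval)
# Prints 10, 20, 40, 80, ... minutes until the 7-day ceiling is reached.
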
@@ -280,6 +280,20 @@ def _parse_oidc_config_dict(
         for x in oidc_config.get("attribute_requirements", [])
     ]

+    # Read from either `client_secret_path` or `client_secret`. If both exist, error.
+    client_secret = oidc_config.get("client_secret")
+    client_secret_path = oidc_config.get("client_secret_path")
+    if client_secret_path is not None:
+        if client_secret is None:
+            client_secret = read_file(
+                client_secret_path, config_path + ("client_secret_path",)
+            ).rstrip("\n")
+        else:
+            raise ConfigError(
+                "Cannot specify both client_secret and client_secret_path",
+                config_path + ("client_secret",),
+            )
+
     return OidcProviderConfig(
         idp_id=idp_id,
         idp_name=oidc_config.get("idp_name", "OIDC"),
@@ -288,7 +302,7 @@ def _parse_oidc_config_dict(
         discover=oidc_config.get("discover", True),
         issuer=oidc_config["issuer"],
         client_id=oidc_config["client_id"],
-        client_secret=oidc_config.get("client_secret"),
+        client_secret=client_secret,
         client_secret_jwt_key=client_secret_jwt_key,
         client_auth_method=oidc_config.get("client_auth_method", "client_secret_basic"),
         pkce_method=oidc_config.get("pkce_method", "auto"),

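The behaviour added above, in standalone form: exactly one of `client_secret` / `client_secret_path` may be set, and a file-sourced secret has trailing newlines stripped so files created with `echo` work as expected. A sketch with a plain `open` in place of Synapse's `read_file` helper:

from typing import Optional

# Sketch of the client_secret resolution logic added above.
def resolve_client_secret(oidc_config: dict) -> Optional[str]:
    client_secret = oidc_config.get("client_secret")
    client_secret_path = oidc_config.get("client_secret_path")
    if client_secret_path is not None:
        if client_secret is not None:
            raise ValueError("Cannot specify both client_secret and client_secret_path")
        with open(client_secret_path) as f:
            # rstrip so a trailing newline in the secret file is harmless
            return f.read().rstrip("\n")
    return client_secret
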
@@ -133,7 +133,16 @@ class RegistrationConfig(Config):

         self.enable_set_displayname = config.get("enable_set_displayname", True)
         self.enable_set_avatar_url = config.get("enable_set_avatar_url", True)
-        self.enable_3pid_changes = config.get("enable_3pid_changes", True)
+
+        # The default value of enable_3pid_changes is True, unless msc3861 is enabled.
+        msc3861_enabled = (
+            (config.get("experimental_features") or {})
+            .get("msc3861", {})
+            .get("enabled", False)
+        )
+        self.enable_3pid_changes = config.get(
+            "enable_3pid_changes", not msc3861_enabled
+        )

         self.disable_msisdn_registration = config.get(
             "disable_msisdn_registration", False

@@ -89,8 +89,14 @@ class SAML2Config(Config):
             "grandfathered_mxid_source_attribute", "uid"
         )

+        # refers to a SAML IdP entity ID
         self.saml2_idp_entityid = saml2_config.get("idp_entityid", None)

+        # IdP properties for Matrix clients
+        self.idp_name = saml2_config.get("idp_name", "SAML")
+        self.idp_icon = saml2_config.get("idp_icon")
+        self.idp_brand = saml2_config.get("idp_brand")
+
         # user_mapping_provider may be None if the key is present but has no value
         ump_dict = saml2_config.get("user_mapping_provider") or {}

@@ -35,3 +35,4 @@ class UserDirectoryConfig(Config):
         self.user_directory_search_prefer_local_users = user_directory_config.get(
             "prefer_local_users", False
         )
+        self.show_locked_users = user_directory_config.get("show_locked_users", False)

@@ -186,9 +186,6 @@ class EventContext(UnpersistedEventContextBase):
             ),
             "app_service_id": self.app_service.id if self.app_service else None,
             "partial_state": self.partial_state,
-            # add dummy delta_ids and prev_group for backwards compatibility
-            "delta_ids": None,
-            "prev_group": None,
         }

     @staticmethod
@@ -203,13 +200,6 @@ class EventContext(UnpersistedEventContextBase):
         Returns:
             The event context.
         """
-        # workaround for backwards/forwards compatibility: if the input doesn't have a value
-        # for "state_group_deltas" just assign an empty dict
-        state_group_deltas = input.get("state_group_deltas", None)
-        if state_group_deltas:
-            state_group_deltas = _decode_state_group_delta(state_group_deltas)
-        else:
-            state_group_deltas = {}

         context = EventContext(
             # We use the state_group and prev_state_id stuff to pull the
@@ -217,7 +207,7 @@ class EventContext(UnpersistedEventContextBase):
             storage=storage,
             state_group=input["state_group"],
             state_group_before_event=input["state_group_before_event"],
-            state_group_deltas=state_group_deltas,
+            state_group_deltas=_decode_state_group_delta(input["state_group_deltas"]),
             state_delta_due_to_event=_decode_state_dict(
                 input["state_delta_due_to_event"]
             ),

@@ -136,11 +136,13 @@ def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDic
         ]

     elif event_type == EventTypes.Create:
-        # MSC2176 rules state that create events cannot be redacted.
         if room_version.updated_redaction_rules:
-            return event_dict
+            # MSC2176 rules state that create events cannot have their `content` redacted.
+            new_content = event_dict["content"]
+        elif not room_version.implicit_room_creator:
+            # Some room versions give meaning to `creator`
+            add_fields("creator")

-        add_fields("creator")
     elif event_type == EventTypes.JoinRules:
         add_fields("join_rule")
         if room_version.restricted_join_rule:
@@ -392,7 +394,6 @@ def serialize_event(
     time_now_ms: int,
     *,
     config: SerializeEventConfig = _DEFAULT_SERIALIZE_EVENT_CONFIG,
-    msc3970_enabled: bool = False,
 ) -> JsonDict:
     """Serialize event for clients

@@ -400,8 +401,6 @@ def serialize_event(
         e
         time_now_ms
         config: Event serialization config
-        msc3970_enabled: Whether MSC3970 is enabled. It changes whether we should
-            include the `transaction_id` in the event's `unsigned` section.

     Returns:
         The serialized event dictionary.
@@ -427,38 +426,46 @@ def serialize_event(
             e.unsigned["redacted_because"],
             time_now_ms,
             config=config,
-            msc3970_enabled=msc3970_enabled,
         )

     # If we have a txn_id saved in the internal_metadata, we should include it in the
     # unsigned section of the event if it was sent by the same session as the one
     # requesting the event.
     txn_id: Optional[str] = getattr(e.internal_metadata, "txn_id", None)
-    if txn_id is not None and config.requester is not None:
-        # For the MSC3970 rules to be applied, we *need* to have the device ID in the
-        # event internal metadata. Since we were not recording them before, if it hasn't
-        # been recorded, we fallback to the old behaviour.
+    if (
+        txn_id is not None
+        and config.requester is not None
+        and config.requester.user.to_string() == e.sender
+    ):
+        # Some events do not have the device ID stored in the internal metadata,
+        # this includes old events as well as those created by appservice, guests,
+        # or with tokens minted with the admin API. For those events, fallback
+        # to using the access token instead.
         event_device_id: Optional[str] = getattr(e.internal_metadata, "device_id", None)
-        if msc3970_enabled and event_device_id is not None:
+        if event_device_id is not None:
            if event_device_id == config.requester.device_id:
                d["unsigned"]["transaction_id"] = txn_id

        else:
-            # The pre-MSC3970 behaviour is to only include the transaction ID if the
-            # event was sent from the same access token. For regular users, we can use
-            # the access token ID to determine this. For guests, we can't, but since
-            # each guest only has one access token, we can just check that the event was
-            # sent by the same user as the one requesting the event.
+            # Fallback behaviour: only include the transaction ID if the event
+            # was sent from the same access token.
+            #
+            # For regular users, the access token ID can be used to determine this.
+            # This includes access tokens minted with the admin API.
+            #
+            # For guests and appservice users, we can't check the access token ID
+            # so assume it is the same session.
            event_token_id: Optional[int] = getattr(
                e.internal_metadata, "token_id", None
            )
-            if config.requester.user.to_string() == e.sender and (
+            if (
+                (
                    event_token_id is not None
                    and config.requester.access_token_id is not None
                    and event_token_id == config.requester.access_token_id
+                )
+                or config.requester.is_guest
+                or config.requester.app_service
            ):
                d["unsigned"]["transaction_id"] = txn_id
@@ -473,14 +480,16 @@ def serialize_event(
     if config.as_client_event:
         d = config.event_format(d)

-    # If the event is a redaction, copy the redacts field from the content to
-    # top-level for backwards compatibility.
-    if (
-        e.type == EventTypes.Redaction
-        and e.room_version.updated_redaction_rules
-        and e.redacts is not None
-    ):
-        d["redacts"] = e.redacts
+    # If the event is a redaction, the field with the redacted event ID appears
+    # in a different location depending on the room version. e.redacts handles
+    # fetching from the proper location; copy it to the other location for forwards-
+    # and backwards-compatibility with clients.
+    if e.type == EventTypes.Redaction and e.redacts is not None:
+        if e.room_version.updated_redaction_rules:
+            d["redacts"] = e.redacts
+        else:
+            d["content"] = dict(d["content"])
+            d["content"]["redacts"] = e.redacts

     only_event_fields = config.only_event_fields
     if only_event_fields:
@@ -500,9 +509,6 @@ class EventClientSerializer:
     clients.
     """

-    def __init__(self, *, msc3970_enabled: bool = False):
-        self._msc3970_enabled = msc3970_enabled
-
     def serialize_event(
         self,
         event: Union[JsonDict, EventBase],
@@ -527,9 +533,7 @@ class EventClientSerializer:
         if not isinstance(event, EventBase):
             return event

-        serialized_event = serialize_event(
-            event, time_now, config=config, msc3970_enabled=self._msc3970_enabled
-        )
+        serialized_event = serialize_event(event, time_now, config=config)

         # Check if there are any bundled aggregations to include with the event.
         if bundle_aggregations:

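The serialization hunk above drops the MSC3970 flag in favour of unconditional device-scoped matching with a token-scoped fallback. The whole decision reduces to roughly the predicate below (a sketch; attribute names follow the hunk, the function itself is illustrative):

from typing import Optional

# Sketch of when `unsigned.transaction_id` is included, per the hunk above.
def include_transaction_id(
    txn_id: Optional[str],
    requester_user: Optional[str],
    sender: str,
    event_device_id: Optional[str],
    requester_device_id: Optional[str],
    event_token_id: Optional[int],
    requester_token_id: Optional[int],
    is_guest: bool,
    is_appservice: bool,
) -> bool:
    if txn_id is None or requester_user is None or requester_user != sender:
        return False
    if event_device_id is not None:
        # Device ID recorded: transactions are scoped to the device (MSC3970).
        return event_device_id == requester_device_id
    # Fallback for old events, appservices, guests, and admin-minted tokens.
    return (
        (
            event_token_id is not None
            and requester_token_id is not None
            and event_token_id == requester_token_id
        )
        or is_guest
        or is_appservice
    )
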
@@ -63,6 +63,7 @@ from synapse.federation.federation_base import (
 )
 from synapse.federation.persistence import TransactionActions
 from synapse.federation.units import Edu, Transaction
+from synapse.handlers.worker_lock import NEW_EVENT_DURING_PURGE_LOCK_NAME
 from synapse.http.servlet import assert_params_in_dict
 from synapse.logging.context import (
     make_deferred_yieldable,
@@ -137,6 +138,7 @@ class FederationServer(FederationBase):
         self._event_auth_handler = hs.get_event_auth_handler()
         self._room_member_handler = hs.get_room_member_handler()
         self._e2e_keys_handler = hs.get_e2e_keys_handler()
+        self._worker_lock_handler = hs.get_worker_locks_handler()

         self._state_storage_controller = hs.get_storage_controllers().state

@@ -1236,9 +1238,18 @@ class FederationServer(FederationBase):
         logger.info("handling received PDU in room %s: %s", room_id, event)
         try:
             with nested_logging_context(event.event_id):
-                await self._federation_event_handler.on_receive_pdu(
-                    origin, event
-                )
+                # We're taking out a lock within a lock, which could
+                # lead to deadlocks if we're not careful. However, it is
+                # safe on this occasion as we only ever take a write
+                # lock when deleting a room, which we would never do
+                # while holding the `_INBOUND_EVENT_HANDLING_LOCK_NAME`
+                # lock.
+                async with self._worker_lock_handler.acquire_read_write_lock(
+                    NEW_EVENT_DURING_PURGE_LOCK_NAME, room_id, write=False
+                ):
+                    await self._federation_event_handler.on_receive_pdu(
+                        origin, event
+                    )
         except FederationError as e:
             # XXX: Ideally we'd inform the remote we failed to process
             # the event, but we can't return an error in the transaction

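The comment in the hunk above states the key invariant: inbound PDU handling takes the purge lock in read mode, and the only writer is room deletion, so nesting it inside the inbound-event lock cannot deadlock. A generic sketch of the usage pattern (the `lock_handler` object and lock name are illustrative; Synapse's real handler coordinates the lock across workers via the database):

# Sketch: many readers (event handling) may hold the lock concurrently;
# the single writer (room purge/deletion) waits for them and blocks new ones.
async def handle_pdu(lock_handler, room_id, origin, event, handler):
    async with lock_handler.acquire_read_write_lock(
        "new_event_during_purge", room_id, write=False
    ):
        await handler.on_receive_pdu(origin, event)

async def purge_room(lock_handler, room_id, do_purge):
    async with lock_handler.acquire_read_write_lock(
        "new_event_during_purge", room_id, write=True
    ):
        await do_purge(room_id)
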
@@ -67,6 +67,7 @@ class AdminHandler:
             "name",
             "admin",
             "deactivated",
+            "locked",
             "shadow_banned",
             "creation_ts",
             "appservice_id",

@@ -76,12 +76,13 @@ class CasHandler:
         self.idp_id = "cas"

         # user-facing name of this auth provider
-        self.idp_name = "CAS"
+        self.idp_name = hs.config.cas.idp_name

-        # we do not currently support brands/icons for CAS auth, but this is required by
-        # the SsoIdentityProvider protocol type.
-        self.idp_icon = None
-        self.idp_brand = None
+        # MXC URI for icon for this auth provider
+        self.idp_icon = hs.config.cas.idp_icon
+
+        # optional brand identifier for this auth provider
+        self.idp_brand = hs.config.cas.idp_brand

         self._sso_handler = hs.get_sso_handler()

@@ -385,6 +385,7 @@ class DeviceHandler(DeviceWorkerHandler):
         self.federation_sender = hs.get_federation_sender()
         self._account_data_handler = hs.get_account_data_handler()
         self._storage_controllers = hs.get_storage_controllers()
+        self.db_pool = hs.get_datastores().main.db_pool

         self.device_list_updater = DeviceListUpdater(hs, self)

@@ -653,29 +654,38 @@ class DeviceHandler(DeviceWorkerHandler):
     async def store_dehydrated_device(
         self,
         user_id: str,
+        device_id: Optional[str],
         device_data: JsonDict,
         initial_device_display_name: Optional[str] = None,
+        keys_for_device: Optional[JsonDict] = None,
     ) -> str:
-        """Store a dehydrated device for a user. If the user had a previous
-        dehydrated device, it is removed.
+        """Store a dehydrated device for a user, optionally storing the keys associated with
+        it as well. If the user had a previous dehydrated device, it is removed.

         Args:
             user_id: the user that we are storing the device for
+            device_id: device id supplied by client
             device_data: the dehydrated device information
             initial_device_display_name: The display name to use for the device
+            keys_for_device: keys for the dehydrated device
+
         Returns:
             device id of the dehydrated device
         """
         device_id = await self.check_device_registered(
             user_id,
-            None,
+            device_id,
             initial_device_display_name,
         )
+
+        time_now = self.clock.time_msec()
+
         old_device_id = await self.store.store_dehydrated_device(
-            user_id, device_id, device_data
+            user_id, device_id, device_data, time_now, keys_for_device
         )
+
         if old_device_id is not None:
             await self.delete_devices(user_id, [old_device_id])

         return device_id

@@ -720,6 +730,22 @@ class DeviceHandler(DeviceWorkerHandler):

         return {"success": True}

+    async def delete_dehydrated_device(self, user_id: str, device_id: str) -> None:
+        """
+        Delete a stored dehydrated device.
+
+        Args:
+            user_id: the user_id to delete the device from
+            device_id: id of the dehydrated device to delete
+        """
+        success = await self.store.remove_dehydrated_device(user_id, device_id)
+
+        if not success:
+            raise errors.NotFoundError()
+
+        await self.delete_devices(user_id, [device_id])
+        await self.store.delete_e2e_keys_by_device(user_id=user_id, device_id=device_id)
+
     @wrap_as_background_process("_handle_new_device_update_async")
     async def _handle_new_device_update_async(self) -> None:
         """Called when we have a new local device list update that we need to
@@ -1124,7 +1150,14 @@ class DeviceListUpdater(DeviceListWorkerUpdater):
         )

         if resync:
-            await self.multi_user_device_resync([user_id])
+            # We mark as stale up front in case we get restarted.
+            await self.store.mark_remote_users_device_caches_as_stale([user_id])
+            run_as_background_process(
+                "_maybe_retry_device_resync",
+                self.multi_user_device_resync,
+                [user_id],
+                False,
+            )
         else:
             # Simply update the single device, since we know that is the only
             # change (because of the single prev_id matching the current cache)

@@ -13,10 +13,11 @@
 # limitations under the License.

 import logging
-from typing import TYPE_CHECKING, Any, Dict
+from http import HTTPStatus
+from typing import TYPE_CHECKING, Any, Dict, Optional

 from synapse.api.constants import EduTypes, EventContentFields, ToDeviceEventTypes
-from synapse.api.errors import SynapseError
+from synapse.api.errors import Codes, SynapseError
 from synapse.api.ratelimiting import Ratelimiter
 from synapse.logging.context import run_in_background
 from synapse.logging.opentracing import (
@@ -48,6 +49,9 @@ class DeviceMessageHandler:
         self.store = hs.get_datastores().main
         self.notifier = hs.get_notifier()
         self.is_mine = hs.is_mine
+        if hs.config.experimental.msc3814_enabled:
+            self.event_sources = hs.get_event_sources()
+            self.device_handler = hs.get_device_handler()

         # We only need to poke the federation sender explicitly if its on the
         # same instance. Other federation sender instances will get notified by
@@ -303,3 +307,90 @@ class DeviceMessageHandler:
             # Enqueue a new federation transaction to send the new
             # device messages to each remote destination.
             self.federation_sender.send_device_messages(destination)
+
+    async def get_events_for_dehydrated_device(
+        self,
+        requester: Requester,
+        device_id: str,
+        since_token: Optional[str],
+        limit: int,
+    ) -> JsonDict:
+        """Fetches up to `limit` events sent to `device_id` starting from `since_token`
+        and returns the new since token. If there are no more messages, returns an empty
+        array.
+
+        Args:
+            requester: the user requesting the messages
+            device_id: ID of the dehydrated device
+            since_token: stream id to start from when fetching messages
+            limit: the number of messages to fetch
+        Returns:
+            A dict containing the to-device messages, as well as a token that the client
+            can provide in the next call to fetch the next batch of messages
+        """
+
+        user_id = requester.user.to_string()
+
+        # only allow fetching messages for the dehydrated device id currently associated
+        # with the user
+        dehydrated_device = await self.device_handler.get_dehydrated_device(user_id)
+        if dehydrated_device is None:
+            raise SynapseError(
+                HTTPStatus.FORBIDDEN,
+                "No dehydrated device exists",
+                Codes.FORBIDDEN,
+            )
+
+        dehydrated_device_id, _ = dehydrated_device
+        if device_id != dehydrated_device_id:
+            raise SynapseError(
+                HTTPStatus.FORBIDDEN,
+                "You may only fetch messages for your dehydrated device",
+                Codes.FORBIDDEN,
+            )
+
+        since_stream_id = 0
+        if since_token:
+            if not since_token.startswith("d"):
+                raise SynapseError(
+                    HTTPStatus.BAD_REQUEST,
+                    "from parameter %r has an invalid format" % (since_token,),
+                    errcode=Codes.INVALID_PARAM,
+                )
+
+            try:
+                since_stream_id = int(since_token[1:])
+            except Exception:
+                raise SynapseError(
+                    HTTPStatus.BAD_REQUEST,
+                    "from parameter %r has an invalid format" % (since_token,),
+                    errcode=Codes.INVALID_PARAM,
+                )
+
+        to_token = self.event_sources.get_current_token().to_device_key
+
+        messages, stream_id = await self.store.get_messages_for_device(
+            user_id, device_id, since_stream_id, to_token, limit
+        )
+
+        for message in messages:
+            # Remove the message id before sending to client
+            message_id = message.pop("message_id", None)
+            if message_id:
+                set_tag(SynapseTags.TO_DEVICE_EDU_ID, message_id)
+
+        logger.debug(
+            "Returning %d to-device messages between %d and %d (current token: %d) for "
+            "dehydrated device %s, user_id %s",
+            len(messages),
+            since_stream_id,
+            stream_id,
+            to_token,
+            device_id,
+            user_id,
+        )
+
+        return {
+            "events": messages,
+            "next_batch": f"d{stream_id}",
+        }

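The `since_token`/`next_batch` format in the handler above is simply a `d`-prefixed to-device stream position. A sketch of the round-trip, matching the parsing rules in the hunk (prefix check, then integer body):

# Sketch of the dehydrated-device batch token format used above.
def format_token(stream_id: int) -> str:
    return f"d{stream_id}"

def parse_token(token: str) -> int:
    if not token.startswith("d"):
        raise ValueError(f"from parameter {token!r} has an invalid format")
    try:
        return int(token[1:])
    except ValueError:
        raise ValueError(f"from parameter {token!r} has an invalid format")

assert parse_token(format_token(42)) == 42  # round-trips cleanly
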
@@ -60,6 +60,7 @@ from synapse.events import EventBase
 from synapse.events.snapshot import EventContext, UnpersistedEventContextBase
 from synapse.events.validator import EventValidator
 from synapse.federation.federation_client import InvalidResponseError
+from synapse.handlers.pagination import PURGE_PAGINATION_LOCK_NAME
 from synapse.http.servlet import assert_params_in_dict
 from synapse.logging.context import nested_logging_context
 from synapse.logging.opentracing import SynapseTags, set_tag, tag_args, trace
@@ -152,6 +153,7 @@ class FederationHandler:
         self._device_handler = hs.get_device_handler()
         self._bulk_push_rule_evaluator = hs.get_bulk_push_rule_evaluator()
         self._notifier = hs.get_notifier()
+        self._worker_locks = hs.get_worker_locks_handler()

         self._clean_room_for_join_client = ReplicationCleanRoomRestServlet.make_client(
             hs
@@ -200,7 +202,7 @@ class FederationHandler:
     @trace
     @tag_args
     async def maybe_backfill(
-        self, room_id: str, current_depth: int, limit: int
+        self, room_id: str, current_depth: int, limit: int, record_time: bool = True
     ) -> bool:
         """Checks the database to see if we should backfill before paginating,
         and if so do.
@@ -213,21 +215,25 @@ class FederationHandler:
             limit: The number of events that the pagination request will
                 return. This is used as part of the heuristic to decide if we
                 should back paginate.
+            record_time: Whether to record the time it takes to backfill.

         Returns:
             True if we actually tried to backfill something, otherwise False.
         """
         # Starting the processing time here so we can include the room backfill
         # linearizer lock queue in the timing
-        processing_start_time = self.clock.time_msec()
+        processing_start_time = self.clock.time_msec() if record_time else 0

         async with self._room_backfill.queue(room_id):
-            return await self._maybe_backfill_inner(
-                room_id,
-                current_depth,
-                limit,
-                processing_start_time=processing_start_time,
-            )
+            async with self._worker_locks.acquire_read_write_lock(
+                PURGE_PAGINATION_LOCK_NAME, room_id, write=False
+            ):
+                return await self._maybe_backfill_inner(
+                    room_id,
+                    current_depth,
+                    limit,
+                    processing_start_time=processing_start_time,
+                )

     @trace
     @tag_args
@@ -305,12 +311,21 @@ class FederationHandler:
         # of history that extends all the way back to where we are currently paginating
         # and it's within the 100 events that are returned from `/backfill`.
         if not sorted_backfill_points and current_depth != MAX_DEPTH:
+            # Check that we actually have later backfill points, if not just return.
+            have_later_backfill_points = await self.store.get_backfill_points_in_room(
+                room_id=room_id,
+                current_depth=MAX_DEPTH,
+                limit=1,
+            )
+            if not have_later_backfill_points:
+                return False
+
             logger.debug(
                 "_maybe_backfill_inner: all backfill points are *after* current depth. Trying again with later backfill points."
             )
             run_as_background_process(
                 "_maybe_backfill_inner_anyway_with_max_depth",
-                self._maybe_backfill_inner,
+                self.maybe_backfill,
                 room_id=room_id,
                 # We use `MAX_DEPTH` so that we find all backfill points next
                 # time (all events are below the `MAX_DEPTH`)
@@ -319,7 +334,7 @@ class FederationHandler:
                 # We don't want to start another timing observation from this
                 # nested recursive call. The top-most call can record the time
                 # overall otherwise the smaller one will throw off the results.
-                processing_start_time=None,
+                record_time=False,
             )
             # We return `False` because we're backfilling in the background and there is
             # no new events immediately for the caller to know about yet.

@@ -53,6 +53,7 @@ from synapse.events.snapshot import EventContext, UnpersistedEventContextBase
|
||||
from synapse.events.utils import SerializeEventConfig, maybe_upsert_event_field
|
||||
from synapse.events.validator import EventValidator
|
||||
from synapse.handlers.directory import DirectoryHandler
|
||||
from synapse.handlers.worker_lock import NEW_EVENT_DURING_PURGE_LOCK_NAME
|
||||
from synapse.logging import opentracing
|
||||
from synapse.logging.context import make_deferred_yieldable, run_in_background
|
||||
from synapse.metrics.background_process_metrics import run_as_background_process
|
||||
@@ -485,6 +486,7 @@ class EventCreationHandler:
|
||||
self._events_shard_config = self.config.worker.events_shard_config
|
||||
self._instance_name = hs.get_instance_name()
|
||||
self._notifier = hs.get_notifier()
|
||||
self._worker_lock_handler = hs.get_worker_locks_handler()
|
||||
|
||||
self.room_prejoin_state_types = self.hs.config.api.room_prejoin_state
|
||||
|
||||
@@ -559,8 +561,6 @@ class EventCreationHandler:
|
||||
expiry_ms=30 * 60 * 1000,
|
||||
)
|
||||
|
||||
self._msc3970_enabled = hs.config.experimental.msc3970_enabled
|
||||
|
||||
async def create_event(
|
||||
self,
|
||||
requester: Requester,
|
||||
@@ -876,6 +876,53 @@ class EventCreationHandler:
|
||||
return prev_event
|
||||
return None
|
||||
|
||||
async def get_event_id_from_transaction(
|
||||
self,
|
||||
requester: Requester,
|
||||
txn_id: str,
|
||||
room_id: str,
|
||||
) -> Optional[str]:
|
||||
"""For the given transaction ID and room ID, check if there is a matching event ID.
|
||||
|
||||
Args:
|
||||
requester: The requester making the request in the context of which we want
|
||||
to fetch the event.
|
||||
txn_id: The transaction ID.
|
||||
room_id: The room ID.
|
||||
|
||||
Returns:
|
||||
An event ID if one could be found, None otherwise.
|
||||
"""
|
||||
existing_event_id = None
|
||||
|
||||
# According to the spec, transactions are scoped to a user's device ID.
|
||||
if requester.device_id:
|
||||
existing_event_id = (
|
||||
await self.store.get_event_id_from_transaction_id_and_device_id(
|
||||
room_id,
|
||||
requester.user.to_string(),
|
||||
requester.device_id,
|
||||
txn_id,
|
||||
)
|
||||
)
|
||||
if existing_event_id:
|
||||
return existing_event_id
|
||||
|
||||
# Some requesters don't have device IDs (appservices, guests, and access
# tokens minted with the admin API); fall back to checking the access token
# ID, which should be close enough.
if requester.access_token_id:
existing_event_id = (
await self.store.get_event_id_from_transaction_id_and_token_id(
room_id,
requester.user.to_string(),
requester.access_token_id,
txn_id,
)
)

return existing_event_id

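# Illustrative only (not part of this changeset; variable names hypothetical):
# a caller can deduplicate a client retry via the helper above.
#
#     event_id = await self.get_event_id_from_transaction(requester, txn_id, room_id)
#     if event_id is not None:
#         # The same (device/token, txn_id) pair already produced an event;
#         # return it instead of creating a duplicate.
#         return event_id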
async def get_event_from_transaction(
self,
requester: Requester,
@@ -894,35 +941,11 @@ class EventCreationHandler:
Returns:
An event if one could be found, None otherwise.
"""

if self._msc3970_enabled and requester.device_id:
# When MSC3970 is enabled, we lookup for events sent by the same device first,
# and fallback to the old behaviour if none were found.
existing_event_id = (
await self.store.get_event_id_from_transaction_id_and_device_id(
room_id,
requester.user.to_string(),
requester.device_id,
txn_id,
)
)
if existing_event_id:
return await self.store.get_event(existing_event_id)

# Pre-MSC3970, we looked up for events that were sent by the same session by
# using the access token ID.
if requester.access_token_id:
existing_event_id = (
await self.store.get_event_id_from_transaction_id_and_token_id(
room_id,
requester.user.to_string(),
requester.access_token_id,
txn_id,
)
)
if existing_event_id:
return await self.store.get_event(existing_event_id)

existing_event_id = await self.get_event_id_from_transaction(
requester, txn_id, room_id
)
if existing_event_id:
return await self.store.get_event(existing_event_id)
return None

async def create_and_send_nonmember_event(
@@ -1010,6 +1033,37 @@ class EventCreationHandler:
event.internal_metadata.stream_ordering,
)

async with self._worker_lock_handler.acquire_read_write_lock(
NEW_EVENT_DURING_PURGE_LOCK_NAME, room_id, write=False
):
return await self._create_and_send_nonmember_event_locked(
requester=requester,
event_dict=event_dict,
allow_no_prev_events=allow_no_prev_events,
prev_event_ids=prev_event_ids,
state_event_ids=state_event_ids,
ratelimit=ratelimit,
txn_id=txn_id,
ignore_shadow_ban=ignore_shadow_ban,
outlier=outlier,
depth=depth,
)

async def _create_and_send_nonmember_event_locked(
self,
requester: Requester,
event_dict: dict,
allow_no_prev_events: bool = False,
prev_event_ids: Optional[List[str]] = None,
state_event_ids: Optional[List[str]] = None,
ratelimit: bool = True,
txn_id: Optional[str] = None,
ignore_shadow_ban: bool = False,
outlier: bool = False,
depth: Optional[int] = None,
) -> Tuple[EventBase, int]:
room_id = event_dict["room_id"]

# If we don't have any prev event IDs specified then we need to
# check that the host is in the room (as otherwise populating the
# prev events will fail), at which point we may as well check the
@@ -1565,12 +1619,11 @@ class EventCreationHandler:
if state_entry.state_group in self._external_cache_joined_hosts_updates:
return

state = await state_entry.get_state(
self._storage_controllers.state, StateFilter.all()
)
with opentracing.start_active_span("get_joined_hosts"):
joined_hosts = await self.store.get_joined_hosts(
event.room_id, state, state_entry
joined_hosts = (
await self._storage_controllers.state.get_joined_hosts(
event.room_id, state_entry
)
)

# Note that the expiry times must be larger than the expiry time in
@@ -1924,7 +1977,10 @@ class EventCreationHandler:
)

for room_id in room_ids:
dummy_event_sent = await self._send_dummy_event_for_room(room_id)
async with self._worker_lock_handler.acquire_read_write_lock(
NEW_EVENT_DURING_PURGE_LOCK_NAME, room_id, write=False
):
dummy_event_sent = await self._send_dummy_event_for_room(room_id)

if not dummy_event_sent:
# Did not find a valid user in the room, so remove from future attempts

@@ -24,6 +24,7 @@ from synapse.api.errors import SynapseError
from synapse.api.filtering import Filter
from synapse.events.utils import SerializeEventConfig
from synapse.handlers.room import ShutdownRoomResponse
from synapse.handlers.worker_lock import NEW_EVENT_DURING_PURGE_LOCK_NAME
from synapse.logging.opentracing import trace
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.rest.admin._base import assert_user_is_admin
@@ -46,6 +47,12 @@ logger = logging.getLogger(__name__)
BACKFILL_BECAUSE_TOO_MANY_GAPS_THRESHOLD = 3


# This is used to avoid purging a room several times at the same moment,
# and also paginating during a purge. Pagination can trigger backfill,
# which would create old events locally, and would potentially clash with the room delete.
PURGE_PAGINATION_LOCK_NAME = "purge_pagination_lock"

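# Illustrative only (not part of this changeset): the intended acquisition
# pattern for this lock, simplified from the handler code below. Pagination
# and backfill take the lock shared; purges take it exclusive.
#
#     async with self._worker_locks.acquire_read_write_lock(
#         PURGE_PAGINATION_LOCK_NAME, room_id, write=False  # paginating/backfilling
#     ):
#         ...
#
#     async with self._worker_locks.acquire_read_write_lock(
#         PURGE_PAGINATION_LOCK_NAME, room_id, write=True  # purging
#     ):
#         ...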
@attr.s(slots=True, auto_attribs=True)
class PurgeStatus:
"""Object tracking the status of a purge request
@@ -142,6 +149,7 @@ class PaginationHandler:
self._server_name = hs.hostname
self._room_shutdown_handler = hs.get_room_shutdown_handler()
self._relations_handler = hs.get_relations_handler()
self._worker_locks = hs.get_worker_locks_handler()

self.pagination_lock = ReadWriteLock()
# IDs of rooms in which there is currently an active purge *or delete* operation.
@@ -356,7 +364,9 @@ class PaginationHandler:
"""
self._purges_in_progress_by_room.add(room_id)
try:
async with self.pagination_lock.write(room_id):
async with self._worker_locks.acquire_read_write_lock(
PURGE_PAGINATION_LOCK_NAME, room_id, write=True
):
await self._storage_controllers.purge_events.purge_history(
room_id, token, delete_local_events
)
@@ -412,7 +422,13 @@ class PaginationHandler:
room_id: room to be purged
force: set true to skip checking for joined users.
"""
async with self.pagination_lock.write(room_id):
async with self._worker_locks.acquire_multi_read_write_lock(
[
(PURGE_PAGINATION_LOCK_NAME, room_id),
(NEW_EVENT_DURING_PURGE_LOCK_NAME, room_id),
],
write=True,
):
# first check that we have no users in this room
if not force:
joined = await self.store.is_host_joined(room_id, self._server_name)
@@ -471,153 +487,150 @@ class PaginationHandler:

room_token = from_token.room_key

async with self.pagination_lock.read(room_id):
(membership, member_event_id) = (None, None)
if not use_admin_priviledge:
(
membership,
member_event_id,
) = await self.auth.check_user_in_room_or_world_readable(
room_id, requester, allow_departed_users=True
)

if pagin_config.direction == Direction.BACKWARDS:
# if we're going backwards, we might need to backfill. This
# requires that we have a topo token.
if room_token.topological:
curr_topo = room_token.topological
else:
curr_topo = await self.store.get_current_topological_token(
room_id, room_token.stream
)

# If they have left the room then clamp the token to be before
# they left the room, to save the effort of loading from the
# database.
if (
pagin_config.direction == Direction.BACKWARDS
and not use_admin_priviledge
and membership == Membership.LEAVE
):
# This is only None if the room is world_readable, in which case
# "Membership.JOIN" would have been returned and we should never hit
# this branch.
assert member_event_id

leave_token = await self.store.get_topological_token_for_event(
member_event_id
)
assert leave_token.topological is not None

if leave_token.topological < curr_topo:
from_token = from_token.copy_and_replace(
StreamKeyType.ROOM, leave_token
)

to_room_key = None
if pagin_config.to_token:
to_room_key = pagin_config.to_token.room_key

# Initially fetch the events from the database. With any luck, we can return
# these without blocking on backfill (handled below).
events, next_key = await self.store.paginate_room_events(
room_id=room_id,
from_key=from_token.room_key,
to_key=to_room_key,
direction=pagin_config.direction,
limit=pagin_config.limit,
event_filter=event_filter,
(membership, member_event_id) = (None, None)
if not use_admin_priviledge:
(
membership,
member_event_id,
) = await self.auth.check_user_in_room_or_world_readable(
room_id, requester, allow_departed_users=True
)

if pagin_config.direction == Direction.BACKWARDS:
# We use a `Set` because there can be multiple events at a given depth
# and we only care about looking at the unique continuum of depths to
# find gaps.
event_depths: Set[int] = {event.depth for event in events}
sorted_event_depths = sorted(event_depths)

# Inspect the depths of the returned events to see if there are any gaps
found_big_gap = False
number_of_gaps = 0
previous_event_depth = (
sorted_event_depths[0] if len(sorted_event_depths) > 0 else 0
if pagin_config.direction == Direction.BACKWARDS:
# if we're going backwards, we might need to backfill. This
# requires that we have a topo token.
if room_token.topological:
curr_topo = room_token.topological
else:
curr_topo = await self.store.get_current_topological_token(
room_id, room_token.stream
)
for event_depth in sorted_event_depths:
# We don't expect a negative depth but we'll just deal with it in
# any case by taking the absolute value to get the true gap between
# any two integers.
depth_gap = abs(event_depth - previous_event_depth)
# A `depth_gap` of 1 is a normal continuous chain to the next event
# (1 <-- 2 <-- 3) so anything larger indicates a missing event (it's
# also possible there is no event at a given depth but we can't ever
# know that for sure)
if depth_gap > 1:
number_of_gaps += 1

# We only tolerate a small number of single-event long gaps in the
# returned events because those are most likely just events we've
# failed to pull in the past. Anything longer than that is probably
# a sign that we're missing a decent chunk of history and we should
# try to backfill it.
#
# XXX: It's possible we could tolerate longer gaps if we checked
# that a given event's `prev_events` is one that has failed pull
# attempts and we could just treat it like a dead branch of history
# for now or at least something that we don't need to block the
# client on to try pulling.
#
# XXX: If we had something like MSC3871 to indicate gaps in the
# timeline to the client, we could also get away with any sized gap
# and just have the client refetch the holes as they see fit.
if depth_gap > 2:
found_big_gap = True
break
previous_event_depth = event_depth
# If they have left the room then clamp the token to be before
# they left the room, to save the effort of loading from the
# database.
if (
pagin_config.direction == Direction.BACKWARDS
and not use_admin_priviledge
and membership == Membership.LEAVE
):
# This is only None if the room is world_readable, in which case
# "Membership.JOIN" would have been returned and we should never hit
# this branch.
assert member_event_id

# Backfill in the foreground if we found a big gap, have too many holes,
# or we don't have enough events to fill the limit that the client asked
# for.
missing_too_many_events = (
number_of_gaps > BACKFILL_BECAUSE_TOO_MANY_GAPS_THRESHOLD
leave_token = await self.store.get_topological_token_for_event(
member_event_id
)
assert leave_token.topological is not None

if leave_token.topological < curr_topo:
from_token = from_token.copy_and_replace(
StreamKeyType.ROOM, leave_token
)
not_enough_events_to_fill_response = len(events) < pagin_config.limit
if (
found_big_gap
or missing_too_many_events
or not_enough_events_to_fill_response
):
did_backfill = (
await self.hs.get_federation_handler().maybe_backfill(
room_id,
curr_topo,
limit=pagin_config.limit,
)
)

# If we did backfill something, refetch the events from the database to
# catch anything new that might have been added since we last fetched.
if did_backfill:
events, next_key = await self.store.paginate_room_events(
room_id=room_id,
from_key=from_token.room_key,
to_key=to_room_key,
direction=pagin_config.direction,
limit=pagin_config.limit,
event_filter=event_filter,
)
else:
# Otherwise, we can backfill in the background for eventual
# consistency's sake but we don't need to block the client waiting
# for a costly federation call and processing.
run_as_background_process(
"maybe_backfill_in_the_background",
self.hs.get_federation_handler().maybe_backfill,
room_id,
curr_topo,
to_room_key = None
if pagin_config.to_token:
to_room_key = pagin_config.to_token.room_key

# Initially fetch the events from the database. With any luck, we can return
# these without blocking on backfill (handled below).
events, next_key = await self.store.paginate_room_events(
room_id=room_id,
from_key=from_token.room_key,
to_key=to_room_key,
direction=pagin_config.direction,
limit=pagin_config.limit,
event_filter=event_filter,
)

if pagin_config.direction == Direction.BACKWARDS:
# We use a `Set` because there can be multiple events at a given depth
# and we only care about looking at the unique continuum of depths to
# find gaps.
event_depths: Set[int] = {event.depth for event in events}
sorted_event_depths = sorted(event_depths)

# Inspect the depths of the returned events to see if there are any gaps
found_big_gap = False
number_of_gaps = 0
previous_event_depth = (
sorted_event_depths[0] if len(sorted_event_depths) > 0 else 0
)
for event_depth in sorted_event_depths:
# We don't expect a negative depth but we'll just deal with it in
# any case by taking the absolute value to get the true gap between
# any two integers.
depth_gap = abs(event_depth - previous_event_depth)
# A `depth_gap` of 1 is a normal continuous chain to the next event
# (1 <-- 2 <-- 3) so anything larger indicates a missing event (it's
# also possible there is no event at a given depth but we can't ever
# know that for sure)
if depth_gap > 1:
number_of_gaps += 1

# We only tolerate a small number of single-event long gaps in the
# returned events because those are most likely just events we've
# failed to pull in the past. Anything longer than that is probably
# a sign that we're missing a decent chunk of history and we should
# try to backfill it.
#
# XXX: It's possible we could tolerate longer gaps if we checked
# that a given event's `prev_events` is one that has failed pull
# attempts and we could just treat it like a dead branch of history
# for now or at least something that we don't need to block the
# client on to try pulling.
#
# XXX: If we had something like MSC3871 to indicate gaps in the
# timeline to the client, we could also get away with any sized gap
# and just have the client refetch the holes as they see fit.
if depth_gap > 2:
found_big_gap = True
break
previous_event_depth = event_depth

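# Worked example (illustrative values only): sorted depths [3, 4, 7, 8] give
# successive gaps of 1, 3 and 1. The single gap of 3 bumps `number_of_gaps`
# to 1 and, being > 2, sets `found_big_gap`, which triggers the foreground
# backfill below.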
# Backfill in the foreground if we found a big gap, have too many holes,
# or we don't have enough events to fill the limit that the client asked
# for.
missing_too_many_events = (
number_of_gaps > BACKFILL_BECAUSE_TOO_MANY_GAPS_THRESHOLD
)
not_enough_events_to_fill_response = len(events) < pagin_config.limit
if (
found_big_gap
or missing_too_many_events
or not_enough_events_to_fill_response
):
did_backfill = await self.hs.get_federation_handler().maybe_backfill(
room_id,
curr_topo,
limit=pagin_config.limit,
)

# If we did backfill something, refetch the events from the database to
# catch anything new that might have been added since we last fetched.
if did_backfill:
events, next_key = await self.store.paginate_room_events(
room_id=room_id,
from_key=from_token.room_key,
to_key=to_room_key,
direction=pagin_config.direction,
limit=pagin_config.limit,
event_filter=event_filter,
)
else:
# Otherwise, we can backfill in the background for eventual
# consistency's sake but we don't need to block the client waiting
# for a costly federation call and processing.
run_as_background_process(
"maybe_backfill_in_the_background",
self.hs.get_federation_handler().maybe_backfill,
room_id,
curr_topo,
limit=pagin_config.limit,
)

next_token = from_token.copy_and_replace(StreamKeyType.ROOM, next_key)
next_token = from_token.copy_and_replace(StreamKeyType.ROOM, next_key)

# if no events are returned from pagination, that implies
# we have reached the end of the available events.
@@ -747,7 +760,9 @@ class PaginationHandler:

self._purges_in_progress_by_room.add(room_id)
try:
async with self.pagination_lock.write(room_id):
async with self._worker_locks.acquire_read_write_lock(
PURGE_PAGINATION_LOCK_NAME, room_id, write=True
):
self._delete_by_id[delete_id].status = DeleteStatus.STATUS_SHUTTING_DOWN
self._delete_by_id[
delete_id

@@ -30,9 +30,9 @@ from types import TracebackType
from typing import (
TYPE_CHECKING,
Any,
Awaitable,
Callable,
Collection,
ContextManager,
Dict,
Generator,
Iterable,
@@ -44,7 +44,6 @@ from typing import (
)

from prometheus_client import Counter
from typing_extensions import ContextManager

import synapse.metrics
from synapse.api.constants import EduTypes, EventTypes, Membership, PresenceState
@@ -54,7 +53,10 @@ from synapse.appservice import ApplicationService
from synapse.events.presence_router import PresenceRouter
from synapse.logging.context import run_in_background
from synapse.metrics import LaterGauge
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.metrics.background_process_metrics import (
run_as_background_process,
wrap_as_background_process,
)
from synapse.replication.http.presence import (
ReplicationBumpPresenceActiveTime,
ReplicationPresenceSetState,
@@ -141,6 +143,8 @@ class BasePresenceHandler(abc.ABC):
self.state = hs.get_state_handler()
self.is_mine_id = hs.is_mine_id

self._presence_enabled = hs.config.server.use_presence

self._federation = None
if hs.should_send_federation():
self._federation = hs.get_federation_sender()
@@ -149,6 +153,15 @@ class BasePresenceHandler(abc.ABC):

self._busy_presence_enabled = hs.config.experimental.msc3026_enabled

self.VALID_PRESENCE: Tuple[str, ...] = (
PresenceState.ONLINE,
PresenceState.UNAVAILABLE,
PresenceState.OFFLINE,
)

if self._busy_presence_enabled:
self.VALID_PRESENCE += (PresenceState.BUSY,)

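# Illustrative only: with MSC3026 disabled, VALID_PRESENCE is
# ("online", "unavailable", "offline"), so a client asking for "busy" now
# fails the `presence not in self.VALID_PRESENCE` checks below with a 400,
# matching the per-call `valid_presence` logic that this change removes.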
active_presence = self.store.take_presence_startup_info()
self.user_to_current_state = {state.user_id: state for state in active_presence}

@@ -395,8 +408,6 @@ class WorkerPresenceHandler(BasePresenceHandler):

self._presence_writer_instance = hs.config.worker.writers.presence[0]

self._presence_enabled = hs.config.server.use_presence

# Route presence EDUs to the right worker
hs.get_federation_registry().register_instances_for_edu(
EduTypes.PRESENCE,
@@ -421,8 +432,6 @@ class WorkerPresenceHandler(BasePresenceHandler):
self.send_stop_syncing, UPDATE_SYNCING_USERS_MS
)

self._busy_presence_enabled = hs.config.experimental.msc3026_enabled

hs.get_reactor().addSystemEventTrigger(
"before",
"shutdown",
@@ -490,7 +499,9 @@ class WorkerPresenceHandler(BasePresenceHandler):
# what the spec wants: see comment in the BasePresenceHandler version
# of this function.
await self.set_state(
UserID.from_string(user_id), {"presence": presence_state}, True
UserID.from_string(user_id),
{"presence": presence_state},
ignore_status_msg=True,
)

curr_sync = self._user_to_num_current_syncs.get(user_id, 0)
@@ -601,22 +612,13 @@ class WorkerPresenceHandler(BasePresenceHandler):
"""
presence = state["presence"]

valid_presence = (
PresenceState.ONLINE,
PresenceState.UNAVAILABLE,
PresenceState.OFFLINE,
PresenceState.BUSY,
)

if presence not in valid_presence or (
presence == PresenceState.BUSY and not self._busy_presence_enabled
):
if presence not in self.VALID_PRESENCE:
raise SynapseError(400, "Invalid presence state")

user_id = target_user.to_string()

# If presence is disabled, no-op
if not self.hs.config.server.use_presence:
if not self._presence_enabled:
return

# Proxy request to instance that writes presence
@@ -633,7 +635,7 @@ class WorkerPresenceHandler(BasePresenceHandler):
with the app.
"""
# If presence is disabled, no-op
if not self.hs.config.server.use_presence:
if not self._presence_enabled:
return

# Proxy request to instance that writes presence
@@ -649,7 +651,6 @@ class PresenceHandler(BasePresenceHandler):
self.hs = hs
self.wheel_timer: WheelTimer[str] = WheelTimer()
self.notifier = hs.get_notifier()
self._presence_enabled = hs.config.server.use_presence

federation_registry = hs.get_federation_registry()

@@ -700,8 +701,6 @@ class PresenceHandler(BasePresenceHandler):
self._on_shutdown,
)

self._next_serial = 1

# Keeps track of the number of *ongoing* syncs on this process. While
# this is non zero a user will never go offline.
self.user_to_num_current_syncs: Dict[str, int] = {}
@@ -723,21 +722,16 @@ class PresenceHandler(BasePresenceHandler):
# Start a LoopingCall in 30s that fires every 5s.
# The initial delay is to allow disconnected clients a chance to
# reconnect before we treat them as offline.
def run_timeout_handler() -> Awaitable[None]:
return run_as_background_process(
"handle_presence_timeouts", self._handle_timeouts
)

self.clock.call_later(
30, self.clock.looping_call, run_timeout_handler, 5000
30, self.clock.looping_call, self._handle_timeouts, 5000
)

def run_persister() -> Awaitable[None]:
return run_as_background_process(
"persist_presence_changes", self._persist_unpersisted_changes
)

self.clock.call_later(60, self.clock.looping_call, run_persister, 60 * 1000)
self.clock.call_later(
60,
self.clock.looping_call,
self._persist_unpersisted_changes,
60 * 1000,
)

LaterGauge(
"synapse_handlers_presence_wheel_timer_size",
@@ -783,6 +777,7 @@ class PresenceHandler(BasePresenceHandler):
)
logger.info("Finished _on_shutdown")

@wrap_as_background_process("persist_presence_changes")
async def _persist_unpersisted_changes(self) -> None:
"""We periodically persist the unpersisted changes, as otherwise they
may stack up and slow down shutdown times.
@@ -898,6 +893,7 @@ class PresenceHandler(BasePresenceHandler):
states, [destination]
)

@wrap_as_background_process("handle_presence_timeouts")
async def _handle_timeouts(self) -> None:
"""Checks the presence of users that have timed out and updates as
appropriate.
@@ -955,7 +951,7 @@ class PresenceHandler(BasePresenceHandler):
with the app.
"""
# If presence is disabled, no-op
if not self.hs.config.server.use_presence:
if not self._presence_enabled:
return

user_id = user.to_string()
@@ -990,56 +986,51 @@ class PresenceHandler(BasePresenceHandler):
client that is being used by a user.
presence_state: The presence state indicated in the sync request
"""
# Override if it should affect the user's presence, if presence is
# disabled.
if not self.hs.config.server.use_presence:
affect_presence = False
if not affect_presence or not self._presence_enabled:
return _NullContextManager()

if affect_presence:
curr_sync = self.user_to_num_current_syncs.get(user_id, 0)
self.user_to_num_current_syncs[user_id] = curr_sync + 1
curr_sync = self.user_to_num_current_syncs.get(user_id, 0)
self.user_to_num_current_syncs[user_id] = curr_sync + 1

prev_state = await self.current_state_for_user(user_id)

# If they're busy then they don't stop being busy just by syncing,
# so just update the last sync time.
if prev_state.state != PresenceState.BUSY:
# XXX: We set_state separately here and just update the last_active_ts above
# This keeps the logic as similar as possible between the worker and single
# process modes. Using set_state will actually cause last_active_ts to be
# updated always, which is not what the spec calls for, but synapse has done
# this for... forever, I think.
await self.set_state(
UserID.from_string(user_id),
{"presence": presence_state},
ignore_status_msg=True,
)
# Retrieve the new state for the logic below. This should come from the
# in-memory cache.
prev_state = await self.current_state_for_user(user_id)

# If they're busy then they don't stop being busy just by syncing,
# so just update the last sync time.
if prev_state.state != PresenceState.BUSY:
# XXX: We set_state separately here and just update the last_active_ts above
# This keeps the logic as similar as possible between the worker and single
# process modes. Using set_state will actually cause last_active_ts to be
# updated always, which is not what the spec calls for, but synapse has done
# this for... forever, I think.
await self.set_state(
UserID.from_string(user_id), {"presence": presence_state}, True
)
# Retrieve the new state for the logic below. This should come from the
# in-memory cache.
prev_state = await self.current_state_for_user(user_id)

# To keep the single process behaviour consistent with worker mode, run the
# same logic as `update_external_syncs_row`, even though it looks weird.
if prev_state.state == PresenceState.OFFLINE:
await self._update_states(
[
prev_state.copy_and_replace(
state=PresenceState.ONLINE,
last_active_ts=self.clock.time_msec(),
last_user_sync_ts=self.clock.time_msec(),
)
]
)
# otherwise, set the new presence state & update the last sync time,
# but don't update last_active_ts as this isn't an indication that
# they've been active (even though it's probably been updated by
# set_state above)
else:
await self._update_states(
[
prev_state.copy_and_replace(
last_user_sync_ts=self.clock.time_msec()
)
]
)
# To keep the single process behaviour consistent with worker mode, run the
# same logic as `update_external_syncs_row`, even though it looks weird.
if prev_state.state == PresenceState.OFFLINE:
await self._update_states(
[
prev_state.copy_and_replace(
state=PresenceState.ONLINE,
last_active_ts=self.clock.time_msec(),
last_user_sync_ts=self.clock.time_msec(),
)
]
)
# otherwise, set the new presence state & update the last sync time,
# but don't update last_active_ts as this isn't an indication that
# they've been active (even though it's probably been updated by
# set_state above)
else:
await self._update_states(
[prev_state.copy_and_replace(last_user_sync_ts=self.clock.time_msec())]
)

async def _end() -> None:
try:
@@ -1061,8 +1052,7 @@ class PresenceHandler(BasePresenceHandler):
try:
yield
finally:
if affect_presence:
run_in_background(_end)
run_in_background(_end)

return _user_syncing()

@@ -1229,20 +1219,11 @@ class PresenceHandler(BasePresenceHandler):
status_msg = state.get("status_msg", None)
presence = state["presence"]

valid_presence = (
PresenceState.ONLINE,
PresenceState.UNAVAILABLE,
PresenceState.OFFLINE,
PresenceState.BUSY,
)

if presence not in valid_presence or (
presence == PresenceState.BUSY and not self._busy_presence_enabled
):
if presence not in self.VALID_PRESENCE:
raise SynapseError(400, "Invalid presence state")

# If presence is disabled, no-op
if not self.hs.config.server.use_presence:
if not self._presence_enabled:
return

user_id = target_user.to_string()

@@ -68,7 +68,7 @@ class ProfileHandler:

if self.hs.is_mine(target_user):
profileinfo = await self.store.get_profileinfo(target_user)
if profileinfo.display_name is None:
if profileinfo.display_name is None and profileinfo.avatar_url is None:
raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND)

return {
@@ -163,7 +163,7 @@ class ProfileHandler:
400, "Displayname is too long (max %i)" % (MAX_DISPLAYNAME_LEN,)
)

displayname_to_set: Optional[str] = new_displayname
displayname_to_set: Optional[str] = new_displayname.strip()
if new_displayname == "":
displayname_to_set = None


@@ -39,6 +39,7 @@ from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.handlers.profile import MAX_AVATAR_URL_LEN, MAX_DISPLAYNAME_LEN
from synapse.handlers.state_deltas import MatchChange, StateDeltasHandler
from synapse.handlers.worker_lock import NEW_EVENT_DURING_PURGE_LOCK_NAME
from synapse.logging import opentracing
from synapse.metrics import event_processing_positions
from synapse.metrics.background_process_metrics import run_as_background_process
@@ -94,6 +95,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
self.event_creation_handler = hs.get_event_creation_handler()
self.account_data_handler = hs.get_account_data_handler()
self.event_auth_handler = hs.get_event_auth_handler()
self._worker_lock_handler = hs.get_worker_locks_handler()

self.member_linearizer: Linearizer = Linearizer(name="member")
self.member_as_limiter = Linearizer(max_count=10, name="member_as_limiter")
@@ -174,8 +176,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
self.request_ratelimiter = hs.get_request_ratelimiter()
hs.get_notifier().add_new_join_in_room_callback(self._on_user_joined_room)

self._msc3970_enabled = hs.config.experimental.msc3970_enabled

def _on_user_joined_room(self, event_id: str, room_id: str) -> None:
"""Notify the rate limiter that a room join has occurred.

@@ -416,29 +416,11 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
# do this check just before we persist an event as well, but may as well
# do it up front for efficiency.)
if txn_id:
existing_event_id = None
if self._msc3970_enabled and requester.device_id:
# When MSC3970 is enabled, we lookup for events sent by the same device
# first, and fallback to the old behaviour if none were found.
existing_event_id = (
await self.store.get_event_id_from_transaction_id_and_device_id(
room_id,
requester.user.to_string(),
requester.device_id,
txn_id,
)
existing_event_id = (
await self.event_creation_handler.get_event_id_from_transaction(
requester, txn_id, room_id
)

if requester.access_token_id and not existing_event_id:
existing_event_id = (
await self.store.get_event_id_from_transaction_id_and_token_id(
room_id,
requester.user.to_string(),
requester.access_token_id,
txn_id,
)
)

)
if existing_event_id:
event_pos = await self.store.get_position_for_event(existing_event_id)
return existing_event_id, event_pos.stream
@@ -638,26 +620,29 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
# by application services), and then by room ID.
async with self.member_as_limiter.queue(as_id):
async with self.member_linearizer.queue(key):
with opentracing.start_active_span("update_membership_locked"):
result = await self.update_membership_locked(
requester,
target,
room_id,
action,
txn_id=txn_id,
remote_room_hosts=remote_room_hosts,
third_party_signed=third_party_signed,
ratelimit=ratelimit,
content=content,
new_room=new_room,
require_consent=require_consent,
outlier=outlier,
allow_no_prev_events=allow_no_prev_events,
prev_event_ids=prev_event_ids,
state_event_ids=state_event_ids,
depth=depth,
origin_server_ts=origin_server_ts,
)
async with self._worker_lock_handler.acquire_read_write_lock(
NEW_EVENT_DURING_PURGE_LOCK_NAME, room_id, write=False
):
with opentracing.start_active_span("update_membership_locked"):
result = await self.update_membership_locked(
requester,
target,
room_id,
action,
txn_id=txn_id,
remote_room_hosts=remote_room_hosts,
third_party_signed=third_party_signed,
ratelimit=ratelimit,
content=content,
new_room=new_room,
require_consent=require_consent,
outlier=outlier,
allow_no_prev_events=allow_no_prev_events,
prev_event_ids=prev_event_ids,
state_event_ids=state_event_ids,
depth=depth,
origin_server_ts=origin_server_ts,
)

return result


@@ -74,12 +74,13 @@ class SamlHandler:
self.idp_id = "saml"

# user-facing name of this auth provider
self.idp_name = "SAML"
self.idp_name = hs.config.saml2.idp_name

# we do not currently support icons/brands for SAML auth, but this is required by
# the SsoIdentityProvider protocol type.
self.idp_icon = None
self.idp_brand = None
# MXC URI for icon for this auth provider
self.idp_icon = hs.config.saml2.idp_icon

# optional brand identifier for this auth provider
self.idp_brand = hs.config.saml2.idp_brand

# a map from saml session id to Saml2SessionData object
self._outstanding_requests_dict: Dict[str, Saml2SessionData] = {}

@@ -24,13 +24,14 @@ from typing import (
Iterable,
List,
Mapping,
NoReturn,
Optional,
Set,
)
from urllib.parse import urlencode

import attr
from typing_extensions import NoReturn, Protocol
from typing_extensions import Protocol

from twisted.web.iweb import IRequest
from twisted.web.server import Request
@@ -791,7 +792,7 @@ class SsoHandler:

if code != 200:
raise Exception(
"GET request to download sso avatar image returned {}".format(code)
f"GET request to download sso avatar image returned {code}"
)

# upload name includes hash of the image file's content so that we can

@@ -14,9 +14,15 @@
# limitations under the License.
import logging
from collections import Counter
from typing import TYPE_CHECKING, Any, Dict, Iterable, Optional, Tuple

from typing_extensions import Counter as CounterType
from typing import (
TYPE_CHECKING,
Any,
Counter as CounterType,
Dict,
Iterable,
Optional,
Tuple,
)

from synapse.api.constants import EventContentFields, EventTypes, Membership
from synapse.metrics import event_processing_positions

@@ -387,16 +387,16 @@ class SyncHandler:
from_token=since_token,
)

# if nothing has happened in any of the users' rooms since /sync was called,
# the resultant next_batch will be the same as since_token (since the result
# is generated when wait_for_events is first called, and not regenerated
# when wait_for_events times out).
#
# If that happens, we mustn't cache it, so that when the client comes back
# with the same cache token, we don't immediately return the same empty
# result, causing a tightloop. (#8518)
if result.next_batch == since_token:
cache_context.should_cache = False
# if nothing has happened in any of the users' rooms since /sync was called,
# the resultant next_batch will be the same as since_token (since the result
# is generated when wait_for_events is first called, and not regenerated
# when wait_for_events times out).
#
# If that happens, we mustn't cache it, so that when the client comes back
# with the same cache token, we don't immediately return the same empty
# result, causing a tightloop. (#8518)
if result.next_batch == since_token:
cache_context.should_cache = False

if result:
if sync_config.filter_collection.lazy_load_members():
@@ -1442,11 +1442,9 @@ class SyncHandler:

# Now we have our list of joined room IDs, exclude as configured and freeze
joined_room_ids = frozenset(
(
room_id
for room_id in mutable_joined_room_ids
if room_id not in mutable_rooms_to_exclude
)
room_id
for room_id in mutable_joined_room_ids
if room_id not in mutable_rooms_to_exclude
)

logger.debug(

@@ -94,6 +94,7 @@ class UserDirectoryHandler(StateDeltasHandler):
self.is_mine_id = hs.is_mine_id
self.update_user_directory = hs.config.worker.should_update_user_directory
self.search_all_users = hs.config.userdirectory.user_directory_search_all_users
self.show_locked_users = hs.config.userdirectory.show_locked_users
self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker
self._hs = hs

@@ -144,7 +145,9 @@ class UserDirectoryHandler(StateDeltasHandler):
]
}
"""
results = await self.store.search_user_dir(user_id, search_term, limit)
results = await self.store.search_user_dir(
user_id, search_term, limit, self.show_locked_users
)

# Remove any spammy users from the results.
non_spammy_users = []

337
synapse/handlers/worker_lock.py
Normal file
@@ -0,0 +1,337 @@
# Copyright 2023 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import random
from types import TracebackType
from typing import (
    TYPE_CHECKING,
    AsyncContextManager,
    Collection,
    Dict,
    Optional,
    Tuple,
    Type,
    Union,
)
from weakref import WeakSet

import attr

from twisted.internet import defer
from twisted.internet.interfaces import IReactorTime

from synapse.logging.context import PreserveLoggingContext
from synapse.logging.opentracing import start_active_span
from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.storage.databases.main.lock import Lock, LockStore
from synapse.util.async_helpers import timeout_deferred

if TYPE_CHECKING:
    from synapse.logging.opentracing import opentracing
    from synapse.server import HomeServer


# This lock is used to avoid creating an event while we are purging the room.
# We take a read lock when creating an event, and a write one when purging a room.
# This is because it is fine to create several events concurrently, since referenced events
# will not disappear under our feet as long as we don't delete the room.
NEW_EVENT_DURING_PURGE_LOCK_NAME = "new_event_during_purge_lock"


class WorkerLocksHandler:
    """A class for waiting on taking out locks, rather than using the storage
    functions directly (which don't support awaiting).
    """

    def __init__(self, hs: "HomeServer") -> None:
        self._reactor = hs.get_reactor()
        self._store = hs.get_datastores().main
        self._clock = hs.get_clock()
        self._notifier = hs.get_notifier()
        self._instance_name = hs.get_instance_name()

        # Map from lock name/key to set of `WaitingLock` that are active for
        # that lock.
        self._locks: Dict[
            Tuple[str, str], WeakSet[Union[WaitingLock, WaitingMultiLock]]
        ] = {}

        self._clock.looping_call(self._cleanup_locks, 30_000)

        self._notifier.add_lock_released_callback(self._on_lock_released)

    def acquire_lock(self, lock_name: str, lock_key: str) -> "WaitingLock":
        """Acquire a standard lock, returns a context manager that will block
        until the lock is acquired.

        Note: Care must be taken to avoid deadlocks. In particular, this
        function does *not* timeout.

        Usage:
            async with handler.acquire_lock(name, key):
                # Do work while holding the lock...
        """

        lock = WaitingLock(
            reactor=self._reactor,
            store=self._store,
            handler=self,
            lock_name=lock_name,
            lock_key=lock_key,
            write=None,
        )

        self._locks.setdefault((lock_name, lock_key), WeakSet()).add(lock)

        return lock

    def acquire_read_write_lock(
        self,
        lock_name: str,
        lock_key: str,
        *,
        write: bool,
    ) -> "WaitingLock":
        """Acquire a read/write lock, returns a context manager that will block
        until the lock is acquired.

        Note: Care must be taken to avoid deadlocks. In particular, this
        function does *not* timeout.

        Usage:
            async with handler.acquire_read_write_lock(name, key, write=True):
                # Do work while holding the lock...
        """

        lock = WaitingLock(
            reactor=self._reactor,
            store=self._store,
            handler=self,
            lock_name=lock_name,
            lock_key=lock_key,
            write=write,
        )

        self._locks.setdefault((lock_name, lock_key), WeakSet()).add(lock)

        return lock

    def acquire_multi_read_write_lock(
        self,
        lock_names: Collection[Tuple[str, str]],
        *,
        write: bool,
    ) -> "WaitingMultiLock":
"""Acquires multi read/write locks at once, returns a context manager
|
||||
that will block until all the locks are acquired.
|
||||
|
||||
This will try and acquire all locks at once, and will never hold on to a
|
||||
subset of the locks. (This avoids accidentally creating deadlocks).
|
||||
|
||||
Note: Care must be taken to avoid deadlocks. In particular, this
|
||||
function does *not* timeout.
|
||||
"""
|
||||
|
||||
        lock = WaitingMultiLock(
            lock_names=lock_names,
            write=write,
            reactor=self._reactor,
            store=self._store,
            handler=self,
        )

        for lock_name, lock_key in lock_names:
            self._locks.setdefault((lock_name, lock_key), WeakSet()).add(lock)

        return lock

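    # Illustrative only (mirrors the pagination handler change above; the lock
    # name constants are defined in their respective modules): taking two
    # locks together when purging a room.
    #
    #     async with worker_locks.acquire_multi_read_write_lock(
    #         [
    #             (PURGE_PAGINATION_LOCK_NAME, room_id),
    #             (NEW_EVENT_DURING_PURGE_LOCK_NAME, room_id),
    #         ],
    #         write=True,
    #     ):
    #         ...  # both locks held; released together on exit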
    def notify_lock_released(self, lock_name: str, lock_key: str) -> None:
        """Notify that a lock has been released.

        Pokes both the notifier and replication.
        """

        self._notifier.notify_lock_released(self._instance_name, lock_name, lock_key)

    def _on_lock_released(
        self, instance_name: str, lock_name: str, lock_key: str
    ) -> None:
        """Called when a lock has been released.

        Wakes up any locks that might be waiting on this.
        """
        locks = self._locks.get((lock_name, lock_key))
        if not locks:
            return

        def _wake_deferred(deferred: defer.Deferred) -> None:
            if not deferred.called:
                deferred.callback(None)

        for lock in locks:
            self._clock.call_later(0, _wake_deferred, lock.deferred)

    @wrap_as_background_process("_cleanup_locks")
    async def _cleanup_locks(self) -> None:
        """Periodically cleans out stale entries in the locks map"""
        self._locks = {key: value for key, value in self._locks.items() if value}


@attr.s(auto_attribs=True, eq=False)
class WaitingLock:
    reactor: IReactorTime
    store: LockStore
    handler: WorkerLocksHandler
    lock_name: str
    lock_key: str
    write: Optional[bool]
    deferred: "defer.Deferred[None]" = attr.Factory(defer.Deferred)
    _inner_lock: Optional[Lock] = None
    _retry_interval: float = 0.1
    _lock_span: "opentracing.Scope" = attr.Factory(
        lambda: start_active_span("WaitingLock.lock")
    )

    async def __aenter__(self) -> None:
        self._lock_span.__enter__()

        with start_active_span("WaitingLock.waiting_for_lock"):
            while self._inner_lock is None:
                self.deferred = defer.Deferred()

                if self.write is not None:
                    lock = await self.store.try_acquire_read_write_lock(
                        self.lock_name, self.lock_key, write=self.write
                    )
                else:
                    lock = await self.store.try_acquire_lock(
                        self.lock_name, self.lock_key
                    )

                if lock:
                    self._inner_lock = lock
                    break

                try:
                    # Wait until we get notified that the lock might have been
                    # released (by the deferred being resolved). We also
                    # periodically wake up in case the lock was released but we
                    # weren't notified.
                    with PreserveLoggingContext():
                        await timeout_deferred(
                            deferred=self.deferred,
                            timeout=self._get_next_retry_interval(),
                            reactor=self.reactor,
                        )
                except Exception:
                    pass

        return await self._inner_lock.__aenter__()

    async def __aexit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc: Optional[BaseException],
        tb: Optional[TracebackType],
    ) -> Optional[bool]:
        assert self._inner_lock

        self.handler.notify_lock_released(self.lock_name, self.lock_key)

        try:
            r = await self._inner_lock.__aexit__(exc_type, exc, tb)
        finally:
            self._lock_span.__exit__(exc_type, exc, tb)

        return r

    def _get_next_retry_interval(self) -> float:
        next = self._retry_interval
        self._retry_interval = max(5, next * 2)
        return next * random.uniform(0.9, 1.1)

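# Illustrative timing note (derived from the code above, not stated by the
# author): the first retry waits roughly 0.1s, the second roughly 5s (since
# max(5, 0.2) == 5), and each subsequent retry doubles (10s, 20s, ...), all
# jittered by ±10%.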
@attr.s(auto_attribs=True, eq=False)
class WaitingMultiLock:
    lock_names: Collection[Tuple[str, str]]

    write: bool

    reactor: IReactorTime
    store: LockStore
    handler: WorkerLocksHandler

    deferred: "defer.Deferred[None]" = attr.Factory(defer.Deferred)

    _inner_lock_cm: Optional[AsyncContextManager] = None
    _retry_interval: float = 0.1
    _lock_span: "opentracing.Scope" = attr.Factory(
        lambda: start_active_span("WaitingLock.lock")
    )

    async def __aenter__(self) -> None:
        self._lock_span.__enter__()

        with start_active_span("WaitingLock.waiting_for_lock"):
            while self._inner_lock_cm is None:
                self.deferred = defer.Deferred()

                lock_cm = await self.store.try_acquire_multi_read_write_lock(
                    self.lock_names, write=self.write
                )

                if lock_cm:
                    self._inner_lock_cm = lock_cm
                    break

                try:
                    # Wait until we get notified that the lock might have been
                    # released (by the deferred being resolved). We also
                    # periodically wake up in case the lock was released but we
                    # weren't notified.
                    with PreserveLoggingContext():
                        await timeout_deferred(
                            deferred=self.deferred,
                            timeout=self._get_next_retry_interval(),
                            reactor=self.reactor,
                        )
                except Exception:
                    pass

        assert self._inner_lock_cm
        await self._inner_lock_cm.__aenter__()
        return

    async def __aexit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc: Optional[BaseException],
        tb: Optional[TracebackType],
    ) -> Optional[bool]:
        assert self._inner_lock_cm

        for lock_name, lock_key in self.lock_names:
            self.handler.notify_lock_released(lock_name, lock_key)

        try:
            r = await self._inner_lock_cm.__aexit__(exc_type, exc, tb)
        finally:
            self._lock_span.__exit__(exc_type, exc, tb)

        return r

    def _get_next_retry_interval(self) -> float:
        next = self._retry_interval
        self._retry_interval = max(5, next * 2)
        return next * random.uniform(0.9, 1.1)
@@ -18,10 +18,9 @@ import traceback
from collections import deque
from ipaddress import IPv4Address, IPv6Address, ip_address
from math import floor
from typing import Callable, Optional
from typing import Callable, Deque, Optional

import attr
from typing_extensions import Deque
from zope.interface import implementer

from twisted.application.internet import ClientService

@@ -31,9 +31,10 @@ from typing import (

import attr
import jinja2
from typing_extensions import ParamSpec
from typing_extensions import Concatenate, ParamSpec

from twisted.internet import defer
from twisted.internet.interfaces import IDelayedCall
from twisted.web.resource import Resource

from synapse.api import errors
@@ -884,7 +885,7 @@ class ModuleApi:
def run_db_interaction(
self,
desc: str,
func: Callable[P, T],
func: Callable[Concatenate[LoggingTransaction, P], T],
*args: P.args,
**kwargs: P.kwargs,
) -> "defer.Deferred[T]":
@@ -1230,6 +1231,58 @@ class ModuleApi:
f,
)

def should_run_background_tasks(self) -> bool:
"""
Return true if and only if the current worker is configured to run
background tasks.
There should only be one worker configured to run background tasks, so
this is helpful when you need to only run a task on one worker but don't
have any other good way to choose which one.

Added in Synapse v1.89.0.
"""
return self._hs.config.worker.run_background_tasks

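# Illustrative only (hypothetical module code, not part of this changeset):
# gating a periodic module task so it runs on exactly one worker.
#
#     if module_api.should_run_background_tasks():
#         module_api.delayed_background_call(60_000, prune_old_rows, desc="prune")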
def delayed_background_call(
self,
msec: float,
f: Callable,
*args: object,
desc: Optional[str] = None,
**kwargs: object,
) -> IDelayedCall:
"""Wraps a function as a background process and calls it in a given number of milliseconds.

The scheduled call is not persistent: if the current Synapse instance is
restarted before the call is made, the call will not be made.

Added in Synapse v1.90.0.

Args:
msec: How long to wait before calling, in milliseconds.
f: The function to call once. f can be either synchronous or
asynchronous, and must follow Synapse's logcontext rules.
More info about logcontexts is available at
https://matrix-org.github.io/synapse/latest/log_contexts.html
*args: Positional arguments to pass to function.
desc: The background task's description. Defaults to the function's name.
**kwargs: Keyword arguments to pass to function.

Returns:
IDelayedCall handle from twisted, which allows the delayed call to be cancelled if desired.
"""

if desc is None:
desc = f.__name__

return self._clock.call_later(
# convert ms to seconds as needed by call_later.
msec * 0.001,
run_as_background_process,
desc,
lambda: maybe_awaitable(f(*args, **kwargs)),
)

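# Illustrative only (hypothetical callback and module wiring): scheduling a
# one-off call five minutes out, then cancelling it via the returned
# IDelayedCall.
#
#     def remind(user_id: str) -> None:
#         logger.info("reminding %s", user_id)
#
#     handle = module_api.delayed_background_call(
#         5 * 60 * 1000, remind, "@alice:example.org", desc="send_reminder"
#     )
#     handle.cancel()  # drop the pending call if it is no longer needed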
async def sleep(self, seconds: float) -> None:
|
||||
"""Sleeps for the given number of seconds.
|
||||
|
||||
|
||||
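
Together, the two new module-API methods let a module run one-off work on exactly one worker. A hedged sketch of how a module might combine them; CleanupModule and its pruning task are hypothetical, and only should_run_background_tasks and delayed_background_call come from the diff above:

from synapse.module_api import ModuleApi

class CleanupModule:
    def __init__(self, config: dict, api: ModuleApi):
        self._api = api
        # Schedule on the single worker configured for background tasks,
        # so the job does not fire once per worker.
        if api.should_run_background_tasks():
            # Run roughly five minutes after startup; the call is lost if
            # the process restarts first, since scheduling is not persistent.
            api.delayed_background_call(
                5 * 60 * 1000,  # msec
                self._prune_old_entries,
                desc="cleanup_module_prune",
            )

    async def _prune_old_entries(self) -> None:
        # f may be synchronous or asynchronous; an async def works here.
        ...
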
@@ -426,9 +426,7 @@ class SpamCheckerModuleApiCallbacks:
             generally discouraged as it doesn't support internationalization.
         """
         for callback in self._check_event_for_spam_callbacks:
-            with Measure(
-                self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
-            ):
+            with Measure(self.clock, f"{callback.__module__}.{callback.__qualname__}"):
                 res = await delay_cancellation(callback(event))
                 if res is False or res == self.NOT_SPAM:
                     # This spam-checker accepts the event.
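
The remaining hunks in this file are the same mechanical refactor: the three-line with Measure(...) statement collapses to one line because the f-string builds an identical metric name to the old str.format call, as this small check demonstrates (my_checker is a stand-in for a registered callback):

def my_checker(event: object) -> bool:  # stand-in for a registered callback
    return False

old = "{}.{}".format(my_checker.__module__, my_checker.__qualname__)
new = f"{my_checker.__module__}.{my_checker.__qualname__}"
assert old == new  # e.g. "__main__.my_checker"; only the syntax changed
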
@@ -481,9 +479,7 @@ class SpamCheckerModuleApiCallbacks:
             True if the event should be silently dropped
         """
         for callback in self._should_drop_federated_event_callbacks:
-            with Measure(
-                self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
-            ):
+            with Measure(self.clock, f"{callback.__module__}.{callback.__qualname__}"):
                 res: Union[bool, str] = await delay_cancellation(callback(event))
                 if res:
                     return res

@@ -505,9 +501,7 @@ class SpamCheckerModuleApiCallbacks:
             NOT_SPAM if the operation is permitted, [Codes, Dict] otherwise.
         """
         for callback in self._user_may_join_room_callbacks:
-            with Measure(
-                self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
-            ):
+            with Measure(self.clock, f"{callback.__module__}.{callback.__qualname__}"):
                 res = await delay_cancellation(callback(user_id, room_id, is_invited))
                 # Normalize return values to `Codes` or `"NOT_SPAM"`.
                 if res is True or res is self.NOT_SPAM:

@@ -546,9 +540,7 @@ class SpamCheckerModuleApiCallbacks:
             NOT_SPAM if the operation is permitted, Codes otherwise.
         """
         for callback in self._user_may_invite_callbacks:
-            with Measure(
-                self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
-            ):
+            with Measure(self.clock, f"{callback.__module__}.{callback.__qualname__}"):
                 res = await delay_cancellation(
                     callback(inviter_userid, invitee_userid, room_id)
                 )

@@ -593,9 +585,7 @@ class SpamCheckerModuleApiCallbacks:
             NOT_SPAM if the operation is permitted, Codes otherwise.
         """
         for callback in self._user_may_send_3pid_invite_callbacks:
-            with Measure(
-                self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
-            ):
+            with Measure(self.clock, f"{callback.__module__}.{callback.__qualname__}"):
                 res = await delay_cancellation(
                     callback(inviter_userid, medium, address, room_id)
                 )

@@ -630,9 +620,7 @@ class SpamCheckerModuleApiCallbacks:
             userid: The ID of the user attempting to create a room
         """
         for callback in self._user_may_create_room_callbacks:
-            with Measure(
-                self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
-            ):
+            with Measure(self.clock, f"{callback.__module__}.{callback.__qualname__}"):
                 res = await delay_cancellation(callback(userid))
                 if res is True or res is self.NOT_SPAM:
                     continue

@@ -666,9 +654,7 @@ class SpamCheckerModuleApiCallbacks:

         """
         for callback in self._user_may_create_room_alias_callbacks:
-            with Measure(
-                self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
-            ):
+            with Measure(self.clock, f"{callback.__module__}.{callback.__qualname__}"):
                 res = await delay_cancellation(callback(userid, room_alias))
                 if res is True or res is self.NOT_SPAM:
                     continue

@@ -701,9 +687,7 @@ class SpamCheckerModuleApiCallbacks:
             room_id: The ID of the room that would be published
         """
         for callback in self._user_may_publish_room_callbacks:
-            with Measure(
-                self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
-            ):
+            with Measure(self.clock, f"{callback.__module__}.{callback.__qualname__}"):
                 res = await delay_cancellation(callback(userid, room_id))
                 if res is True or res is self.NOT_SPAM:
                     continue

@@ -742,9 +726,7 @@ class SpamCheckerModuleApiCallbacks:
             True if the user is spammy.
         """
         for callback in self._check_username_for_spam_callbacks:
-            with Measure(
-                self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
-            ):
+            with Measure(self.clock, f"{callback.__module__}.{callback.__qualname__}"):
                 # Make a copy of the user profile object to ensure the spam checker cannot
                 # modify it.
                 res = await delay_cancellation(callback(user_profile.copy()))

@@ -776,9 +758,7 @@ class SpamCheckerModuleApiCallbacks:
         """

         for callback in self._check_registration_for_spam_callbacks:
-            with Measure(
-                self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
-            ):
+            with Measure(self.clock, f"{callback.__module__}.{callback.__qualname__}"):
                 behaviour = await delay_cancellation(
                     callback(email_threepid, username, request_info, auth_provider_id)
                 )

@@ -820,9 +800,7 @@ class SpamCheckerModuleApiCallbacks:
         """

         for callback in self._check_media_file_for_spam_callbacks:
-            with Measure(
-                self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
-            ):
+            with Measure(self.clock, f"{callback.__module__}.{callback.__qualname__}"):
                 res = await delay_cancellation(callback(file_wrapper, file_info))
                 # Normalize return values to `Codes` or `"NOT_SPAM"`.
                 if res is False or res is self.NOT_SPAM:

@@ -869,9 +847,7 @@ class SpamCheckerModuleApiCallbacks:
         """

         for callback in self._check_login_for_spam_callbacks:
-            with Measure(
-                self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
-            ):
+            with Measure(self.clock, f"{callback.__module__}.{callback.__qualname__}"):
                 res = await delay_cancellation(
                     callback(
                         user_id,

@@ -234,6 +234,9 @@ class Notifier:

         self._third_party_rules = hs.get_module_api_callbacks().third_party_event_rules

+        # List of callbacks to be notified when a lock is released
+        self._lock_released_callback: List[Callable[[str, str, str], None]] = []
+
         self.clock = hs.get_clock()
         self.appservice_handler = hs.get_application_service_handler()
         self._pusher_pool = hs.get_pusherpool()

@@ -785,6 +788,19 @@ class Notifier:
         # that any in flight requests can be immediately retried.
         self._federation_client.wake_destination(server)

+    def add_lock_released_callback(
+        self, callback: Callable[[str, str, str], None]
+    ) -> None:
+        """Add a function to be called whenever we are notified about a released lock."""
+        self._lock_released_callback.append(callback)
+
+    def notify_lock_released(
+        self, instance_name: str, lock_name: str, lock_key: str
+    ) -> None:
+        """Notify the callbacks that a lock has been released."""
+        for cb in self._lock_released_callback:
+            cb(instance_name, lock_name, lock_key)
+

 @attr.s(auto_attribs=True)
 class ReplicationNotifier:
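
One plausible consumer of the new hook is a lock waiter that retries as soon as another worker announces a release, instead of sleeping out its full backoff interval. In this sketch, LockWaiter and its bookkeeping are hypothetical; only add_lock_released_callback and its (instance_name, lock_name, lock_key) signature come from the diff:

from typing import Set, Tuple

class LockWaiter:
    def __init__(self, notifier) -> None:
        self._waiting: Set[Tuple[str, str]] = set()
        # Invoked with (instance_name, lock_name, lock_key) whenever a
        # worker announces over replication that it released a lock.
        notifier.add_lock_released_callback(self._on_lock_released)

    def wait_for(self, lock_name: str, lock_key: str) -> None:
        self._waiting.add((lock_name, lock_key))

    def _on_lock_released(
        self, instance_name: str, lock_name: str, lock_key: str
    ) -> None:
        if (lock_name, lock_key) in self._waiting:
            # Wake the waiter immediately rather than waiting out the
            # interval computed by _get_next_retry_interval above.
            print(f"{instance_name} released {lock_name}/{lock_key}; retry now")
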
Some files were not shown because too many files have changed in this diff.