Compare commits
1 commit: shay/super ... anoa/docs_
Commit: 793a5bfd12
.ci/scripts/record_available_doc_versions.py (executable file, 28 lines)
@@ -0,0 +1,28 @@
+#!/usr/bin/env python3
+# This script will write a json file to $OUTPUT_FILE that contains the name of
+# each available Synapse version with documentation.
+#
+# This script assumes that any top-level directory in the "gh-pages" branch is
+# named after a documentation version and contains documentation website files.
+
+import os.path
+import json
+
+OUTPUT_FILE = "versions.json"
+
+# Determine the list of Synapse versions that have documentation.
+doc_versions = []
+for filepath in os.listdir():
+    if os.path.isdir(filepath):
+        doc_versions.append(filepath)
+
+# Record the documentation versions in a json file, such that the
+# frontend javascript is aware of what versions exist.
+to_write = {
+    "versions": doc_versions,
+    "default_version": "latest",
+}
+
+# Write the file.
+with open(OUTPUT_FILE, "w") as f:
+    f.write(json.dumps(to_write))
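For reference, a minimal sketch of the output this script produces, assuming the gh-pages checkout contains hypothetical version directories `v1.57`, `v1.58` and `latest` (the real list depends on whatever directories actually exist on that branch):

```python
# Hypothetical illustration of the script's output; directory names assumed.
import json

to_write = {"versions": ["v1.57", "v1.58", "latest"], "default_version": "latest"}
print(json.dumps(to_write))
# -> {"versions": ["v1.57", "v1.58", "latest"], "default_version": "latest"}
```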
.github/workflows/docker.yml (vendored, 30 lines changed)
@@ -34,24 +34,32 @@ jobs:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}
 
+      # TODO: consider using https://github.com/docker/metadata-action instead of this
+      # custom magic
       - name: Calculate docker image tag
         id: set-tag
-        uses: docker/metadata-action@master
-        with:
-          images: matrixdotorg/synapse
-          flavor: |
-            latest=false
-          tags: |
-            type=raw,value=develop,enable=${{ github.ref == 'refs/heads/develop' }}
-            type=raw,value=latest,enable=${{ github.ref == 'refs/heads/master' }}
-            type=raw,value=latest,enable=${{ github.ref == 'refs/heads/main' }}
-            type=pep440,pattern={{raw}}
+        run: |
+          case "${GITHUB_REF}" in
+            refs/heads/develop)
+              tag=develop
+              ;;
+            refs/heads/master|refs/heads/main)
+              tag=latest
+              ;;
+            refs/tags/*)
+              tag=${GITHUB_REF#refs/tags/}
+              ;;
+            *)
+              tag=${GITHUB_SHA}
+              ;;
+          esac
+          echo "::set-output name=tag::$tag"
 
       - name: Build and push all platforms
         uses: docker/build-push-action@v2
         with:
           push: true
           labels: "gitsha1=${{ github.sha }}"
-          tags: "${{ steps.set-tag.outputs.tags }}"
+          tags: "matrixdotorg/synapse:${{ steps.set-tag.outputs.tag }}"
           file: "docker/Dockerfile"
           platforms: linux/amd64,linux/arm64
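As a rough illustration (not part of the workflow), the shell `case` block above maps Git refs to Docker image tags as sketched here in Python:

```python
def docker_tag(github_ref: str, github_sha: str) -> str:
    """Rough Python restatement of the shell `case` block above."""
    if github_ref == "refs/heads/develop":
        return "develop"
    if github_ref in ("refs/heads/master", "refs/heads/main"):
        return "latest"
    if github_ref.startswith("refs/tags/"):
        # e.g. refs/tags/v1.58.1 -> v1.58.1
        return github_ref[len("refs/tags/"):]
    # Any other ref falls back to the commit SHA.
    return github_sha

assert docker_tag("refs/tags/v1.58.1", "abc123") == "v1.58.1"
assert docker_tag("refs/heads/main", "abc123") == "latest"
```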
.github/workflows/docs.yaml (vendored, 28 lines changed)
@@ -14,7 +14,7 @@ on:
 
 jobs:
   pages:
-    name: GitHub Pages
+    name: Build and deploy docs
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v2
@@ -63,3 +63,29 @@ jobs:
         github_token: ${{ secrets.GITHUB_TOKEN }}
         publish_dir: ./book
         destination_dir: ./${{ steps.vars.outputs.branch-version }}
+
+  list_available_versions:
+    needs: pages
+    runs-on: ubuntu-latest
+    steps:
+      # Check out the current branch
+      - uses: actions/checkout@v3
+        with:
+          persist-credentials: false
+
+      - name: Save the script
+        run: cp .ci/scripts/record_available_doc_versions.py /
+
+      - uses: actions/setup-python@v3
+
+      # Check out the gh-pages branch, which we'll be pushing the doc versions to
+      - uses: actions/checkout@v3
+        with:
+          persist-credentials: false
+          # Check out the gh-pages branch
+          ref: 'gh-pages'
+
+      - name: Record the available documentation versions
+        run: |
+          # Download the script
+          /record_available_doc_versions
CHANGES.md (12 lines changed)
@@ -4,18 +4,6 @@ Synapse 1.59.0
 The non-standard `m.login.jwt` login type has been removed from Synapse. It can be replaced with `org.matrix.login.jwt` for identical behaviour. This is only used if `jwt_config.enabled` is set to `true` in the configuration.
 
 
-Synapse 1.58.1 (2022-05-05)
-===========================
-
-This patch release includes a fix to the Debian packages, installing the
-`systemd` and `cache_memory` extra package groups, which were incorrectly
-omitted in v1.58.0. This primarily prevented Synapse from starting
-when the `systemd.journal.JournalHandler` log handler was configured.
-See [#12631](https://github.com/matrix-org/synapse/issues/12631) for further information.
-
-Otherwise, no significant changes since 1.58.0.
-
-
 Synapse 1.58.0 (2022-05-03)
 ===========================
 
@@ -1 +0,0 @@
-Support [MSC3266](https://github.com/matrix-org/matrix-doc/pull/3266) room summaries over federation.

@@ -1 +0,0 @@
-Implement [changes](https://github.com/matrix-org/matrix-spec-proposals/pull/2285/commits/4a77139249c2e830aec3c7d6bd5501a514d1cc27) to [MSC2285 (hidden read receipts)](https://github.com/matrix-org/matrix-spec-proposals/pull/2285). Contributed by @SimonBrandner.

@@ -1 +0,0 @@
-Add the `notify_appservices_from_worker` configuration option (superseding `notify_appservices`) to allow a generic worker to be designated as the worker to send traffic to Application Services.

@@ -1 +0,0 @@
-Protect module callbacks with read semantics against cancellation.

@@ -1 +0,0 @@
-Update the "Build docker images" GitHub Actions workflow to use `docker/metadata-action` to generate docker image tags, instead of a custom shell script. Contributed by henryclw.

@@ -1 +0,0 @@
-Improve comments and error messages around access tokens.

@@ -1 +1 @@
-Log status code of cancelled requests as 499 and avoid logging stack traces for them.
+Add `@cancellable` decorator, for use on endpoint methods that can be cancelled when clients disconnect.

@@ -1 +0,0 @@
-Use `getClientAddress` instead of the deprecated `getClientIP`.

@@ -1 +0,0 @@
-Add link to documentation in Grafana Dashboard.

@@ -1 +0,0 @@
-Reduce log spam when running multiple event persisters.

@@ -1 +0,0 @@
-Prevent remote homeservers from requesting local user device names by default.

@@ -1 +0,0 @@
-Add new `mau_appservice_trial_days` configuration option to specify a different trial period for users registered via an appservice.

@@ -1 +0,0 @@
-Remove unused code related to receipts.

@@ -1 +0,0 @@
-Fix a bug introduced in Synapse v1.53.0 where bundled aggregations for annotations/edits were incorrectly calculated.

@@ -1 +0,0 @@
-Implement [changes](https://github.com/matrix-org/matrix-spec-proposals/pull/2285/commits/4a77139249c2e830aec3c7d6bd5501a514d1cc27) to [MSC2285 (hidden read receipts)](https://github.com/matrix-org/matrix-spec-proposals/pull/2285). Contributed by @SimonBrandner.

@@ -1 +0,0 @@
-Implement [changes](https://github.com/matrix-org/matrix-spec-proposals/pull/2285/commits/4a77139249c2e830aec3c7d6bd5501a514d1cc27) to [MSC2285 (hidden read receipts)](https://github.com/matrix-org/matrix-spec-proposals/pull/2285). Contributed by @SimonBrandner.

@@ -1 +0,0 @@
-Add new `enable_registration_token_3pid_bypass` configuration option to allow registrations via token as an alternative to verifying a 3pid.

@@ -1 +0,0 @@
-Update to mypy 0.950.

@@ -1 +0,0 @@
-Move `pympler` back in to the `all` extras.

@@ -1 +0,0 @@
-Prevent memory leak from reoccurring when presence is disabled.

@@ -1 +0,0 @@
-Fix a long-standing bug where rooms containing power levels with string values could not be upgraded.

@@ -1 +0,0 @@
-Log status code of cancelled requests as 499 and avoid logging stack traces for them.

@@ -1 +0,0 @@
-Fix docs on how to run specific Complement tests using the `complement.sh` test runner.

@@ -1 +0,0 @@
-Fix spelling of `M_UNRECOGNIZED` in comments.

@@ -1 +0,0 @@
-Use `Concatenate` to better annotate `_do_execute`.

@@ -1 +0,0 @@
-Use `ParamSpec` to refine type hints.

@@ -1 +0,0 @@
-Implement [changes](https://github.com/matrix-org/matrix-spec-proposals/pull/2285/commits/4a77139249c2e830aec3c7d6bd5501a514d1cc27) to [MSC2285 (hidden read receipts)](https://github.com/matrix-org/matrix-spec-proposals/pull/2285). Contributed by @SimonBrandner.

@@ -1 +0,0 @@
-Fix mypy against latest pillow stubs.

@@ -1 +0,0 @@
-Add topics to synapse documentation.
@@ -66,18 +66,6 @@
       ],
       "title": "Dashboards",
       "type": "dashboards"
     },
-    {
-      "asDropdown": false,
-      "icon": "external link",
-      "includeVars": false,
-      "keepTime": false,
-      "tags": [],
-      "targetBlank": true,
-      "title": "Synapse Documentation",
-      "tooltip": "Open Documentation",
-      "type": "link",
-      "url": "https://matrix-org.github.io/synapse/latest/"
-    }
   ],
   "panels": [
@@ -10901,4 +10889,4 @@
   "title": "Synapse",
   "uid": "000000012",
   "version": 100
-}
+}
debian/build_virtualenv (vendored, 6 lines changed)
@@ -37,11 +37,7 @@ python3 -m venv "$TEMP_VENV"
 source "$TEMP_VENV/bin/activate"
 pip install -U pip
 pip install poetry==1.2.0b1
-poetry export \
-    --extras all \
-    --extras test \
-    --extras systemd \
-    -o exported_requirements.txt
+poetry export --extras all --extras test -o exported_requirements.txt
 deactivate
 rm -rf "$TEMP_VENV"
 
debian/changelog (vendored, 16 lines changed)
@@ -1,19 +1,3 @@
-matrix-synapse-py3 (1.58.2) UNRELEASED; urgency=medium
-
-  * Adjust how the `exported-requirements.txt` file is generated as part of
-    the process of building these packages. This affects the package
-    maintainers only; end-users are unaffected.
-
- -- Synapse Packaging team <packages@matrix.org>  Fri, 06 May 2022 13:49:29 +0100
-
-matrix-synapse-py3 (1.58.1) stable; urgency=medium
-
-  * Include python dependencies from the `systemd` and `cache_memory` extras package groups, which
-    were incorrectly omitted from the 1.58.0 package.
-  * New Synapse release 1.58.1.
-
- -- Synapse Packaging team <packages@matrix.org>  Thu, 05 May 2022 14:58:23 +0100
-
 matrix-synapse-py3 (1.58.0) stable; urgency=medium
 
   * New Synapse release 1.58.0.
 
@@ -69,10 +69,10 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
         "worker_extra_conf": "enable_media_repo: true",
     },
     "appservice": {
-        "app": "synapse.app.generic_worker",
+        "app": "synapse.app.appservice",
         "listener_resources": [],
         "endpoint_patterns": [],
-        "shared_extra_conf": {"notify_appservices_from_worker": "appservice"},
+        "shared_extra_conf": {"notify_appservices": False},
         "worker_extra_conf": "",
     },
     "federation_sender": {
docs/SUMMARY.md (159 lines changed)
@@ -88,158 +88,19 @@
   - [OpenTracing](opentracing.md)
   - [Database Schemas](development/database_schema.md)
   - [Experimental features](development/experimental_features.md)
-  - [Synapse Architecture](development/synapse_architechture.md)
-    - [Overview]()
-      - Flows of data through Synapse
-        - Starts with a Request/BG Job/Module
-        - Processing
-        - Maybe Response
-      - Config
-      - Request Handling
-        - Clients
-        - Federation
-      - Internal Processing - there could be an exception!
-      - Storage
-      - Federation
-      - External Entities: Remote homeservers, Appservices, Pushers, Identity Servers
-      - Modules
-      - Workers/Replication/Redis/Organization of a large homeserver instance
-    - [Configuration]()
-      - Experimental config options
-        - Define how long they should live after the corresponding msc has merged
-    - [Logging]()
-      - combine with [Log Contexts](log_contexts.md)
-    - [Storage]()
-      - What is current database schema?
-        - Start by linking script that can compile full schema
-        - Is there a tool that can visualize a postgres db? - Shay to check
-      - [Database Backends]()
-      - [Database Migrations]()
-        - Background [Schema] Updates
-      - [Caching]()
-        - Cache tuning
-          - How to check sizes
-          - Nice to have a metric for max cache sizes
-        - How to add a cache to a [storage] function
-          - Why would you
-          - How to invalidate (with support for workers)
-    - [Request Listening]()
-      - Mention that we use Twisted
-      - [Servlets and Resources]()
-        - When to use one over the other (always use a servlet?)
-        - Unstable/release-based endpoints
-        - Don't put too many servlets on the same resource
-      - [Handlers]()
-        - Machinery behind the servlet that does all the processing
-    - [Background Jobs]()
-    - [Federation]()
-      - Federation Catchup
-    - [Data Types]()
-      - [Users]()
-        - Allowed username characters
-      - [Devices]()
-      - [Events]()
-        - What is an event?
-        - Different event formats depending on room version
-        - Outlier events, rejected events, dropped events
-      - [Rooms]()
-        - [Room DAG concepts](development/room-dag-concepts.md)
-      - [Sync]()
-    - [State Resolution]()
-      - [The Auth Chain Difference Algorithm](auth_chain_difference_algorithm.md)
-    - [User Authentication]()
-    - [Synapse Architecture]()
   - [Log Contexts](log_contexts.md)
   - [Replication](replication.md)
   - [TCP Replication](tcp_replication.md)
 - [Internal Documentation](development/internal_documentation/README.md)
   - [Single Sign-On]()
     - [SAML](development/saml.md)
     - [CAS](development/cas.md)
-    - [User-Interactive Auth]()
-      - [Password-based]()
-        - [Password Auth Modules]()
-      - [Token-based]()
-    - [Rate Limiting]()
-      - Different rate limiting classes (Ratelimiter vs RequestRatelimiter)
-      - How does our rate limiting work
-      - When to use/add rate limiting
-      - Rate limiting for federation traffic
-    - [Media Repository](media_repository.md)
-    - [Email and HTML Templating]()
-    - [Presence]()
-      - What is Presence?
-      - How is it designed to work currently
-      - Why it is recommended to be disabled
-        - Why is this resource intensive
-        - Links and issues on how to fix this
-      - How it's implemented
-    - [Application Services]()
-    - [Push Notifications]()
-    - [Synapse Admin API]()
-    - [Synapse Modules]()
-    - [Workers]()
-      - Things to be mindful of in order to make your feature work with workers
-      - How to test a feature on workers
-      - [Replication](replication.md)
-      - [TCP Replication](tcp_replication.md)
-      - [Sources]()
-        - TypingSource, ReceiptSource, PresenceSource, etc
-        - Classes containing methods for storing and getting different types of data
-      - [Streams]()
-        - Stream Types (the ID of the stream)
-        - Stream Token: each entity added to the stream gets a unique, incremental token
-        - StreamReplication classes and clients
-        - How this interacts with sync?
-        - Stream token generators: Single Writer Only (sqlite), MultiWriterStreamIDGenerator (Postgres)
-      - [Notifier]()
-    - [Monitoring and Metrics]() # opentracing, phone home and metrics
-      - [Opentracing]()
-        - How does this work?
-        - How is this helpful?
-        - What's jaeger?
-        - How do I create and mutate a span?
-        - How are spans sent between homeservers?
-          - Only between whitelisted homeservers
-        - What areas are currently traced
-          - Database layer
-          - Servlets
-          - Crypto stuff
-      - [Metrics]()
-        - Prometheus
-        - How to make a new metrics / good practices
-        - What are all the current metrics and what each one means
-        - How to view in grafana
-          - Here's a quick example of making a new panel to visualize metrics
-          - Btw it would be nice to have a tooltip with an explainer for every graph in graphana
-      - [Room and User Statistics](room_and_user_statistics.md) # TODO: This page is currently useless
-    - [Testing]()
-      - [Unit Tests]()
-        - How to run the tests (partially in the contributing guide)
-          - IDE-specific examples
-          - How to run one test / one test case / run in parallel
-          - Documenting test environment variables: SYNAPSE_TEST_LOG_LEVEL, SYNAPSE_TEST_PERSISTENT_SQLITE_DB, SYNAPSE_POSTGRES etc.
-        - Test case versus test method
-          - prepare, make_homeserver, default_config, @override_config decorator, @parameterized decorator, pump/reactor.advance
-        - Test utilities: fake homeserver, federation, workers, channels, requests, HomeServerTestCase.helper
-          - How to use them in your tests at a high level
-          - How they are implemented
-        - Different inherited TestCase classes: HomeServerTestCase, FederatedHomeServerTestCase(sp?), TestCase
-        - Test pattern: How to start a request, check the state of the homeserver, respond to the request
-        - Ensuring the right servlets for your test are registered
-        - Good practice: use storage methods vs. tampering with the database directly (database schema changes)
-        - Dealing with deferreds/async (get_success/get_failure).
-        - Mocking - async mocks
-        - Organization of tests
-        - Skipping tests
-      - [Integration Tests]()
-        - [Complement]()
-          - Point to Complement's documentation on its structure and how to write a test
-          - How to run Complement against Synapse
-          - Document Synapse's Complement images and how they're built
-            - Which Complement build flags does Synapse support
-            - Unstable features
-          - Synapse's Complement Blacklist
-        - [Sytest]()
-          - You should prefer Complement :)
-          - Helper functions
-          - Dealing with race conditions
-          - Link to that one perl helper doc Matthew wrote
+  - [Room DAG concepts](development/room-dag-concepts.md)
+  - [State Resolution]()
+    - [The Auth Chain Difference Algorithm](auth_chain_difference_algorithm.md)
+  - [Media Repository](media_repository.md)
+  - [Room and User Statistics](room_and_user_statistics.md)
+- [Scripts]()
 
 # Other
 
@@ -270,13 +270,13 @@ COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh
 To run a specific test file, you can pass the test name at the end of the command. The name passed comes from the naming structure in your Complement tests. If you're unsure of the name, you can do a full run and copy it from the test output:
 
 ```sh
-COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh -run TestImportHistoricalMessages
+COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh TestBackfillingHistory
 ```
 
 To run a specific test, you can specify the whole name structure:
 
 ```sh
-COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh -run TestImportHistoricalMessages/parallel/Historical_events_resolve_in_the_correct_order
+COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh TestBackfillingHistory/parallel/Backfilled_historical_events_resolve_with_proper_state_in_correct_order
 ```
 
@@ -407,11 +407,6 @@ manhole_settings:
 # sign up in a short space of time never to return after their initial
 # session.
 #
-# The option `mau_appservice_trial_days` is similar to `mau_trial_days`, but
-# applies a different trial number if the user was registered by an appservice.
-# A value of 0 means no trial days are applied. Appservices not listed in this
-# dictionary use the value of `mau_trial_days` instead.
-#
 # 'mau_limit_alerting' is a means of limiting client side alerting
 # should the mau limit be reached. This is useful for small instances
 # where the admin has 5 mau seats (say) for 5 specific people and no
@@ -422,8 +417,6 @@ manhole_settings:
 #max_mau_value: 50
 #mau_trial_days: 2
 #mau_limit_alerting: false
-#mau_appservice_trial_days:
-#  "appservice-id": 1
 
 # If enabled, the metrics for the number of monthly active users will
 # be populated, however no one will be limited. If limit_usage_by_mau
@@ -716,11 +709,11 @@ retention:
 #
 #allow_profile_lookup_over_federation: false
 
-# Uncomment to allow device display name lookup over federation. By default, the
-# Federation API prevents other homeservers from obtaining the display names of
-# user devices on this homeserver. Defaults to 'false'.
+# Uncomment to disable device display name lookup over federation. By default, the
+# Federation API allows other homeservers to obtain device display names of any user
+# on this homeserver. Defaults to 'true'.
 #
-#allow_device_name_lookup_over_federation: true
+#allow_device_name_lookup_over_federation: false
 
 
 ## Caching ##
 
@@ -89,43 +89,6 @@ process, for example:
 dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
 ```
 
-# Upgrading to v1.59.0
-
-## Device name lookup over federation has been disabled by default
-
-The names of user devices are no longer visible to users on other homeservers by default.
-Device IDs are unaffected, as these are necessary to facilitate end-to-end encryption.
-
-To re-enable this functionality, set the
-[`allow_device_name_lookup_over_federation`](https://matrix-org.github.io/synapse/v1.59/usage/configuration/config_documentation.html#federation)
-homeserver config option to `true`.
-
-
-## Deprecation of the `synapse.app.appservice` worker application type
-
-The `synapse.app.appservice` worker application type allowed you to configure a
-single worker to use to notify application services of new events, as long
-as this functionality was disabled on the main process with `notify_appservices: False`.
-
-To unify Synapse's worker types, the `synapse.app.appservice` worker application
-type and the `notify_appservices` configuration option have been deprecated.
-
-To get the same functionality, it's now recommended that the `synapse.app.generic_worker`
-worker application type is used and that the `notify_appservices_from_worker` option
-is set to the name of a worker.
-
-For the time being, `notify_appservices_from_worker` can be used alongside
-`synapse.app.appservice` and `notify_appservices` to make it easier to transition
-between the two configurations, however please note that:
-
-- the options must not contradict each other (otherwise Synapse won't start); and
-- the `notify_appservices` option will be removed in a future release of Synapse.
-
-Please see [the relevant section of the worker documentation][v1_59_notify_ases_from] for more information.
-
-[v1_59_notify_ases_from]: workers.md#notifying-application-services
-
-
 # Upgrading to v1.58.0
 
 ## Groups/communities feature has been disabled by default
@@ -133,7 +96,6 @@ Please see [the relevant section of the worker documentation][v1_59_notify_ases_
 The non-standard groups/communities feature in Synapse has been disabled by default
 and will be removed in Synapse v1.61.0.
 
-
 # Upgrading to v1.57.0
 
 ## Changes to database schema for application services
 
@@ -627,20 +627,6 @@ Example configuration:
 mau_trial_days: 5
 ```
 ---
-Config option: `mau_appservice_trial_days`
-
-The option `mau_appservice_trial_days` is similar to `mau_trial_days`, but applies a different
-trial number if the user was registered by an appservice. A value
-of 0 means no trial days are applied. Appservices not listed in this dictionary
-use the value of `mau_trial_days` instead.
-
-Example configuration:
-```yaml
-mau_appservice_trial_days:
-  my_appservice_id: 3
-  another_appservice_id: 6
-```
----
 Config option: `mau_limit_alerting`
 
 The option `mau_limit_alerting` is a means of limiting client-side alerting
@@ -1049,13 +1035,13 @@ allow_profile_lookup_over_federation: false
 ---
 Config option: `allow_device_name_lookup_over_federation`
 
-Set this option to true to allow device display name lookup over federation. By default, the
-Federation API prevents other homeservers from obtaining the display names of any user devices
+Set this option to false to disable device display name lookup over federation. By default, the
+Federation API allows other homeservers to obtain device display names of any user
 on this homeserver.
 
 Example configuration:
 ```yaml
-allow_device_name_lookup_over_federation: true
+allow_device_name_lookup_over_federation: false
 ```
 ---
 ## Caching ##
 
@@ -435,23 +435,6 @@ An example for a dedicated background worker instance:
 {{#include systemd-with-workers/workers/background_worker.yaml}}
 ```
 
-#### Notifying Application Services
-
-You can designate one worker to send output traffic to Application Services.
-
-Specify its name in the shared configuration as follows:
-
-```yaml
-notify_appservices_from_worker: worker_name
-```
-
-This work cannot be load-balanced; please ensure the main process is restarted
-after setting this option in the shared configuration!
-
-This style of configuration supersedes the legacy `synapse.app.appservice`
-worker application type.
-
-
 ### `synapse.app.pusher`
 
 Handles sending push notifications to sygnal and email. Doesn't handle any
@@ -470,9 +453,6 @@ pusher_instances:
 
 ### `synapse.app.appservice`
 
-**Deprecated as of Synapse v1.58.** [Use `synapse.app.generic_worker` with the
-`notify_appservices_from_worker` option instead.](#notifying-application-services)
-
 Handles sending output traffic to Application Services. Doesn't handle any
 REST endpoints itself, but you should set `notify_appservices: False` in the
 shared configuration file to stop the main synapse sending appservice notifications.
 
poetry.lock (generated, 72 lines changed)
@@ -572,7 +572,7 @@ python-versions = "*"
 
 [[package]]
 name = "mypy"
-version = "0.950"
+version = "0.931"
 description = "Optional static typing for Python"
 category = "dev"
 optional = false
@@ -580,14 +580,13 @@ python-versions = ">=3.6"
 
 [package.dependencies]
 mypy-extensions = ">=0.4.3"
-tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
+tomli = ">=1.1.0"
 typed-ast = {version = ">=1.4.0,<2", markers = "python_version < \"3.8\""}
 typing-extensions = ">=3.10"
 
 [package.extras]
 dmypy = ["psutil (>=4.0)"]
 python2 = ["typed-ast (>=1.4.0,<2)"]
 reports = ["lxml"]
 
@@ -599,14 +598,14 @@ python-versions = "*"
 
 [[package]]
 name = "mypy-zope"
-version = "0.3.7"
+version = "0.3.5"
 description = "Plugin for mypy to support zope interfaces"
 category = "dev"
 optional = false
 python-versions = "*"
 
 [package.dependencies]
-mypy = "0.950"
+mypy = "0.931"
 "zope.interface" = "*"
 "zope.schema" = "*"
 
@@ -1021,7 +1020,7 @@ jeepney = ">=0.6"
 
 [[package]]
 name = "sentry-sdk"
-version = "1.5.11"
+version = "1.5.7"
 description = "Python client for Sentry (https://sentry.io)"
 category = "main"
 optional = true
@@ -1371,7 +1370,7 @@ python-versions = "*"
 
 [[package]]
 name = "types-pillow"
-version = "9.0.15"
+version = "9.0.6"
 description = "Typing stubs for Pillow"
 category = "dev"
 optional = false
@@ -1546,7 +1545,7 @@ docs = ["sphinx", "repoze.sphinx.autointerface"]
 test = ["zope.i18nmessageid", "zope.testing", "zope.testrunner"]
 
 [extras]
-all = ["matrix-synapse-ldap3", "psycopg2", "psycopg2cffi", "psycopg2cffi-compat", "pysaml2", "authlib", "lxml", "sentry-sdk", "jaeger-client", "opentracing", "pyjwt", "txredisapi", "hiredis", "Pympler"]
+all = ["matrix-synapse-ldap3", "psycopg2", "psycopg2cffi", "psycopg2cffi-compat", "pysaml2", "authlib", "lxml", "sentry-sdk", "jaeger-client", "opentracing", "pyjwt", "txredisapi", "hiredis"]
 cache_memory = ["Pympler"]
 jwt = ["pyjwt"]
 matrix-synapse-ldap3 = ["matrix-synapse-ldap3"]
@@ -1563,7 +1562,7 @@ url_preview = ["lxml"]
 [metadata]
 lock-version = "1.1"
 python-versions = "^3.7.1"
-content-hash = "d39d5ac5d51c014581186b7691999b861058b569084c525523baf70b77f292b1"
+content-hash = "2bda1a7cfc8cc02832b4a7d16bf7e1615cb05e0639bdb30688aadf692d851942"
 
 [metadata.files]
 attrs = [
@@ -2090,37 +2089,34 @@ msgpack = [
     {file = "msgpack-1.0.3.tar.gz", hash = "sha256:51fdc7fb93615286428ee7758cecc2f374d5ff363bdd884c7ea622a7a327a81e"},
 ]
 mypy = [
-    {file = "mypy-0.950-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cf9c261958a769a3bd38c3e133801ebcd284ffb734ea12d01457cb09eacf7d7b"},
-    {file = "mypy-0.950-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b5b5bd0ffb11b4aba2bb6d31b8643902c48f990cc92fda4e21afac658044f0c0"},
-    {file = "mypy-0.950-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5e7647df0f8fc947388e6251d728189cfadb3b1e558407f93254e35abc026e22"},
-    {file = "mypy-0.950-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:eaff8156016487c1af5ffa5304c3e3fd183edcb412f3e9c72db349faf3f6e0eb"},
-    {file = "mypy-0.950-cp310-cp310-win_amd64.whl", hash = "sha256:563514c7dc504698fb66bb1cf897657a173a496406f1866afae73ab5b3cdb334"},
-    {file = "mypy-0.950-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:dd4d670eee9610bf61c25c940e9ade2d0ed05eb44227275cce88701fee014b1f"},
-    {file = "mypy-0.950-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ca75ecf2783395ca3016a5e455cb322ba26b6d33b4b413fcdedfc632e67941dc"},
-    {file = "mypy-0.950-cp36-cp36m-win_amd64.whl", hash = "sha256:6003de687c13196e8a1243a5e4bcce617d79b88f83ee6625437e335d89dfebe2"},
-    {file = "mypy-0.950-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:4c653e4846f287051599ed8f4b3c044b80e540e88feec76b11044ddc5612ffed"},
-    {file = "mypy-0.950-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:e19736af56947addedce4674c0971e5dceef1b5ec7d667fe86bcd2b07f8f9075"},
-    {file = "mypy-0.950-cp37-cp37m-win_amd64.whl", hash = "sha256:ef7beb2a3582eb7a9f37beaf38a28acfd801988cde688760aea9e6cc4832b10b"},
-    {file = "mypy-0.950-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:0112752a6ff07230f9ec2f71b0d3d4e088a910fdce454fdb6553e83ed0eced7d"},
-    {file = "mypy-0.950-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ee0a36edd332ed2c5208565ae6e3a7afc0eabb53f5327e281f2ef03a6bc7687a"},
-    {file = "mypy-0.950-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:77423570c04aca807508a492037abbd72b12a1fb25a385847d191cd50b2c9605"},
-    {file = "mypy-0.950-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:5ce6a09042b6da16d773d2110e44f169683d8cc8687e79ec6d1181a72cb028d2"},
-    {file = "mypy-0.950-cp38-cp38-win_amd64.whl", hash = "sha256:5b231afd6a6e951381b9ef09a1223b1feabe13625388db48a8690f8daa9b71ff"},
-    {file = "mypy-0.950-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:0384d9f3af49837baa92f559d3fa673e6d2652a16550a9ee07fc08c736f5e6f8"},
-    {file = "mypy-0.950-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1fdeb0a0f64f2a874a4c1f5271f06e40e1e9779bf55f9567f149466fc7a55038"},
-    {file = "mypy-0.950-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:61504b9a5ae166ba5ecfed9e93357fd51aa693d3d434b582a925338a2ff57fd2"},
-    {file = "mypy-0.950-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a952b8bc0ae278fc6316e6384f67bb9a396eb30aced6ad034d3a76120ebcc519"},
-    {file = "mypy-0.950-cp39-cp39-win_amd64.whl", hash = "sha256:eaea21d150fb26d7b4856766e7addcf929119dd19fc832b22e71d942835201ef"},
-    {file = "mypy-0.950-py3-none-any.whl", hash = "sha256:a4d9898f46446bfb6405383b57b96737dcfd0a7f25b748e78ef3e8c576bba3cb"},
-    {file = "mypy-0.950.tar.gz", hash = "sha256:1b333cfbca1762ff15808a0ef4f71b5d3eed8528b23ea1c3fb50543c867d68de"},
+    {file = "mypy-0.931-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3c5b42d0815e15518b1f0990cff7a705805961613e701db60387e6fb663fe78a"},
+    {file = "mypy-0.931-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c89702cac5b302f0c5d33b172d2b55b5df2bede3344a2fbed99ff96bddb2cf00"},
+    {file = "mypy-0.931-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:300717a07ad09525401a508ef5d105e6b56646f7942eb92715a1c8d610149714"},
+    {file = "mypy-0.931-cp310-cp310-win_amd64.whl", hash = "sha256:7b3f6f557ba4afc7f2ce6d3215d5db279bcf120b3cfd0add20a5d4f4abdae5bc"},
+    {file = "mypy-0.931-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:1bf752559797c897cdd2c65f7b60c2b6969ffe458417b8d947b8340cc9cec08d"},
+    {file = "mypy-0.931-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4365c60266b95a3f216a3047f1d8e3f895da6c7402e9e1ddfab96393122cc58d"},
+    {file = "mypy-0.931-cp36-cp36m-win_amd64.whl", hash = "sha256:1b65714dc296a7991000b6ee59a35b3f550e0073411ac9d3202f6516621ba66c"},
+    {file = "mypy-0.931-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e839191b8da5b4e5d805f940537efcaa13ea5dd98418f06dc585d2891d228cf0"},
+    {file = "mypy-0.931-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:50c7346a46dc76a4ed88f3277d4959de8a2bd0a0fa47fa87a4cde36fe247ac05"},
+    {file = "mypy-0.931-cp37-cp37m-win_amd64.whl", hash = "sha256:d8f1ff62f7a879c9fe5917b3f9eb93a79b78aad47b533911b853a757223f72e7"},
+    {file = "mypy-0.931-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f9fe20d0872b26c4bba1c1be02c5340de1019530302cf2dcc85c7f9fc3252ae0"},
+    {file = "mypy-0.931-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1b06268df7eb53a8feea99cbfff77a6e2b205e70bf31743e786678ef87ee8069"},
+    {file = "mypy-0.931-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:8c11003aaeaf7cc2d0f1bc101c1cc9454ec4cc9cb825aef3cafff8a5fdf4c799"},
+    {file = "mypy-0.931-cp38-cp38-win_amd64.whl", hash = "sha256:d9d2b84b2007cea426e327d2483238f040c49405a6bf4074f605f0156c91a47a"},
+    {file = "mypy-0.931-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ff3bf387c14c805ab1388185dd22d6b210824e164d4bb324b195ff34e322d166"},
+    {file = "mypy-0.931-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5b56154f8c09427bae082b32275a21f500b24d93c88d69a5e82f3978018a0266"},
+    {file = "mypy-0.931-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:8ca7f8c4b1584d63c9a0f827c37ba7a47226c19a23a753d52e5b5eddb201afcd"},
+    {file = "mypy-0.931-cp39-cp39-win_amd64.whl", hash = "sha256:74f7eccbfd436abe9c352ad9fb65872cc0f1f0a868e9d9c44db0893440f0c697"},
+    {file = "mypy-0.931-py3-none-any.whl", hash = "sha256:1171f2e0859cfff2d366da2c7092b06130f232c636a3f7301e3feb8b41f6377d"},
+    {file = "mypy-0.931.tar.gz", hash = "sha256:0038b21890867793581e4cb0d810829f5fd4441aa75796b53033af3aa30430ce"},
 ]
 mypy-extensions = [
     {file = "mypy_extensions-0.4.3-py2.py3-none-any.whl", hash = "sha256:090fedd75945a69ae91ce1303b5824f428daf5a028d2f6ab8a299250a846f15d"},
     {file = "mypy_extensions-0.4.3.tar.gz", hash = "sha256:2d82818f5bb3e369420cb3c4060a7970edba416647068eb4c5343488a6c604a8"},
 ]
 mypy-zope = [
-    {file = "mypy-zope-0.3.7.tar.gz", hash = "sha256:9da171e78e8ef7ac8922c86af1a62f1b7f3244f121020bd94a2246bc3f33c605"},
-    {file = "mypy_zope-0.3.7-py3-none-any.whl", hash = "sha256:9c7637d066e4d1bafa0651abc091c752009769098043b236446e6725be2bc9c2"},
+    {file = "mypy-zope-0.3.5.tar.gz", hash = "sha256:489e7da1c2af887f2cfe3496995fc247f296512b495b57817edddda9d22308f3"},
+    {file = "mypy_zope-0.3.5-py3-none-any.whl", hash = "sha256:3bd0cc9a3e5933b02931af4b214ba32a4f4ff98adb30c979ce733857db91a18b"},
 ]
 netaddr = [
     {file = "netaddr-0.8.0-py2.py3-none-any.whl", hash = "sha256:9666d0232c32d2656e5e5f8d735f58fd6c7457ce52fc21c98d45f2af78f990ac"},
@@ -2390,8 +2386,8 @@ secretstorage = [
     {file = "SecretStorage-3.3.1.tar.gz", hash = "sha256:fd666c51a6bf200643495a04abb261f83229dcb6fd8472ec393df7ffc8b6f195"},
 ]
 sentry-sdk = [
-    {file = "sentry-sdk-1.5.11.tar.gz", hash = "sha256:6c01d9d0b65935fd275adc120194737d1df317dce811e642cbf0394d0d37a007"},
-    {file = "sentry_sdk-1.5.11-py2.py3-none-any.whl", hash = "sha256:c17179183cac614e900cbd048dab03f49a48e2820182ec686c25e7ce46f8548f"},
+    {file = "sentry-sdk-1.5.7.tar.gz", hash = "sha256:aa52da941c56b5a76fd838f8e9e92a850bf893a9eb1e33ffce6c21431d07ee30"},
+    {file = "sentry_sdk-1.5.7-py2.py3-none-any.whl", hash = "sha256:411a8495bd18cf13038e5749e4710beb4efa53da6351f67b4c2f307c2d9b6d49"},
 ]
 service-identity = [
     {file = "service-identity-21.1.0.tar.gz", hash = "sha256:6e6c6086ca271dc11b033d17c3a8bea9f24ebff920c587da090afc9519419d34"},
@@ -2626,8 +2622,8 @@ types-opentracing = [
     {file = "types_opentracing-2.4.7-py3-none-any.whl", hash = "sha256:861fb8103b07cf717f501dd400cb274ca9992552314d4d6c7a824b11a215e512"},
 ]
 types-pillow = [
-    {file = "types-Pillow-9.0.15.tar.gz", hash = "sha256:d2e385fe5c192e75970f18accce69f5c2a9f186f3feb578a9b91cd6fdf64211d"},
-    {file = "types_Pillow-9.0.15-py3-none-any.whl", hash = "sha256:c9646595dfafdf8b63d4b1443292ead17ee0fc7b18a143e497b68e0ea2dc1eb6"},
+    {file = "types-Pillow-9.0.6.tar.gz", hash = "sha256:79b350b1188c080c27558429f1e119e69c9f020b877a82df761d9283070e0185"},
+    {file = "types_Pillow-9.0.6-py3-none-any.whl", hash = "sha256:bd1e0a844fc718398aa265bf50fcad550fc520cc54f80e5ffeb7b3226b3cc507"},
 ]
 types-psycopg2 = [
     {file = "types-psycopg2-2.9.9.tar.gz", hash = "sha256:4f9d4d52eeb343dc00fd5ed4f1513a8a5c18efba0a072eb82706d15cf4f20a2e"},
@@ -54,7 +54,7 @@ skip_gitignore = true
 
 [tool.poetry]
 name = "matrix-synapse"
-version = "1.58.1"
+version = "1.58.0"
 description = "Homeserver for the Matrix decentralised comms protocol"
 authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
 license = "Apache-2.0"
@@ -142,10 +142,8 @@ netaddr = ">=0.7.18"
 # add a lower bound to the Jinja2 dependency.
 Jinja2 = ">=3.0"
 bleach = ">=1.4.3"
-# We use `ParamSpec` and `Concatenate`, which were added in `typing-extensions` 3.10.0.0.
-# Additionally we need https://github.com/python/typing/pull/817 to allow types to be
-# generic over ParamSpecs.
-typing-extensions = ">=3.10.0.1"
+# We use `ParamSpec`, which was added in `typing-extensions` 3.10.0.0.
+typing-extensions = ">=3.10.0"
 # We enforce that we have a `cryptography` version that bundles an `openssl`
 # with the latest security patches.
 cryptography = ">=3.4.7"
@@ -233,11 +231,10 @@ all = [
     "jaeger-client", "opentracing",
     # jwt
     "pyjwt",
-    # redis
-    "txredisapi", "hiredis",
-    # cache_memory
-    "pympler",
+    #redis
+    "txredisapi", "hiredis"
     # omitted:
+    # - cache_memory: this is an experimental option
     # - test: it's useful to have this separate from dev deps in the olddeps job
     # - systemd: this is a system-based requirement
 ]
@@ -251,8 +248,8 @@ flake8-bugbear = "==21.3.2"
 flake8 = "*"
 
 # Typechecking
-mypy = "*"
-mypy-zope = "*"
+mypy = "==0.931"
+mypy-zope = "==0.3.5"
 types-bleach = ">=4.1.0"
 types-commonmark = ">=0.9.2"
 types-jsonschema = ">=3.2.0"
 
@@ -85,19 +85,12 @@ class SortedDict(Dict[_KT, _VT]):
     def popitem(self, index: int = ...) -> Tuple[_KT, _VT]: ...
     def peekitem(self, index: int = ...) -> Tuple[_KT, _VT]: ...
     def setdefault(self, key: _KT, default: Optional[_VT] = ...) -> _VT: ...
-    # Mypy now reports the first overload as an error, because typeshed widened the type
-    # of `__map` to its internal `_typeshed.SupportsKeysAndGetItem` type in
-    # https://github.com/python/typeshed/pull/6653
-    # Since sorteddicts don't change the signature of `update` from that of `dict`, we
-    # let the stubs for `update` inherit from the stubs for `dict`. (I suspect we could
-    # do the same for many othe methods.) We leave the stubs commented to better track
-    # how this file has evolved from the original stubs.
-    # @overload
-    # def update(self, __map: Mapping[_KT, _VT], **kwargs: _VT) -> None: ...
-    # @overload
-    # def update(self, __iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT) -> None: ...
-    # @overload
-    # def update(self, **kwargs: _VT) -> None: ...
+    @overload
+    def update(self, __map: Mapping[_KT, _VT], **kwargs: _VT) -> None: ...
+    @overload
+    def update(self, __iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT) -> None: ...
+    @overload
+    def update(self, **kwargs: _VT) -> None: ...
     def __reduce__(
         self,
     ) -> Tuple[
@@ -187,7 +187,7 @@ class Auth:
         Once get_user_by_req has set up the opentracing span, this does the actual work.
         """
         try:
-            ip_addr = request.getClientAddress().host
+            ip_addr = request.getClientIP()
             user_agent = get_request_user_agent(request)
 
             access_token = self.get_access_token_from_request(request)
@@ -356,7 +356,7 @@ class Auth:
             return None, None, None
 
         if app_service.ip_range_whitelist:
-            ip_address = IPAddress(request.getClientAddress().host)
+            ip_address = IPAddress(request.getClientIP())
             if ip_address not in app_service.ip_range_whitelist:
                 return None, None, None
 
@@ -417,8 +417,7 @@ class Auth:
         """
 
         if rights == "access":
-            # First look in the database to see if the access token is present
-            # as an opaque token.
+            # first look in the database
             r = await self.store.get_user_by_access_token(token)
             if r:
                 valid_until_ms = r.valid_until_ms
@@ -435,8 +434,7 @@ class Auth:
 
                 return r
 
-        # If the token isn't found in the database, then it could still be a
-        # macaroon, so we check that here.
+        # otherwise it needs to be a valid macaroon
         try:
             user_id, guest = self._parse_and_validate_macaroon(token, rights)
 
@@ -484,12 +482,8 @@ class Auth:
             TypeError,
             ValueError,
         ) as e:
-            logger.warning(
-                "Invalid access token in auth: %s %s.",
-                type(e),
-                e,
-            )
-            raise InvalidClientTokenError("Invalid access token passed.")
+            logger.warning("Invalid macaroon in auth: %s %s", type(e), e)
+            raise InvalidClientTokenError("Invalid macaroon passed.")
 
     def _parse_and_validate_macaroon(
         self, token: str, rights: str = "access"
@@ -510,7 +504,10 @@ class Auth:
         try:
             macaroon = pymacaroons.Macaroon.deserialize(token)
         except Exception:  # deserialize can throw more-or-less anything
-            # The access token doesn't look like a macaroon.
+            # doesn't look like a macaroon: treat it as an opaque token which
+            # must be in the database.
+            # TODO: it would be nice to get rid of this, but apparently some
+            # people use access tokens which aren't macaroons
             raise _InvalidMacaroonException()
 
         try:
@@ -255,5 +255,7 @@ class GuestAccess:
 
 class ReceiptTypes:
     READ: Final = "m.read"
-    READ_PRIVATE: Final = "org.matrix.msc2285.read.private"
     FULLY_READ: Final = "m.fully_read"
+
+
+class ReadReceiptEventFields:
+    MSC2285_HIDDEN: Final = "org.matrix.msc2285.hidden"
 
@@ -38,7 +38,6 @@ from typing import (
 
 from cryptography.utils import CryptographyDeprecationWarning
 from matrix_common.versionstring import get_distribution_version_string
-from typing_extensions import ParamSpec
 
 import twisted
 from twisted.internet import defer, error, reactor as _reactor
@@ -82,12 +81,11 @@ logger = logging.getLogger(__name__)
 
 # list of tuples of function, args list, kwargs dict
 _sighup_callbacks: List[
-    Tuple[Callable[..., None], Tuple[object, ...], Dict[str, object]]
+    Tuple[Callable[..., None], Tuple[Any, ...], Dict[str, Any]]
 ] = []
-P = ParamSpec("P")
 
 
-def register_sighup(func: Callable[P, None], *args: P.args, **kwargs: P.kwargs) -> None:
+def register_sighup(func: Callable[..., None], *args: Any, **kwargs: Any) -> None:
     """
     Register a function to be called when a SIGHUP occurs.
 
@@ -95,9 +93,7 @@ def register_sighup(func: Callable[P, None], *args: P.args, **kwargs: P.kwargs)
         func: Function to be called when sent a SIGHUP signal.
         *args, **kwargs: args and kwargs to be passed to the target function.
     """
-    # This type-ignore should be redundant once we use a mypy release with
-    # https://github.com/python/mypy/pull/12668.
-    _sighup_callbacks.append((func, args, kwargs))  # type: ignore[arg-type]
+    _sighup_callbacks.append((func, args, kwargs))
 
 
 def start_worker_reactor(
@@ -218,9 +214,7 @@ def redirect_stdio_to_logs() -> None:
     print("Redirected stdout/stderr to logs")
 
 
-def register_start(
-    cb: Callable[P, Awaitable], *args: P.args, **kwargs: P.kwargs
-) -> None:
+def register_start(cb: Callable[..., Awaitable], *args: Any, **kwargs: Any) -> None:
     """Register a callback with the reactor, to be called once it is running
 
     This can be used to initialise parts of the system which require an asynchronous
@@ -441,6 +441,22 @@ def start(config_options: List[str]) -> None:
             "synapse.app.user_dir",
         )
 
+    if config.worker.worker_app == "synapse.app.appservice":
+        if config.appservice.notify_appservices:
+            sys.stderr.write(
+                "\nThe appservices must be disabled in the main synapse process"
+                "\nbefore they can be run in a separate worker."
+                "\nPlease add ``notify_appservices: false`` to the main config"
+                "\n"
+            )
+            sys.exit(1)
+
+        # Force the appservice to start since they will be disabled in the main config
+        config.appservice.notify_appservices = True
+    else:
+        # For other worker types we force this to off.
+        config.appservice.notify_appservices = False
+
     if config.worker.worker_app == "synapse.app.user_dir":
         if config.server.update_user_directory:
             sys.stderr.write(
@@ -17,7 +17,6 @@ import urllib.parse
 from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple
 
 from prometheus_client import Counter
-from typing_extensions import TypeGuard
 
 from synapse.api.constants import EventTypes, Membership, ThirdPartyEntityKind
 from synapse.api.errors import CodeMessageException
@@ -67,7 +66,7 @@ def _is_valid_3pe_metadata(info: JsonDict) -> bool:
     return True
 
 
-def _is_valid_3pe_result(r: object, field: str) -> TypeGuard[JsonDict]:
+def _is_valid_3pe_result(r: JsonDict, field: str) -> bool:
     if not isinstance(r, dict):
         return False
 
@@ -33,6 +33,7 @@ class AppServiceConfig(Config):
 
     def read_config(self, config: JsonDict, **kwargs: Any) -> None:
         self.app_service_config_files = config.get("app_service_config_files", [])
+        self.notify_appservices = config.get("notify_appservices", True)
         self.track_appservice_user_ips = config.get("track_appservice_user_ips", False)
 
     def generate_config_section(cls, **kwargs: Any) -> str:
@@ -55,8 +56,7 @@ def load_appservices(
 ) -> List[ApplicationService]:
     """Returns a list of Application Services from the config files."""
     if not isinstance(config_files, list):
-        # type-ignore: this function gets arbitrary json value; we do use this path.
-        logger.warning("Expected %s to be a list of AS config files.", config_files)  # type: ignore[unreachable]
+        logger.warning("Expected %s to be a list of AS config files.", config_files)
         return []
 
     # Dicts of value -> filename
@@ -32,7 +32,7 @@ class ExperimentalConfig(Config):
         # MSC2716 (importing historical messages)
         self.msc2716_enabled: bool = experimental.get("msc2716_enabled", False)
 
-        # MSC2285 (private read receipts)
+        # MSC2285 (hidden read receipts)
         self.msc2285_enabled: bool = experimental.get("msc2285_enabled", False)
 
         # MSC3244 (room version capabilities)
@@ -46,7 +46,7 @@ class FederationConfig(Config):
         )
 
         self.allow_device_name_lookup_over_federation = config.get(
-            "allow_device_name_lookup_over_federation", False
+            "allow_device_name_lookup_over_federation", True
         )
 
     def generate_config_section(self, **kwargs: Any) -> str:
@@ -81,11 +81,11 @@ class FederationConfig(Config):
         #
         #allow_profile_lookup_over_federation: false
 
-        # Uncomment to allow device display name lookup over federation. By default, the
-        # Federation API prevents other homeservers from obtaining the display names of
-        # user devices on this homeserver. Defaults to 'false'.
+        # Uncomment to disable device display name lookup over federation. By default, the
+        # Federation API allows other homeservers to obtain device display names of any user
+        # on this homeserver. Defaults to 'true'.
         #
-        #allow_device_name_lookup_over_federation: true
+        #allow_device_name_lookup_over_federation: false
         """
 
@@ -43,8 +43,8 @@ class RegistrationConfig(Config):
         self.registration_requires_token = config.get(
             "registration_requires_token", False
         )
-        self.enable_registration_token_3pid_bypass = config.get(
-            "enable_registration_token_3pid_bypass", False
+        self.enable_registration_token_3pid_bypasss = config.get(
+            "enable_registration_token_3pid_bypasss", False
         )
         self.registration_shared_secret = config.get("registration_shared_secret")
 
@@ -413,7 +413,6 @@ class ServerConfig(Config):
         )
 
         self.mau_trial_days = config.get("mau_trial_days", 0)
-        self.mau_appservice_trial_days = config.get("mau_appservice_trial_days", {})
         self.mau_limit_alerting = config.get("mau_limit_alerting", True)
 
         # How long to keep redacted events in the database in unredacted form
@@ -1106,11 +1105,6 @@ class ServerConfig(Config):
         # sign up in a short space of time never to return after their initial
         # session.
         #
-        # The option `mau_appservice_trial_days` is similar to `mau_trial_days`, but
-        # applies a different trial number if the user was registered by an appservice.
-        # A value of 0 means no trial days are applied. Appservices not listed in this
-        # dictionary use the value of `mau_trial_days` instead.
-        #
         # 'mau_limit_alerting' is a means of limiting client side alerting
         # should the mau limit be reached. This is useful for small instances
         # where the admin has 5 mau seats (say) for 5 specific people and no
@@ -1121,8 +1115,6 @@ class ServerConfig(Config):
         #max_mau_value: 50
         #mau_trial_days: 2
         #mau_limit_alerting: false
-        #mau_appservice_trial_days:
-        #  "appservice-id": 1
 
         # If enabled, the metrics for the number of monthly active users will
         # be populated, however no one will be limited. If limit_usage_by_mau
@@ -14,8 +14,7 @@
|
||||
# limitations under the License.
|
||||
|
||||
import argparse
|
||||
import logging
|
||||
from typing import Any, Dict, List, Union
|
||||
from typing import Any, List, Union
|
||||
|
||||
import attr
|
||||
|
||||
@@ -43,13 +42,6 @@ synapse process before they can be run in a separate worker.
|
||||
Please add ``start_pushers: false`` to the main config
|
||||
"""
|
||||
|
||||
_DEPRECATED_WORKER_DUTY_OPTION_USED = """
|
||||
The '%s' configuration option is deprecated and will be removed in a future
|
||||
Synapse version. Please use ``%s: name_of_worker`` instead.
|
||||
"""
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _instance_to_list_converter(obj: Union[str, List[str]]) -> List[str]:
|
||||
"""Helper for allowing parsing a string or list of strings to a config
|
||||
@@ -304,105 +296,6 @@ class WorkerConfig(Config):
|
||||
self.worker_name is None and background_tasks_instance == "master"
|
||||
) or self.worker_name == background_tasks_instance
|
||||
|
||||
self.should_notify_appservices = self._should_this_worker_perform_duty(
|
||||
config,
|
||||
legacy_master_option_name="notify_appservices",
|
||||
legacy_worker_app_name="synapse.app.appservice",
|
||||
new_option_name="notify_appservices_from_worker",
|
||||
)
|
||||
|
||||
def _should_this_worker_perform_duty(
|
||||
self,
|
||||
config: Dict[str, Any],
|
||||
legacy_master_option_name: str,
|
||||
legacy_worker_app_name: str,
|
||||
new_option_name: str,
|
||||
) -> bool:
|
||||
"""
|
||||
Figures out whether this worker should perform a certain duty.
|
||||
|
||||
This function is temporary and is only to deal with the complexity
|
||||
of allowing old, transitional and new configurations all at once.
|
||||
|
||||
Contradictions between the legacy and new part of a transitional configuration
|
||||
will lead to a ConfigError.
|
||||
|
||||
Parameters:
|
||||
config: The config dictionary
|
||||
legacy_master_option_name: The name of a legacy option, whose value is boolean,
|
||||
specifying whether it's the master that should handle a certain duty.
|
||||
e.g. "notify_appservices"
|
||||
legacy_worker_app_name: The name of a legacy Synapse worker application
|
||||
that would traditionally perform this duty.
|
||||
e.g. "synapse.app.appservice"
|
||||
new_option_name: The name of the new option, whose value is the name of a
|
||||
designated worker to perform the duty.
|
||||
e.g. "notify_appservices_from_worker"
|
||||
"""
|
||||
|
||||
# None means 'unspecified'; True means 'run here' and False means
|
||||
# 'don't run here'.
|
||||
new_option_should_run_here = None
|
||||
if new_option_name in config:
|
||||
designated_worker = config[new_option_name] or "master"
|
||||
new_option_should_run_here = (
|
||||
designated_worker == "master" and self.worker_name is None
|
||||
) or designated_worker == self.worker_name
|
||||
|
||||
legacy_option_should_run_here = None
|
||||
if legacy_master_option_name in config:
|
||||
run_on_master = bool(config[legacy_master_option_name])
|
||||
|
||||
legacy_option_should_run_here = (
|
||||
self.worker_name is None and run_on_master
|
||||
) or (self.worker_app == legacy_worker_app_name and not run_on_master)
|
||||
|
||||
# Suggest using the new option instead.
|
||||
logger.warning(
|
||||
_DEPRECATED_WORKER_DUTY_OPTION_USED,
|
||||
legacy_master_option_name,
|
||||
new_option_name,
|
||||
)
|
||||
|
||||
if self.worker_app == legacy_worker_app_name and config.get(
|
||||
legacy_master_option_name, True
|
||||
):
|
||||
# As an extra bit of complication, we need to check that the
|
||||
# specialised worker is only used if the legacy config says the
|
||||
# master isn't performing the duties.
|
||||
raise ConfigError(
|
||||
f"Cannot use deprecated worker app type '{legacy_worker_app_name}' whilst deprecated option '{legacy_master_option_name}' is not set to false.\n"
|
||||
f"Consider setting `worker_app: synapse.app.generic_worker` and using the '{new_option_name}' option instead.\n"
|
||||
f"The '{new_option_name}' option replaces '{legacy_master_option_name}'."
|
||||
)
|
||||
|
||||
if new_option_should_run_here is None and legacy_option_should_run_here is None:
|
||||
# Neither option specified; the fallback behaviour is to run on the main process
|
||||
return self.worker_name is None
|
||||
|
||||
if (
|
||||
new_option_should_run_here is not None
|
||||
and legacy_option_should_run_here is not None
|
||||
):
|
||||
# Both options specified; ensure they match!
|
||||
if new_option_should_run_here != legacy_option_should_run_here:
|
||||
update_worker_type = (
|
||||
" and set worker_app: synapse.app.generic_worker"
|
||||
if self.worker_app == legacy_worker_app_name
|
||||
else ""
|
||||
)
|
||||
# If the values conflict, we suggest the admin removes the legacy option
|
||||
# for simplicity.
|
||||
raise ConfigError(
|
||||
f"Conflicting configuration options: {legacy_master_option_name} (legacy), {new_option_name} (new).\n"
|
||||
f"Suggestion: remove {legacy_master_option_name}{update_worker_type}.\n"
|
||||
)
|
||||
|
||||
# We've already validated that these aren't conflicting; now just see if
|
||||
# either is True.
|
||||
# (By this point, these are either the same value or only one is not None.)
|
||||
return bool(new_option_should_run_here or legacy_option_should_run_here)
|
||||
|
||||
def generate_config_section(self, **kwargs: Any) -> str:
|
||||
return """\
|
||||
## Workers ##
|
||||
|
||||
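The duty-resolution rules above (the new option takes precedence, a contradictory legacy option raises a `ConfigError`, and with neither option set the duty falls to the main process) are easier to see in isolation. Below is a minimal, self-contained sketch of the same precedence logic; it is illustrative only, not Synapse's API, and it simplifies the legacy option to the main-process case.

```python
from typing import Any, Dict, Optional


def should_run_duty_here(
    config: Dict[str, Any],
    worker_name: Optional[str],
    legacy_option: str,
    new_option: str,
) -> bool:
    """Simplified restatement of the precedence rules in the diff above."""
    new_here = None
    if new_option in config:
        designated = config[new_option] or "master"
        new_here = (designated == "master" and worker_name is None) or (
            designated == worker_name
        )

    legacy_here = None
    if legacy_option in config:
        # In this sketch only the main process can take the legacy duty.
        legacy_here = worker_name is None and bool(config[legacy_option])

    if new_here is None and legacy_here is None:
        # Neither specified: fall back to the main process.
        return worker_name is None
    if new_here is not None and legacy_here is not None and new_here != legacy_here:
        raise ValueError(f"Conflicting options: {legacy_option} vs {new_option}")
    return bool(new_here or legacy_here)


# e.g. a worker named "appservice1" designated via the new option:
assert should_run_duty_here(
    {"notify_appservices_from_worker": "appservice1"},
    worker_name="appservice1",
    legacy_option="notify_appservices",
    new_option="notify_appservices_from_worker",
)
```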
@@ -22,16 +22,11 @@ from typing import (
List,
Optional,
Set,
TypeVar,
Union,
)

from typing_extensions import ParamSpec

from twisted.internet.defer import CancelledError

from synapse.api.presence import UserPresenceState
from synapse.util.async_helpers import delay_cancellation, maybe_awaitable
from synapse.util.async_helpers import maybe_awaitable

if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -45,10 +40,6 @@ GET_INTERESTED_USERS_CALLBACK = Callable[[str], Awaitable[Union[Set[str], str]]]
logger = logging.getLogger(__name__)


P = ParamSpec("P")
R = TypeVar("R")


def load_legacy_presence_router(hs: "HomeServer") -> None:
"""Wrapper that loads a presence router module configured using the old
configuration, and registers the hooks they implement.
@@ -72,15 +63,13 @@ def load_legacy_presence_router(hs: "HomeServer") -> None:

# All methods that the module provides should be async, but this wasn't enforced
# in the old module system, so we wrap them if needed
def async_wrapper(
f: Optional[Callable[P, R]]
) -> Optional[Callable[P, Awaitable[R]]]:
def async_wrapper(f: Optional[Callable]) -> Optional[Callable[..., Awaitable]]:
# f might be None if the callback isn't implemented by the module. In this
# case we don't want to register a callback at all so we return None.
if f is None:
return None

def run(*args: P.args, **kwargs: P.kwargs) -> Awaitable[R]:
def run(*args: Any, **kwargs: Any) -> Awaitable:
# Assertion required because mypy can't prove we won't change `f`
# back to `None`. See
# https://mypy.readthedocs.io/en/latest/common_issues.html#narrowing-and-inner-functions
@@ -91,7 +80,7 @@ def load_legacy_presence_router(hs: "HomeServer") -> None:
return run

# Register the hooks through the module API.
hooks: Dict[str, Optional[Callable[..., Any]]] = {
hooks = {
hook: async_wrapper(getattr(presence_router, hook, None))
for hook in presence_router_methods
}
@@ -158,11 +147,7 @@ class PresenceRouter:
# run all the callbacks for get_users_for_states and combine the results
for callback in self._get_users_for_states_callbacks:
try:
# Note: result is an object here, because we don't trust modules to
# return the types they're supposed to.
result: object = await delay_cancellation(callback(state_updates))
except CancelledError:
raise
result = await callback(state_updates)
except Exception as e:
logger.warning("Failed to run module API callback %s: %s", callback, e)
continue
@@ -214,9 +199,7 @@ class PresenceRouter:
# run all the callbacks for get_interested_users and combine the results
for callback in self._get_interested_users_callbacks:
try:
result = await delay_cancellation(callback(user_id))
except CancelledError:
raise
result = await callback(user_id)
except Exception as e:
logger.warning("Failed to run module API callback %s: %s", callback, e)
continue

@@ -31,7 +31,7 @@ from synapse.rest.media.v1._base import FileInfo
from synapse.rest.media.v1.media_storage import ReadableFileWrapper
from synapse.spam_checker_api import RegistrationBehaviour
from synapse.types import RoomAlias, UserProfile
from synapse.util.async_helpers import delay_cancellation, maybe_awaitable
from synapse.util.async_helpers import maybe_awaitable

if TYPE_CHECKING:
import synapse.events
@@ -255,7 +255,7 @@ class SpamChecker:
will be used as the error message returned to the user.
"""
for callback in self._check_event_for_spam_callbacks:
res: Union[bool, str] = await delay_cancellation(callback(event))
res: Union[bool, str] = await callback(event)
if res:
return res

@@ -276,10 +276,7 @@ class SpamChecker:
Whether the user may join the room
"""
for callback in self._user_may_join_room_callbacks:
may_join_room = await delay_cancellation(
callback(user_id, room_id, is_invited)
)
if may_join_room is False:
if await callback(user_id, room_id, is_invited) is False:
return False

return True
@@ -300,10 +297,7 @@ class SpamChecker:
True if the user may send an invite, otherwise False
"""
for callback in self._user_may_invite_callbacks:
may_invite = await delay_cancellation(
callback(inviter_userid, invitee_userid, room_id)
)
if may_invite is False:
if await callback(inviter_userid, invitee_userid, room_id) is False:
return False

return True
@@ -328,10 +322,7 @@ class SpamChecker:
True if the user may send the invite, otherwise False
"""
for callback in self._user_may_send_3pid_invite_callbacks:
may_send_3pid_invite = await delay_cancellation(
callback(inviter_userid, medium, address, room_id)
)
if may_send_3pid_invite is False:
if await callback(inviter_userid, medium, address, room_id) is False:
return False

return True
@@ -348,8 +339,7 @@ class SpamChecker:
True if the user may create a room, otherwise False
"""
for callback in self._user_may_create_room_callbacks:
may_create_room = await delay_cancellation(callback(userid))
if may_create_room is False:
if await callback(userid) is False:
return False

return True
@@ -369,10 +359,7 @@ class SpamChecker:
True if the user may create a room alias, otherwise False
"""
for callback in self._user_may_create_room_alias_callbacks:
may_create_room_alias = await delay_cancellation(
callback(userid, room_alias)
)
if may_create_room_alias is False:
if await callback(userid, room_alias) is False:
return False

return True
@@ -390,8 +377,7 @@ class SpamChecker:
True if the user may publish the room, otherwise False
"""
for callback in self._user_may_publish_room_callbacks:
may_publish_room = await delay_cancellation(callback(userid, room_id))
if may_publish_room is False:
if await callback(userid, room_id) is False:
return False

return True
@@ -414,7 +400,7 @@ class SpamChecker:
for callback in self._check_username_for_spam_callbacks:
# Make a copy of the user profile object to ensure the spam checker cannot
# modify it.
if await delay_cancellation(callback(user_profile.copy())):
if await callback(user_profile.copy()):
return True

return False
@@ -442,7 +428,7 @@ class SpamChecker:
"""

for callback in self._check_registration_for_spam_callbacks:
behaviour = await delay_cancellation(
behaviour = await (
callback(email_threepid, username, request_info, auth_provider_id)
)
assert isinstance(behaviour, RegistrationBehaviour)
@@ -486,7 +472,7 @@ class SpamChecker:
"""

for callback in self._check_media_file_for_spam_callbacks:
spam = await delay_cancellation(callback(file_wrapper, file_info))
spam = await callback(file_wrapper, file_info)
if spam:
return True


@@ -14,14 +14,12 @@
import logging
from typing import TYPE_CHECKING, Any, Awaitable, Callable, List, Optional, Tuple

from twisted.internet.defer import CancelledError

from synapse.api.errors import ModuleFailedException, SynapseError
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.storage.roommember import ProfileInfo
from synapse.types import Requester, StateMap
from synapse.util.async_helpers import delay_cancellation, maybe_awaitable
from synapse.util.async_helpers import maybe_awaitable

if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -265,11 +263,7 @@ class ThirdPartyEventRules:

for callback in self._check_event_allowed_callbacks:
try:
res, replacement_data = await delay_cancellation(
callback(event, state_events)
)
except CancelledError:
raise
res, replacement_data = await callback(event, state_events)
except SynapseError as e:
# FIXME: Being able to throw SynapseErrors is relied upon by
# some modules. PR #10386 accidentally broke this ability.
@@ -339,13 +333,8 @@ class ThirdPartyEventRules:

for callback in self._check_threepid_can_be_invited_callbacks:
try:
threepid_can_be_invited = await delay_cancellation(
callback(medium, address, state_events)
)
if threepid_can_be_invited is False:
if await callback(medium, address, state_events) is False:
return False
except CancelledError:
raise
except Exception as e:
logger.warning("Failed to run module API callback %s: %s", callback, e)

@@ -372,13 +361,8 @@ class ThirdPartyEventRules:

for callback in self._check_visibility_can_be_modified_callbacks:
try:
visibility_can_be_modified = await delay_cancellation(
callback(room_id, state_events, new_visibility)
)
if visibility_can_be_modified is False:
if await callback(room_id, state_events, new_visibility) is False:
return False
except CancelledError:
raise
except Exception as e:
logger.warning("Failed to run module API callback %s: %s", callback, e)

@@ -416,11 +400,8 @@ class ThirdPartyEventRules:
"""
for callback in self._check_can_shutdown_room_callbacks:
try:
can_shutdown_room = await delay_cancellation(callback(user_id, room_id))
if can_shutdown_room is False:
if await callback(user_id, room_id) is False:
return False
except CancelledError:
raise
except Exception as e:
logger.exception(
"Failed to run module API callback %s: %s", callback, e
@@ -441,13 +422,8 @@ class ThirdPartyEventRules:
"""
for callback in self._check_can_deactivate_user_callbacks:
try:
can_deactivate_user = await delay_cancellation(
callback(user_id, by_admin)
)
if can_deactivate_user is False:
if await callback(user_id, by_admin) is False:
return False
except CancelledError:
raise
except Exception as e:
logger.exception(
"Failed to run module API callback %s: %s", callback, e

@@ -22,7 +22,6 @@ from typing import (
Iterable,
List,
Mapping,
MutableMapping,
Optional,
Union,
)
@@ -581,20 +580,10 @@ class EventClientSerializer:
]


_PowerLevel = Union[str, int]


def copy_and_fixup_power_levels_contents(
old_power_levels: Mapping[str, Union[_PowerLevel, Mapping[str, _PowerLevel]]]
def copy_power_levels_contents(
old_power_levels: Mapping[str, Union[int, Mapping[str, int]]]
) -> Dict[str, Union[int, Dict[str, int]]]:
"""Copy the content of a power_levels event, unfreezing frozendicts along the way.

We accept as input power level values which are strings, provided they represent an
integer, e.g. `"100"` instead of 100. Such strings are converted to integers
in the returned dictionary (hence "fixup" in the function name).

Note that future room versions will outlaw such stringy power levels (see
https://github.com/matrix-org/matrix-spec/issues/853).
"""Copy the content of a power_levels event, unfreezing frozendicts along the way

Raises:
TypeError if the input does not look like a valid power levels event content
@@ -603,47 +592,29 @@ def copy_and_fixup_power_levels_contents(
raise TypeError("Not a valid power-levels content: %r" % (old_power_levels,))

power_levels: Dict[str, Union[int, Dict[str, int]]] = {}

for k, v in old_power_levels.items():

if isinstance(v, int):
power_levels[k] = v
continue

if isinstance(v, collections.abc.Mapping):
h: Dict[str, int] = {}
power_levels[k] = h
for k1, v1 in v.items():
_copy_power_level_value_as_integer(v1, h, k1)
# we should only have one level of nesting
if not isinstance(v1, int):
raise TypeError(
"Invalid power_levels value for %s.%s: %r" % (k, k1, v1)
)
h[k1] = v1
continue

else:
_copy_power_level_value_as_integer(v, power_levels, k)
raise TypeError("Invalid power_levels value for %s: %r" % (k, v))

return power_levels


def _copy_power_level_value_as_integer(
old_value: object,
power_levels: MutableMapping[str, Any],
key: str,
) -> None:
"""Set `power_levels[key]` to the integer represented by `old_value`.

:raises TypeError: if `old_value` is not an integer, nor a base-10 string
representation of an integer.
"""
if isinstance(old_value, int):
power_levels[key] = old_value
return

if isinstance(old_value, str):
try:
parsed_value = int(old_value, base=10)
except ValueError:
# Fall through to the final TypeError.
pass
else:
power_levels[key] = parsed_value
return

raise TypeError(f"Invalid power_levels value for {key}: {old_value}")


def validate_canonicaljson(value: Any) -> None:
"""
Ensure that the JSON object is valid according to the rules of canonical JSON.

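To make the "fixup" semantics concrete, here is a self-contained sketch (not an import from Synapse) of how stringy values such as `"100"` are coerced to integers, how one level of nesting is handled, and how anything else fails with `TypeError`:

```python
from typing import Dict, Union


def fixup_power_level(value: object) -> int:
    """Coerce a power-level value to int, mirroring the diff's fixup rules."""
    if isinstance(value, int):
        return value
    if isinstance(value, str):
        try:
            return int(value, base=10)
        except ValueError:
            pass  # fall through to the TypeError below
    raise TypeError(f"Invalid power_levels value: {value!r}")


content = {"users_default": "0", "events": {"m.room.name": "50"}}
fixed: Dict[str, Union[int, Dict[str, int]]] = {}
for k, v in content.items():
    if isinstance(v, dict):
        # Only one level of nesting is allowed, as in the real function.
        fixed[k] = {k1: fixup_power_level(v1) for k1, v1 in v.items()}
    else:
        fixed[k] = fixup_power_level(v)

assert fixed == {"users_default": 0, "events": {"m.room.name": 50}}
```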
@@ -618,7 +618,7 @@ class FederationClient(FederationBase):
#
# Dendrite returns a 404 (with a body of "404 page not found");
# Conduit returns a 404 (with no body); and Synapse returns a 400
# with M_UNRECOGNIZED.
# with M_UNRECOGNISED.
#
# This needs to be rather specific as some endpoints truly do return 404
# errors.
@@ -1426,8 +1426,6 @@ class FederationClient(FederationBase):
room = res.get("room")
if not isinstance(room, dict):
raise InvalidResponseError("'room' must be a dict")
if room.get("room_id") != room_id:
raise InvalidResponseError("wrong room returned in hierarchy response")

# Validate children_state of the room.
children_state = room.pop("children_state", [])

@@ -23,7 +23,6 @@ from synapse.api.errors import AuthError, StoreError, SynapseError
from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.types import UserID
from synapse.util import stringutils
from synapse.util.async_helpers import delay_cancellation

if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -151,7 +150,7 @@ class AccountValidityHandler:
Whether the user has expired.
"""
for callback in self._is_user_expired_callbacks:
expired = await delay_cancellation(callback(user_id))
expired = await callback(user_id)
if expired is not None:
return expired


@@ -59,7 +59,7 @@ class ApplicationServicesHandler:
self.scheduler = hs.get_application_service_scheduler()
self.started_scheduler = False
self.clock = hs.get_clock()
self.notify_appservices = hs.config.worker.should_notify_appservices
self.notify_appservices = hs.config.appservice.notify_appservices
self.event_sources = hs.get_event_sources()
self._msc2409_to_device_messages_enabled = (
hs.config.experimental.msc2409_to_device_messages_enabled

@@ -41,7 +41,6 @@ import pymacaroons
import unpaddedbase64
from pymacaroons.exceptions import MacaroonVerificationFailedException

from twisted.internet.defer import CancelledError
from twisted.web.server import Request

from synapse.api.constants import LoginType
@@ -68,7 +67,7 @@ from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.roommember import ProfileInfo
from synapse.types import JsonDict, Requester, UserID
from synapse.util import stringutils as stringutils
from synapse.util.async_helpers import delay_cancellation, maybe_awaitable
from synapse.util.async_helpers import maybe_awaitable
from synapse.util.macaroons import get_value_from_macaroon, satisfy_expiry
from synapse.util.msisdn import phone_number_to_msisdn
from synapse.util.stringutils import base62_encode
@@ -552,7 +551,7 @@ class AuthHandler:
await self.store.set_ui_auth_clientdict(sid, clientdict)

user_agent = get_request_user_agent(request)
clientip = request.getClientAddress().host
clientip = request.getClientIP()

await self.store.add_user_agent_ip_to_ui_auth_session(
session.session_id, user_agent, clientip
@@ -2203,11 +2202,7 @@ class PasswordAuthProvider:
# other than None (i.e. until a callback returns a success)
for callback in self.auth_checker_callbacks[login_type]:
try:
result = await delay_cancellation(
callback(username, login_type, login_dict)
)
except CancelledError:
raise
result = await callback(username, login_type, login_dict)
except Exception as e:
logger.warning("Failed to run module API callback %s: %s", callback, e)
continue
@@ -2268,9 +2263,7 @@ class PasswordAuthProvider:

for callback in self.check_3pid_auth_callbacks:
try:
result = await delay_cancellation(callback(medium, address, password))
except CancelledError:
raise
result = await callback(medium, address, password)
except Exception as e:
logger.warning("Failed to run module API callback %s: %s", callback, e)
continue
@@ -2352,7 +2345,7 @@ class PasswordAuthProvider:
"""
for callback in self.get_username_for_registration_callbacks:
try:
res = await delay_cancellation(callback(uia_results, params))
res = await callback(uia_results, params)

if isinstance(res, str):
return res
@@ -2366,8 +2359,6 @@ class PasswordAuthProvider:
callback,
res,
)
except CancelledError:
raise
except Exception as e:
logger.error(
"Module raised an exception in get_username_for_registration: %s",
@@ -2397,7 +2388,7 @@ class PasswordAuthProvider:
"""
for callback in self.get_displayname_for_registration_callbacks:
try:
res = await delay_cancellation(callback(uia_results, params))
res = await callback(uia_results, params)

if isinstance(res, str):
return res
@@ -2411,8 +2402,6 @@ class PasswordAuthProvider:
callback,
res,
)
except CancelledError:
raise
except Exception as e:
logger.error(
"Module raised an exception in get_displayname_for_registration: %s",
@@ -2440,7 +2429,7 @@ class PasswordAuthProvider:
"""
for callback in self.is_3pid_allowed_callbacks:
try:
res = await delay_cancellation(callback(medium, address, registration))
res = await callback(medium, address, registration)

if res is False:
return res
@@ -2454,8 +2443,6 @@ class PasswordAuthProvider:
callback,
res,
)
except CancelledError:
raise
except Exception as e:
logger.error("Module raised an exception in is_3pid_allowed: %s", e)
raise SynapseError(code=500, msg="Internal Server Error")

@@ -92,7 +92,7 @@ class IdentityHandler:
"""

await self._3pid_validation_ratelimiter_ip.ratelimit(
None, (medium, request.getClientAddress().host)
None, (medium, request.getClientIP())
)
await self._3pid_validation_ratelimiter_address.ratelimit(
None, (medium, address)

@@ -143,7 +143,7 @@ class InitialSyncHandler:
to_key=int(now_token.receipt_key),
)
if self.hs.config.experimental.msc2285_enabled:
receipt = ReceiptEventSource.filter_out_private(receipt, user_id)
receipt = ReceiptEventSource.filter_out_hidden(receipt, user_id)

tags_by_room = await self.store.get_tags_for_user(user_id)

@@ -449,7 +449,7 @@ class InitialSyncHandler:
if not receipts:
return []
if self.hs.config.experimental.msc2285_enabled:
receipts = ReceiptEventSource.filter_out_private(receipts, user_id)
receipts = ReceiptEventSource.filter_out_hidden(receipts, user_id)
return receipts

presence, receipts, (messages, token) = await make_deferred_yieldable(

@@ -1427,7 +1427,7 @@ class EventCreationHandler:
# Validate a newly added alias or newly added alt_aliases.

original_alias = None
original_alt_aliases: object = []
original_alt_aliases: List[str] = []

original_event_id = event.unsigned.get("replaces_state")
if original_event_id:
@@ -1455,7 +1455,6 @@ class EventCreationHandler:
# If the old version of alt_aliases is of an unknown form,
# completely replace it.
if not isinstance(original_alt_aliases, (list, tuple)):
# TODO: check that the original_alt_aliases' entries are all strings
original_alt_aliases = []

# Check that each alias is currently valid.

@@ -659,28 +659,27 @@ class PresenceHandler(BasePresenceHandler):
)

now = self.clock.time_msec()
if self._presence_enabled:
for state in self.user_to_current_state.values():
self.wheel_timer.insert(
now=now, obj=state.user_id, then=state.last_active_ts + IDLE_TIMER
)
for state in self.user_to_current_state.values():
self.wheel_timer.insert(
now=now, obj=state.user_id, then=state.last_active_ts + IDLE_TIMER
)
self.wheel_timer.insert(
now=now,
obj=state.user_id,
then=state.last_user_sync_ts + SYNC_ONLINE_TIMEOUT,
)
if self.is_mine_id(state.user_id):
self.wheel_timer.insert(
now=now,
obj=state.user_id,
then=state.last_user_sync_ts + SYNC_ONLINE_TIMEOUT,
then=state.last_federation_update_ts + FEDERATION_PING_INTERVAL,
)
else:
self.wheel_timer.insert(
now=now,
obj=state.user_id,
then=state.last_federation_update_ts + FEDERATION_TIMEOUT,
)
if self.is_mine_id(state.user_id):
self.wheel_timer.insert(
now=now,
obj=state.user_id,
then=state.last_federation_update_ts + FEDERATION_PING_INTERVAL,
)
else:
self.wheel_timer.insert(
now=now,
obj=state.user_id,
then=state.last_federation_update_ts + FEDERATION_TIMEOUT,
)

# Set of users who have presence in the `user_to_current_state` that
# have not yet been persisted
@@ -805,13 +804,6 @@ class PresenceHandler(BasePresenceHandler):
This is currently used to bump the max presence stream ID without changing any
user's presence (see PresenceHandler.add_users_to_send_full_presence_to).
"""
if not self._presence_enabled:
# We shouldn't get here if presence is disabled, but we check anyway
# to ensure that we don't a) send out presence federation and b)
# don't add things to the wheel timer that will never be handled.
logger.warning("Tried to update presence states when presence is disabled")
return

now = self.clock.time_msec()

with Measure(self.clock, "presence_update_states"):
@@ -1237,10 +1229,6 @@ class PresenceHandler(BasePresenceHandler):
):
raise SynapseError(400, "Invalid presence state")

# If presence is disabled, no-op
if not self.hs.config.server.use_presence:
return

user_id = target_user.to_string()

prev_state = await self.current_state_for_user(user_id)

@@ -14,7 +14,7 @@
import logging
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple

from synapse.api.constants import ReceiptTypes
from synapse.api.constants import ReadReceiptEventFields, ReceiptTypes
from synapse.appservice import ApplicationService
from synapse.streams import EventSource
from synapse.types import JsonDict, ReadReceipt, UserID, get_domain_from_id
@@ -112,7 +112,7 @@ class ReceiptsHandler:
)

if not res:
# res will be None if this receipt is 'old'
# res will be None if this read receipt is 'old'
continue

stream_id, max_persisted_id = res
@@ -138,7 +138,7 @@ class ReceiptsHandler:
return True

async def received_client_receipt(
self, room_id: str, receipt_type: str, user_id: str, event_id: str
self, room_id: str, receipt_type: str, user_id: str, event_id: str, hidden: bool
) -> None:
"""Called when a client tells us a local user has read up to the given
event_id in the room.
@@ -148,14 +148,16 @@ class ReceiptsHandler:
receipt_type=receipt_type,
user_id=user_id,
event_ids=[event_id],
data={"ts": int(self.clock.time_msec())},
data={"ts": int(self.clock.time_msec()), "hidden": hidden},
)

is_new = await self._handle_new_receipts([receipt])
if not is_new:
return

if self.federation_sender and receipt_type != ReceiptTypes.READ_PRIVATE:
if self.federation_sender and not (
self.hs.config.experimental.msc2285_enabled and hidden
):
await self.federation_sender.send_read_receipt(receipt)


@@ -165,37 +167,46 @@ class ReceiptEventSource(EventSource[int, JsonDict]):
self.config = hs.config

@staticmethod
def filter_out_private(events: List[JsonDict], user_id: str) -> List[JsonDict]:
"""
This method takes in what is returned by
get_linearized_receipts_for_rooms() and goes through read receipts
filtering out m.read.private receipts if they were not sent by the
current user.
"""

def filter_out_hidden(events: List[JsonDict], user_id: str) -> List[JsonDict]:
visible_events = []

# filter out private receipts the user shouldn't see
# filter out hidden receipts the user shouldn't see
for event in events:
content = event.get("content", {})
new_event = event.copy()
new_event["content"] = {}

for event_id, event_content in content.items():
receipt_event = {}
for receipt_type, receipt_content in event_content.items():
if receipt_type == ReceiptTypes.READ_PRIVATE:
user_rr = receipt_content.get(user_id, None)
if user_rr:
receipt_event[ReceiptTypes.READ_PRIVATE] = {
user_id: user_rr.copy()
}
else:
receipt_event[receipt_type] = receipt_content.copy()
for event_id in content.keys():
event_content = content.get(event_id, {})
m_read = event_content.get(ReceiptTypes.READ, {})

# Only include the receipt event if it is non-empty.
if receipt_event:
new_event["content"][event_id] = receipt_event
# If m_read is missing copy over the original event_content as there is nothing to process here
if not m_read:
new_event["content"][event_id] = event_content.copy()
continue

new_users = {}
for rr_user_id, user_rr in m_read.items():
try:
hidden = user_rr.get("hidden")
except AttributeError:
# Due to https://github.com/matrix-org/synapse/issues/10376
# there are cases where user_rr is a string, in those cases
# we just ignore the read receipt
continue

if hidden is not True or rr_user_id == user_id:
new_users[rr_user_id] = user_rr.copy()
# If hidden has a value replace hidden with the correct prefixed key
if hidden is not None:
new_users[rr_user_id].pop("hidden")
new_users[rr_user_id][
ReadReceiptEventFields.MSC2285_HIDDEN
] = hidden

# Set new users unless empty
if len(new_users.keys()) > 0:
new_event["content"][event_id] = {ReceiptTypes.READ: new_users}

# Append new_event to visible_events unless empty
if len(new_event["content"].keys()) > 0:
@@ -223,7 +234,7 @@ class ReceiptEventSource(EventSource[int, JsonDict]):
)

if self.config.experimental.msc2285_enabled:
events = ReceiptEventSource.filter_out_private(events, user.to_string())
events = ReceiptEventSource.filter_out_hidden(events, user.to_string())

return events, to_key


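To make the new-side filtering concrete, here is a minimal sketch of how a receipt event's content is stripped so that private read receipts are only visible to their sender. The `READ_PRIVATE` constant is inlined here for illustration; the real code uses `ReceiptTypes.READ_PRIVATE`.

```python
READ_PRIVATE = "org.matrix.msc2285.read.private"  # illustrative stand-in


def filter_private_receipts(event: dict, user_id: str) -> dict:
    """Drop m.read.private entries that belong to other users."""
    new_content: dict = {}
    for event_id, receipts_by_type in event.get("content", {}).items():
        kept = {}
        for receipt_type, receipt_content in receipts_by_type.items():
            if receipt_type == READ_PRIVATE:
                # Keep only the requesting user's own private receipt.
                user_rr = receipt_content.get(user_id)
                if user_rr:
                    kept[READ_PRIVATE] = {user_id: dict(user_rr)}
            else:
                # Non-private receipt types are copied wholesale.
                kept[receipt_type] = dict(receipt_content)
        if kept:
            new_content[event_id] = kept
    return {**event, "content": new_content}


event = {
    "content": {
        "$ev": {
            "m.read": {"@bob:hs": {"ts": 1}},
            READ_PRIVATE: {"@alice:hs": {"ts": 2}, "@bob:hs": {"ts": 3}},
        }
    }
}
filtered = filter_private_receipts(event, "@alice:hs")
assert filtered["content"]["$ev"][READ_PRIVATE] == {"@alice:hs": {"ts": 2}}
```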
@@ -364,29 +364,21 @@ class RelationsHandler:
The results may include additional events which are related to the
requested events.
"""
# De-duplicated events by ID to handle the same event requested multiple times.
events_by_id = {}
# De-duplicate events by ID to handle the same event requested multiple times.
#
# State events do not get bundled aggregations.
events_by_id = {
event.event_id: event for event in events if not event.is_state()
}

# A map of event ID to the relation in that event, if there is one.
relations_by_id: Dict[str, str] = {}
for event in events:
# State events do not get bundled aggregations.
if event.is_state():
continue

for event_id, event in events_by_id.items():
relates_to = event.content.get("m.relates_to")
relation_type = None
if isinstance(relates_to, collections.abc.Mapping):
relation_type = relates_to.get("rel_type")
# An event which is a replacement (ie edit) or annotation (ie,
# reaction) may not have any other event related to it.
if relation_type in (RelationTypes.ANNOTATION, RelationTypes.REPLACE):
continue

# The event should get bundled aggregations.
events_by_id[event.event_id] = event
# Track the event's relation information for later.
if isinstance(relation_type, str):
relations_by_id[event.event_id] = relation_type
if isinstance(relation_type, str):
relations_by_id[event_id] = relation_type

# event ID -> bundled aggregation in non-serialized form.
results: Dict[str, BundledAggregations] = {}
@@ -421,6 +413,16 @@ class RelationsHandler:

# Fetch other relations per event.
for event in events_by_id.values():
# An event which is a replacement (ie edit) or annotation (ie, reaction)
# may not have any other event related to it.
#
# XXX This is buggy, see https://github.com/matrix-org/synapse/issues/12566
if relations_by_id.get(event.event_id) in (
RelationTypes.ANNOTATION,
RelationTypes.REPLACE,
):
continue

# Fetch any annotations (ie, reactions) to bundle with this event.
annotations = await self.get_annotations_for_event(
event.event_id, event.room_id, ignored_users=ignored_users

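The reworked de-duplication step boils down to a single comprehension. A tiny sketch, with a stub dataclass standing in for Synapse's `EventBase`:

```python
from dataclasses import dataclass
from typing import Optional


@dataclass
class StubEvent:
    event_id: str
    state_key: Optional[str] = None

    def is_state(self) -> bool:
        return self.state_key is not None


events = [StubEvent("$a"), StubEvent("$a"), StubEvent("$s", state_key="")]
# Same shape as the diff's comprehension: de-duplicate by event ID and
# drop state events, which never get bundled aggregations.
events_by_id = {e.event_id: e for e in events if not e.is_state()}
assert list(events_by_id) == ["$a"]
```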
@@ -57,7 +57,7 @@ from synapse.api.filtering import Filter
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
from synapse.event_auth import validate_event_for_room_version
from synapse.events import EventBase
from synapse.events.utils import copy_and_fixup_power_levels_contents
from synapse.events.utils import copy_power_levels_contents
from synapse.federation.federation_client import InvalidResponseError
from synapse.handlers.federation import get_domains_from_state
from synapse.handlers.relations import BundledAggregations
@@ -337,13 +337,13 @@ class RoomCreationHandler:
# 50, but if the default PL in a room is 50 or more, then we set the
# required PL above that.

pl_content = copy_and_fixup_power_levels_contents(old_room_pl_state.content)
users_default: int = pl_content.get("users_default", 0)  # type: ignore[assignment]
pl_content = dict(old_room_pl_state.content)
users_default = int(pl_content.get("users_default", 0))
restricted_level = max(users_default + 1, 50)

updated = False
for v in ("invite", "events_default"):
current: int = pl_content.get(v, 0)  # type: ignore[assignment]
current = int(pl_content.get(v, 0))
if current < restricted_level:
logger.debug(
"Setting level for %s in %s to %i (was %i)",
@@ -380,9 +380,7 @@ class RoomCreationHandler:
"state_key": "",
"room_id": new_room_id,
"sender": requester.user.to_string(),
"content": copy_and_fixup_power_levels_contents(
old_room_pl_state.content
),
"content": old_room_pl_state.content,
},
ratelimit=False,
)
@@ -473,7 +471,7 @@ class RoomCreationHandler:
# dict so we can't just copy.deepcopy it.
initial_state[
(EventTypes.PowerLevels, "")
] = power_levels = copy_and_fixup_power_levels_contents(
] = power_levels = copy_power_levels_contents(
initial_state[(EventTypes.PowerLevels, "")]
)


@@ -105,7 +105,6 @@ class RoomSummaryHandler:
hs.get_clock(),
"get_room_hierarchy",
)
self._msc3266_enabled = hs.config.experimental.msc3266_enabled

async def get_room_hierarchy(
self,
@@ -631,7 +630,7 @@ class RoomSummaryHandler:
return False

async def _is_remote_room_accessible(
self, requester: Optional[str], room_id: str, room: JsonDict
self, requester: str, room_id: str, room: JsonDict
) -> bool:
"""
Calculate whether the room received over federation should be shown to the requester.
@@ -646,8 +645,7 @@ class RoomSummaryHandler:
due to an invite, etc.

Args:
requester: The user requesting the summary. If not passed only world
readability is checked.
requester: The user requesting the summary.
room_id: The room ID returned over federation.
room: The summary of the room returned over federation.

@@ -661,8 +659,6 @@ class RoomSummaryHandler:
or room.get("world_readable") is True
):
return True
elif not requester:
return False

# Check if the user is a member of any of the allowed rooms from the response.
allowed_rooms = room.get("allowed_room_ids")
@@ -719,10 +715,6 @@ class RoomSummaryHandler:
"room_type": create_event.content.get(EventContentFields.ROOM_TYPE),
}

if self._msc3266_enabled:
entry["im.nheko.summary.version"] = stats["version"]
entry["im.nheko.summary.encryption"] = stats["encryption"]

# Federation requests need to provide additional information so the
# requested server is able to filter the response appropriately.
if for_federation:
@@ -820,45 +812,9 @@ class RoomSummaryHandler:

room_summary["membership"] = membership or "leave"
else:
# Reuse the hierarchy query over federation
if remote_room_hosts is None:
raise SynapseError(400, "Missing via to query remote room")

(
room_entry,
children_room_entries,
inaccessible_children,
) = await self._summarize_remote_room_hierarchy(
_RoomQueueEntry(room_id, remote_room_hosts),
suggested_only=True,
)

# The results over federation might include rooms that we, as the
# requesting server, are allowed to see, but the requesting user is
# not permitted to see.
#
# Filter the returned results to only what is accessible to the user.
if not room_entry or not await self._is_remote_room_accessible(
requester, room_entry.room_id, room_entry.room
):
raise NotFoundError("Room not found or is not accessible")

room = dict(room_entry.room)
room.pop("allowed_room_ids", None)

# If there was a requester, add their membership.
# We keep the membership in the local membership table unless the
# room is purged even for remote rooms.
if requester:
(
membership,
_,
) = await self._store.get_local_current_membership_for_user_in_room(
requester, room_id
)
room["membership"] = membership or "leave"

return room
# TODO federation API, descoped from initial unstable implementation
# as MSC needs more maturing on that side.
raise SynapseError(400, "Federation is not currently supported.")

return room_summary


@@ -468,7 +468,7 @@ class SsoHandler:
auth_provider_id,
remote_user_id,
get_request_user_agent(request),
request.getClientAddress().host,
request.getClientIP(),
)
new_user = True
elif self._sso_update_profile_information:
@@ -928,7 +928,7 @@ class SsoHandler:
session.auth_provider_id,
session.remote_user_id,
get_request_user_agent(request),
request.getClientAddress().host,
request.getClientIP(),
)

logger.info(

@@ -1045,7 +1045,7 @@ class SyncHandler:
last_unread_event_id = await self.store.get_last_receipt_event_id_for_user(
user_id=sync_config.user.to_string(),
room_id=room_id,
receipt_types=(ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE),
receipt_type=ReceiptTypes.READ,
)

return await self.store.get_unread_event_push_actions_by_room_for_user(

@@ -258,7 +258,7 @@ class RegistrationTokenAuthChecker(UserInteractiveAuthChecker):
self.hs = hs
self._enabled = bool(
hs.config.registration.registration_requires_token
) or bool(hs.config.registration.enable_registration_token_3pid_bypass)
) or bool(hs.config.registration.enable_registration_token_3pid_bypasss)
self.store = hs.get_datastores().main

def is_enabled(self) -> bool:

@@ -238,7 +238,7 @@ class SynapseRequest(Request):
request_id,
request=ContextRequest(
request_id=request_id,
ip_address=self.getClientAddress().host,
ip_address=self.getClientIP(),
site_tag=self.synapse_site.site_tag,
# The requester is going to be unknown at this point.
requester=None,
@@ -381,7 +381,7 @@ class SynapseRequest(Request):

self.synapse_site.access_logger.debug(
"%s - %s - Received request: %s %s",
self.getClientAddress().host,
self.getClientIP(),
self.synapse_site.site_tag,
self.get_method(),
self.get_redacted_uri(),
@@ -429,7 +429,7 @@ class SynapseRequest(Request):
"%s - %s - {%s}"
" Processed request: %.3fsec/%.3fsec (%.3fsec, %.3fsec) (%.3fsec/%.3fsec/%d)"
' %sB %s "%s %s %s" "%s" [%d dbevts]',
self.getClientAddress().host,
self.getClientIP(),
self.synapse_site.site_tag,
requester,
processing_time,

@@ -884,7 +884,7 @@ def trace_servlet(request: "SynapseRequest", extract_context: bool = False):
tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER,
tags.HTTP_METHOD: request.get_method(),
tags.HTTP_URL: request.get_redacted_uri(),
tags.PEER_HOST_IPV6: request.getClientAddress().host,
tags.PEER_HOST_IPV6: request.getClientIP(),
}

request_name = request.request_metrics.name

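The repeated `getClientIP()` to `getClientAddress().host` substitutions in these hunks follow Twisted's deprecation of `Request.getClientIP()`: `getClientAddress()` returns an `IAddress` provider whose `.host` attribute carries the same textual IP. A tiny sketch of the shape of the returned object:

```python
from twisted.internet.address import IPv4Address

# getClientAddress() returns an IAddress such as IPv4Address/IPv6Address;
# .host is the string that getClientIP() used to return directly.
addr = IPv4Address("TCP", "203.0.113.7", 52814)
assert addr.host == "203.0.113.7"
```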
@@ -28,11 +28,11 @@ from typing import (
Type,
TypeVar,
Union,
cast,
)

from prometheus_client import Metric
from prometheus_client.core import REGISTRY, Counter, Gauge
from typing_extensions import ParamSpec

from twisted.internet import defer

@@ -256,48 +256,24 @@ def run_as_background_process(
return defer.ensureDeferred(run())


P = ParamSpec("P")
F = TypeVar("F", bound=Callable[..., Awaitable[Optional[Any]]])


def wrap_as_background_process(
desc: str,
) -> Callable[
[Callable[P, Awaitable[Optional[R]]]],
Callable[P, "defer.Deferred[Optional[R]]"],
]:
"""Decorator that wraps an asynchronous function `func`, returning a synchronous
decorated function. Calling the decorated version runs `func` as a background
process, forwarding all arguments verbatim.
def wrap_as_background_process(desc: str) -> Callable[[F], F]:
"""Decorator that wraps a function that gets called as a background
process.

That is,

@wrap_as_background_process
def func(*args): ...
func(1, 2, third=3)

is equivalent to:

def func(*args): ...
run_as_background_process(func, 1, 2, third=3)

The former can be convenient if `func` needs to be run as a background process in
multiple places.
Equivalent to calling the function with `run_as_background_process`
"""

def wrap_as_background_process_inner(
func: Callable[P, Awaitable[Optional[R]]]
) -> Callable[P, "defer.Deferred[Optional[R]]"]:
def wrap_as_background_process_inner(func: F) -> F:
@wraps(func)
def wrap_as_background_process_inner_2(
*args: P.args, **kwargs: P.kwargs
*args: Any, **kwargs: Any
) -> "defer.Deferred[Optional[R]]":
# type-ignore: mypy is confusing kwargs with the bg_start_span kwarg.
# Argument 4 to "run_as_background_process" has incompatible type
# "**P.kwargs"; expected "bool"
# See https://github.com/python/mypy/issues/8862
return run_as_background_process(desc, func, *args, **kwargs)  # type: ignore[arg-type]
return run_as_background_process(desc, func, *args, **kwargs)

return wrap_as_background_process_inner_2
return cast(F, wrap_as_background_process_inner_2)

return wrap_as_background_process_inner


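The `ParamSpec` rewrite above is the standard way to type a decorator so that type checkers see the wrapped function's exact argument types instead of `Any`. A minimal standalone sketch of the same pattern, independent of Synapse's `run_as_background_process`:

```python
from typing import Awaitable, Callable, Optional, TypeVar

from typing_extensions import ParamSpec

P = ParamSpec("P")
R = TypeVar("R")


def traced(
    func: Callable[P, Awaitable[Optional[R]]]
) -> Callable[P, Awaitable[Optional[R]]]:
    """Decorator preserving func's signature for type checkers via ParamSpec."""

    async def wrapper(*args: P.args, **kwargs: P.kwargs) -> Optional[R]:
        print(f"entering {func.__name__}")
        return await func(*args, **kwargs)

    return wrapper
```

With this shape, calling a decorated function with the wrong arguments is a type error at the call site, which the `Callable[..., ...]` / `Any` spelling in the old code could not catch.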
@@ -30,7 +30,6 @@ from typing import (

import attr
import jinja2
from typing_extensions import ParamSpec

from twisted.internet import defer
from twisted.web.resource import Resource
@@ -130,7 +129,6 @@ if TYPE_CHECKING:


T = TypeVar("T")
P = ParamSpec("P")

"""
This package defines the 'stable' API which can be used by extension modules which
@@ -801,9 +799,9 @@ class ModuleApi:
def run_db_interaction(
self,
desc: str,
func: Callable[P, T],
*args: P.args,
**kwargs: P.kwargs,
func: Callable[..., T],
*args: Any,
**kwargs: Any,
) -> "defer.Deferred[T]":
"""Run a function with a database connection

@@ -819,9 +817,8 @@ class ModuleApi:
Returns:
Deferred[object]: result of func
"""
# type-ignore: See https://github.com/python/mypy/issues/8862
return defer.ensureDeferred(
self._store.db_pool.runInteraction(desc, func, *args, **kwargs)  # type: ignore[arg-type]
self._store.db_pool.runInteraction(desc, func, *args, **kwargs)
)

def complete_sso_login(
@@ -1299,9 +1296,9 @@ class ModuleApi:

async def defer_to_thread(
self,
f: Callable[P, T],
*args: P.args,
**kwargs: P.kwargs,
f: Callable[..., T],
*args: Any,
**kwargs: Any,
) -> T:
"""Runs the given function in a separate thread from Synapse's thread pool.


@@ -24,9 +24,7 @@ async def get_badge_count(store: DataStore, user_id: str, group_by_room: bool) -
invites = await store.get_invited_rooms_for_local_user(user_id)
joins = await store.get_rooms_for_user(user_id)

my_receipts_by_room = await store.get_receipts_for_user(
user_id, (ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE)
)
my_receipts_by_room = await store.get_receipts_for_user(user_id, ReceiptTypes.READ)

badge = len(invites)


@@ -21,7 +21,7 @@ from twisted.internet.interfaces import IAddress, IConnector
from twisted.internet.protocol import ReconnectingClientFactory
from twisted.python.failure import Failure

from synapse.api.constants import EventTypes, ReceiptTypes
from synapse.api.constants import EventTypes
from synapse.federation import send_queue
from synapse.federation.sender import FederationSender
from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable
@@ -401,8 +401,10 @@ class FederationSenderHandler:
# we only want to send on receipts for our own users
if not self._is_mine_id(receipt.user_id):
continue
# Private read receipts never get sent over federation.
if receipt.receipt_type == ReceiptTypes.READ_PRIVATE:
if (
receipt.data.get("hidden", False)
and self._hs.config.experimental.msc2285_enabled
):
continue
receipt_info = ReadReceipt(
receipt.room_id,

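The new-side logic in the federation sender hunk reduces to two guards: only local users' receipts are considered, and private read receipts are never federated. A minimal stand-alone sketch; the `Receipt` dataclass and inlined `READ_PRIVATE` constant are stand-ins for Synapse's types:

```python
from dataclasses import dataclass
from typing import Callable, Iterable, Iterator

READ_PRIVATE = "org.matrix.msc2285.read.private"  # stand-in constant


@dataclass
class Receipt:
    user_id: str
    receipt_type: str


def receipts_to_federate(
    receipts: Iterable[Receipt], is_mine_id: Callable[[str], bool]
) -> Iterator[Receipt]:
    """Yield only receipts for local users that are not private."""
    for r in receipts:
        if not is_mine_id(r.user_id):
            continue
        if r.receipt_type == READ_PRIVATE:
            continue
        yield r
```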
@@ -537,7 +537,7 @@ class ReplicationCommandHandler:
|
||||
# Ignore POSITION that are just our own echoes
|
||||
return
|
||||
|
||||
logger.debug("Handling '%s %s'", cmd.NAME, cmd.to_line())
|
||||
logger.info("Handling '%s %s'", cmd.NAME, cmd.to_line())
|
||||
|
||||
self._add_command_to_stream_queue(conn, cmd)
|
||||
|
||||
@@ -567,11 +567,6 @@ class ReplicationCommandHandler:
|
||||
# between then and now.
|
||||
missing_updates = cmd.prev_token != current_token
|
||||
while missing_updates:
|
||||
# Note: There may very well not be any new updates, but we check to
|
||||
# make sure. This can particularly happen for the event stream where
|
||||
# event persisters continuously send `POSITION`. See `resource.py`
|
||||
# for why this can happen.
|
||||
|
||||
logger.info(
|
||||
"Fetching replication rows for '%s' between %i and %i",
|
||||
stream_name,
|
||||
@@ -595,7 +590,7 @@ class ReplicationCommandHandler:
|
||||
[stream.parse_row(row) for row in rows],
|
||||
)
|
||||
|
||||
logger.info("Caught up with stream '%s' to %i", stream_name, cmd.new_token)
|
||||
logger.info("Caught up with stream '%s' to %i", stream_name, cmd.new_token)
|
||||
|
||||
# We've now caught up to position sent to us, notify handler.
|
||||
await self._replication_data_handler.on_position(
|
||||
|
||||
@@ -204,15 +204,6 @@ class ReplicationStreamer:
|
||||
# turns out that e.g. account data streams share
|
||||
# their "current token" with each other, meaning
|
||||
# that it is *not* safe to send a POSITION.
|
||||
|
||||
# Note: `last_token` may not *actually* be the
|
||||
# last token we sent out in a RDATA or POSITION.
|
||||
# This can happen if we sent out an RDATA for
|
||||
# position X when our current token was say X+1.
|
||||
# Other workers will see RDATA for X and then a
|
||||
# POSITION with last token of X+1, which will
|
||||
# cause them to check if there were any missing
|
||||
# updates between X and X+1.
|
||||
logger.info(
|
||||
"Sending position: %s -> %s",
|
||||
stream.NAME,
|
||||
|
||||
@@ -112,7 +112,7 @@ class AuthRestServlet(RestServlet):
|
||||
|
||||
try:
|
||||
await self.auth_handler.add_oob_auth(
|
||||
LoginType.RECAPTCHA, authdict, request.getClientAddress().host
|
||||
LoginType.RECAPTCHA, authdict, request.getClientIP()
|
||||
)
|
||||
except LoginError as e:
|
||||
# Authentication failed, let user try again
|
||||
@@ -132,7 +132,7 @@ class AuthRestServlet(RestServlet):
|
||||
|
||||
try:
|
||||
await self.auth_handler.add_oob_auth(
|
||||
LoginType.TERMS, authdict, request.getClientAddress().host
|
||||
LoginType.TERMS, authdict, request.getClientIP()
|
||||
)
|
||||
except LoginError as e:
|
||||
# Authentication failed, let user try again
|
||||
@@ -161,9 +161,7 @@ class AuthRestServlet(RestServlet):
|
||||
|
||||
try:
|
||||
await self.auth_handler.add_oob_auth(
|
||||
LoginType.REGISTRATION_TOKEN,
|
||||
authdict,
|
||||
request.getClientAddress().host,
|
||||
LoginType.REGISTRATION_TOKEN, authdict, request.getClientIP()
|
||||
)
|
||||
except LoginError as e:
|
||||
html = self.registration_token_template.render(
|
||||
|
||||
@@ -15,6 +15,8 @@
|
||||
import logging
|
||||
from typing import TYPE_CHECKING, Awaitable, Dict, List, Optional, Tuple
|
||||
|
||||
from twisted.web.server import Request
|
||||
|
||||
from synapse.api.constants import Membership
|
||||
from synapse.api.errors import SynapseError
|
||||
from synapse.http.server import HttpServer
|
||||
@@ -95,7 +97,7 @@ class KnockRoomAliasServlet(RestServlet):
|
||||
return 200, {"room_id": room_id}
|
||||
|
||||
def on_PUT(
|
||||
self, request: SynapseRequest, room_identifier: str, txn_id: str
|
||||
self, request: Request, room_identifier: str, txn_id: str
|
||||
) -> Awaitable[Tuple[int, JsonDict]]:
|
||||
set_tag("txn_id", txn_id)
|
||||
|
||||
|
||||
@@ -176,7 +176,7 @@ class LoginRestServlet(RestServlet):
|
||||
|
||||
if appservice.is_rate_limited():
|
||||
await self._address_ratelimiter.ratelimit(
|
||||
None, request.getClientAddress().host
|
||||
None, request.getClientIP()
|
||||
)
|
||||
|
||||
result = await self._do_appservice_login(
|
||||
@@ -188,25 +188,19 @@ class LoginRestServlet(RestServlet):
|
||||
self.jwt_enabled
|
||||
and login_submission["type"] == LoginRestServlet.JWT_TYPE
|
||||
):
|
||||
await self._address_ratelimiter.ratelimit(
|
||||
None, request.getClientAddress().host
|
||||
)
|
||||
await self._address_ratelimiter.ratelimit(None, request.getClientIP())
|
||||
result = await self._do_jwt_login(
|
||||
login_submission,
|
||||
should_issue_refresh_token=should_issue_refresh_token,
|
||||
)
|
||||
elif login_submission["type"] == LoginRestServlet.TOKEN_TYPE:
|
||||
await self._address_ratelimiter.ratelimit(
|
||||
None, request.getClientAddress().host
|
||||
)
|
||||
await self._address_ratelimiter.ratelimit(None, request.getClientIP())
|
||||
result = await self._do_token_login(
|
||||
login_submission,
|
||||
should_issue_refresh_token=should_issue_refresh_token,
|
||||
)
|
||||
else:
|
||||
await self._address_ratelimiter.ratelimit(
|
||||
None, request.getClientAddress().host
|
||||
)
|
||||
await self._address_ratelimiter.ratelimit(None, request.getClientIP())
|
||||
result = await self._do_other_login(
|
||||
login_submission,
|
||||
should_issue_refresh_token=should_issue_refresh_token,
|
||||
|
||||
@@ -58,7 +58,7 @@ class NotificationsServlet(RestServlet):
|
||||
)
|
||||
|
||||
receipts_by_room = await self.store.get_receipts_for_user_with_orderings(
|
||||
user_id, [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE]
|
||||
user_id, ReceiptTypes.READ
|
||||
)
|
||||
|
||||
notif_event_ids = [pa.event_id for pa in push_actions]
|
||||
|
||||
@@ -15,7 +15,8 @@
import logging
from typing import TYPE_CHECKING, Tuple

from synapse.api.constants import ReceiptTypes
from synapse.api.constants import ReadReceiptEventFields, ReceiptTypes
from synapse.api.errors import Codes, SynapseError
from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet, parse_json_object_from_request
from synapse.http.site import SynapseRequest

@@ -35,7 +36,6 @@ class ReadMarkerRestServlet(RestServlet):
    def __init__(self, hs: "HomeServer"):
        super().__init__()
        self.auth = hs.get_auth()
        self.config = hs.config
        self.receipts_handler = hs.get_receipts_handler()
        self.read_marker_handler = hs.get_read_marker_handler()
        self.presence_handler = hs.get_presence_handler()

@@ -48,42 +48,27 @@ class ReadMarkerRestServlet(RestServlet):
        await self.presence_handler.bump_presence_active_time(requester.user)

        body = parse_json_object_from_request(request)

        valid_receipt_types = {
            ReceiptTypes.READ,
            ReceiptTypes.FULLY_READ,
            ReceiptTypes.READ_PRIVATE,
        }

        unrecognized_types = set(body.keys()) - valid_receipt_types
        if unrecognized_types:
            # It's fine if there are unrecognized receipt types, but let's log
            # it to help debug clients that have typoed the receipt type.
            #
            # We specifically *don't* error here, as a) it stops us processing
            # the valid receipts, and b) we need to be extensible on receipt
            # types.
            logger.info("Ignoring unrecognized receipt types: %s", unrecognized_types)

        read_event_id = body.get(ReceiptTypes.READ, None)
        hidden = body.get(ReadReceiptEventFields.MSC2285_HIDDEN, False)

        if not isinstance(hidden, bool):
            raise SynapseError(
                400,
                "Param %s must be a boolean, if given"
                % ReadReceiptEventFields.MSC2285_HIDDEN,
                Codes.BAD_JSON,
            )

        if read_event_id:
            await self.receipts_handler.received_client_receipt(
                room_id,
                ReceiptTypes.READ,
                user_id=requester.user.to_string(),
                event_id=read_event_id,
                hidden=hidden,
            )

        read_private_event_id = body.get(ReceiptTypes.READ_PRIVATE, None)
        if read_private_event_id and self.config.experimental.msc2285_enabled:
            await self.receipts_handler.received_client_receipt(
                room_id,
                ReceiptTypes.READ_PRIVATE,
                user_id=requester.user.to_string(),
                event_id=read_private_event_id,
            )

        read_marker_event_id = body.get(ReceiptTypes.FULLY_READ, None)
        read_marker_event_id = body.get("m.fully_read", None)
        if read_marker_event_id:
            await self.read_marker_handler.received_client_read_marker(
                room_id,
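The two sides of this hunk accept differently shaped /read_markers bodies. A hedged illustration of plausible request bodies under each scheme (event IDs are invented):

# Sketch: plausible /read_markers request bodies under the two schemes.
# Event IDs are illustrative only.

# Newer scheme: one key per receipt type, with the MSC2285 private read
# receipt as its own receipt type.
body_new = {
    "m.fully_read": "$event_a",
    "m.read": "$event_b",
    "org.matrix.msc2285.read.private": "$event_c",
}

# Older scheme: only m.read/m.fully_read, with the MSC2285 "hidden" flag
# attached to the body instead of a separate receipt type.
body_old = {
    "m.fully_read": "$event_a",
    "m.read": "$event_b",
    "org.matrix.msc2285.hidden": True,
}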
@@ -16,8 +16,8 @@ import logging
import re
from typing import TYPE_CHECKING, Tuple

from synapse.api.constants import ReceiptTypes
from synapse.api.errors import SynapseError
from synapse.api.constants import ReadReceiptEventFields, ReceiptTypes
from synapse.api.errors import Codes, SynapseError
from synapse.http import get_request_user_agent
from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet, parse_json_object_from_request

@@ -46,7 +46,6 @@ class ReceiptRestServlet(RestServlet):
        self.hs = hs
        self.auth = hs.get_auth()
        self.receipts_handler = hs.get_receipts_handler()
        self.read_marker_handler = hs.get_read_marker_handler()
        self.presence_handler = hs.get_presence_handler()

    async def on_POST(

@@ -54,19 +53,7 @@ class ReceiptRestServlet(RestServlet):
    ) -> Tuple[int, JsonDict]:
        requester = await self.auth.get_user_by_req(request)

        if self.hs.config.experimental.msc2285_enabled and receipt_type not in [
            ReceiptTypes.READ,
            ReceiptTypes.READ_PRIVATE,
            ReceiptTypes.FULLY_READ,
        ]:
            raise SynapseError(
                400,
                "Receipt type must be 'm.read', 'org.matrix.msc2285.read.private' or 'm.fully_read'",
            )
        elif (
            not self.hs.config.experimental.msc2285_enabled
            and receipt_type != ReceiptTypes.READ
        ):
        if receipt_type != ReceiptTypes.READ:
            raise SynapseError(400, "Receipt type must be 'm.read'")

        # Do not allow older SchildiChat and Element Android clients (prior to Element/1.[012].x) to send an empty body.

@@ -75,24 +62,26 @@ class ReceiptRestServlet(RestServlet):
        if "Android" in user_agent:
            if pattern.match(user_agent) or "Riot" in user_agent:
                allow_empty_body = True
        # This call makes sure possible empty body is handled correctly
        parse_json_object_from_request(request, allow_empty_body)
        body = parse_json_object_from_request(request, allow_empty_body)
        hidden = body.get(ReadReceiptEventFields.MSC2285_HIDDEN, False)

        if not isinstance(hidden, bool):
            raise SynapseError(
                400,
                "Param %s must be a boolean, if given"
                % ReadReceiptEventFields.MSC2285_HIDDEN,
                Codes.BAD_JSON,
            )

        await self.presence_handler.bump_presence_active_time(requester.user)

        if receipt_type == ReceiptTypes.FULLY_READ:
            await self.read_marker_handler.received_client_read_marker(
                room_id,
                user_id=requester.user.to_string(),
                event_id=event_id,
            )
        else:
            await self.receipts_handler.received_client_receipt(
                room_id,
                receipt_type,
                user_id=requester.user.to_string(),
                event_id=event_id,
            )
        await self.receipts_handler.received_client_receipt(
            room_id,
            receipt_type,
            user_id=requester.user.to_string(),
            event_id=event_id,
            hidden=hidden,
        )

        return 200, {}
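Both variants of the servlet sit behind the same client-server endpoint, POST /_matrix/client/v3/rooms/{roomId}/receipt/{receiptType}/{eventId}. A rough sketch of a client call (homeserver URL, token and IDs are placeholders, and the requests package is assumed):

# Sketch: sending a read receipt via the client-server API.
import urllib.parse

import requests

homeserver = "https://matrix.example.org"
access_token = "MDAx..."  # placeholder
room_id = "!room:example.org"
event_id = "$event_b"
receipt_type = "m.read"  # or "org.matrix.msc2285.read.private" where supported

resp = requests.post(
    f"{homeserver}/_matrix/client/v3/rooms/{urllib.parse.quote(room_id)}"
    f"/receipt/{receipt_type}/{urllib.parse.quote(event_id)}",
    headers={"Authorization": f"Bearer {access_token}"},
    json={},  # the empty-body workaround above exists for clients that omit this
)
resp.raise_for_status()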
@@ -352,7 +352,7 @@ class UsernameAvailabilityRestServlet(RestServlet):
        if self.inhibit_user_in_use_error:
            return 200, {"available": True}

        ip = request.getClientAddress().host
        ip = request.getClientIP()
        with self.ratelimiter.ratelimit(ip) as wait_deferred:
            await wait_deferred

@@ -394,7 +394,7 @@ class RegistrationTokenValidityRestServlet(RestServlet):
        )

    async def on_GET(self, request: Request) -> Tuple[int, JsonDict]:
        await self.ratelimiter.ratelimit(None, (request.getClientAddress().host,))
        await self.ratelimiter.ratelimit(None, (request.getClientIP(),))

        if not self.hs.config.registration.enable_registration:
            raise SynapseError(

@@ -441,7 +441,7 @@ class RegisterRestServlet(RestServlet):
    async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
        body = parse_json_object_from_request(request)

        client_addr = request.getClientAddress().host
        client_addr = request.getClientIP()

        await self.ratelimiter.ratelimit(None, client_addr, update=False)

@@ -930,7 +930,7 @@ def _calculate_registration_flows(
        flows.append([LoginType.MSISDN, LoginType.EMAIL_IDENTITY])

    # Add a flow that doesn't require any 3pids, if the config requests it.
    if config.registration.enable_registration_token_3pid_bypass:
    if config.registration.enable_registration_token_3pid_bypasss:
        flows.append([LoginType.REGISTRATION_TOKEN])

    # Prepend m.login.terms to all flows if we're requiring consent
@@ -15,9 +15,7 @@
"""This module contains logic for storing HTTP PUT transactions. This is used
to ensure idempotency when performing PUTs using the REST API."""
import logging
from typing import TYPE_CHECKING, Awaitable, Callable, Dict, Tuple

from typing_extensions import ParamSpec
from typing import TYPE_CHECKING, Any, Awaitable, Callable, Dict, Tuple

from twisted.python.failure import Failure
from twisted.web.server import Request

@@ -34,9 +32,6 @@ logger = logging.getLogger(__name__)
CLEANUP_PERIOD_MS = 1000 * 60 * 30  # 30 mins


P = ParamSpec("P")


class HttpTransactionCache:
    def __init__(self, hs: "HomeServer"):
        self.hs = hs

@@ -70,9 +65,9 @@ class HttpTransactionCache:
    def fetch_or_execute_request(
        self,
        request: Request,
        fn: Callable[P, Awaitable[Tuple[int, JsonDict]]],
        *args: P.args,
        **kwargs: P.kwargs,
        fn: Callable[..., Awaitable[Tuple[int, JsonDict]]],
        *args: Any,
        **kwargs: Any,
    ) -> Awaitable[Tuple[int, JsonDict]]:
        """A helper function for fetch_or_execute which extracts
        a transaction key from the given request.

@@ -87,9 +82,9 @@ class HttpTransactionCache:
    def fetch_or_execute(
        self,
        txn_key: str,
        fn: Callable[P, Awaitable[Tuple[int, JsonDict]]],
        *args: P.args,
        **kwargs: P.kwargs,
        fn: Callable[..., Awaitable[Tuple[int, JsonDict]]],
        *args: Any,
        **kwargs: Any,
    ) -> Awaitable[Tuple[int, JsonDict]]:
        """Fetches the response for this transaction, or executes the given function
        to produce a response for this transaction.
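The Callable[P, ...] / P.args / P.kwargs pattern on one side of this hunk is the standard PEP 612 way to type a wrapper without losing the wrapped function's signature. A standalone sketch (the names are illustrative, not Synapse's):

# Sketch: PEP 612 ParamSpec keeps the wrapped callable's signature visible
# to the type checker, where Callable[..., T] erases it.
import asyncio
from typing import Awaitable, Callable, Dict, Tuple

from typing_extensions import ParamSpec

P = ParamSpec("P")


def fetch_or_execute(
    fn: Callable[P, Awaitable[Tuple[int, Dict]]],
    *args: P.args,
    **kwargs: P.kwargs,
) -> Awaitable[Tuple[int, Dict]]:
    # mypy now checks that *args/**kwargs match fn's own parameters.
    return fn(*args, **kwargs)


async def handler(room_id: str, limit: int = 10) -> Tuple[int, Dict]:
    return 200, {"room_id": room_id, "limit": limit}


status, body = asyncio.run(fetch_or_execute(handler, "!room:example.org", limit=5))
# fetch_or_execute(handler, limit="nope")  # flagged by the type checker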
@@ -93,7 +93,7 @@ class VersionsRestServlet(RestServlet):
                    "io.element.e2ee_forced.trusted_private": self.e2ee_forced_trusted_private,
                    # Supports the busy presence state described in MSC3026.
                    "org.matrix.msc3026.busy_presence": self.config.experimental.msc3026_enabled,
                    # Supports receiving private read receipts as per MSC2285
                    # Supports receiving hidden read receipts as per MSC2285
                    "org.matrix.msc2285": self.config.experimental.msc2285_enabled,
                    # Adds support for importing historical messages as per MSC2716
                    "org.matrix.msc2716": self.config.experimental.msc2716_enabled,
@@ -121,10 +121,10 @@ class Thumbnailer:
        #
        # If the image has transparency, use RGBA instead.
        if self.image.mode in ["1", "L", "P"]:
            mode = "RGB"
            if self.image.info.get("transparency", None) is not None:
                self.image = self.image.convert("RGBA")
            else:
                self.image = self.image.convert("RGB")
                mode = "RGBA"
            self.image = self.image.convert(mode)
        return self.image.resize((width, height), Image.ANTIALIAS)

    def scale(self, width: int, height: int, output_type: str) -> BytesIO:
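The two versions of Thumbnailer above differ in how 1-bit, greyscale and palette images with transparency are promoted before resizing. A self-contained sketch of the same idea with Pillow (the diff uses Image.ANTIALIAS; newer Pillow spells that resampling filter Image.LANCZOS):

# Sketch: promote 1-bit/greyscale/palette images to RGB, or RGBA when the
# image carries transparency, before resizing (mirrors the logic above).
from PIL import Image


def resize_thumbnail(image: Image.Image, width: int, height: int) -> Image.Image:
    if image.mode in ("1", "L", "P"):
        # Palette images store transparency in image.info rather than a band.
        if image.info.get("transparency") is not None:
            image = image.convert("RGBA")
        else:
            image = image.convert("RGB")
    return image.resize((width, height), Image.LANCZOS)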
@@ -38,7 +38,7 @@ from typing import (
)

import attr
from prometheus_client import Histogram
from typing_extensions import Concatenate, Literal, ParamSpec
from typing_extensions import Literal

from twisted.enterprise import adbapi

@@ -192,9 +192,9 @@ class LoggingDatabaseConnection:


# The type of entry which goes on our after_callbacks and exception_callbacks lists.
_CallbackListEntry = Tuple[Callable[..., object], Tuple[object, ...], Dict[str, object]]
_CallbackListEntry = Tuple[Callable[..., object], Iterable[Any], Dict[str, Any]]


P = ParamSpec("P")
R = TypeVar("R")


@@ -239,9 +239,7 @@ class LoggingTransaction:
        self.after_callbacks = after_callbacks
        self.exception_callbacks = exception_callbacks

    def call_after(
        self, callback: Callable[P, object], *args: P.args, **kwargs: P.kwargs
    ) -> None:
    def call_after(self, callback: Callable[..., object], *args: Any, **kwargs: Any):
        """Call the given callback on the main twisted thread after the transaction has
        finished.

@@ -258,12 +256,11 @@ class LoggingTransaction:
        # LoggingTransaction isn't expecting there to be any callbacks; assert that
        # is not the case.
        assert self.after_callbacks is not None
        # type-ignore: need mypy containing https://github.com/python/mypy/pull/12668
        self.after_callbacks.append((callback, args, kwargs))  # type: ignore[arg-type]
        self.after_callbacks.append((callback, args, kwargs))

    def call_on_exception(
        self, callback: Callable[P, object], *args: P.args, **kwargs: P.kwargs
    ) -> None:
        self, callback: Callable[..., object], *args: Any, **kwargs: Any
    ):
        """Call the given callback on the main twisted thread after the transaction has
        failed.

@@ -277,8 +274,7 @@ class LoggingTransaction:
        # LoggingTransaction isn't expecting there to be any callbacks; assert that
        # is not the case.
        assert self.exception_callbacks is not None
        # type-ignore: need mypy containing https://github.com/python/mypy/pull/12668
        self.exception_callbacks.append((callback, args, kwargs))  # type: ignore[arg-type]
        self.exception_callbacks.append((callback, args, kwargs))

    def fetchone(self) -> Optional[Tuple]:
        return self.txn.fetchone()

@@ -343,13 +339,7 @@ class LoggingTransaction:
        "Strip newlines out of SQL so that the loggers in the DB are on one line"
        return " ".join(line.strip() for line in sql.splitlines() if line.strip())

    def _do_execute(
        self,
        func: Callable[Concatenate[str, P], R],
        sql: str,
        *args: P.args,
        **kwargs: P.kwargs,
    ) -> R:
    def _do_execute(self, func: Callable[..., R], sql: str, *args: Any) -> R:
        sql = self._make_sql_one_line(sql)

        # TODO(paul): Maybe use 'info' and 'debug' for values?

@@ -358,10 +348,7 @@ class LoggingTransaction:
        sql = self.database_engine.convert_param_style(sql)
        if args:
            try:
                # The type-ignore should be redundant once mypy releases a version with
                # https://github.com/python/mypy/pull/12668. (`args` might be empty,
                # (but we'll catch the index error if so.)
                sql_logger.debug("[SQL values] {%s} %r", self.name, args[0])  # type: ignore[index]
                sql_logger.debug("[SQL values] {%s} %r", self.name, args[0])
            except Exception:
                # Don't let logging failures stop SQL from working
                pass

@@ -376,7 +363,7 @@ class LoggingTransaction:
                    opentracing.tags.DATABASE_STATEMENT: sql,
                },
            ):
                return func(sql, *args, **kwargs)
                return func(sql, *args)
        except Exception as e:
            sql_logger.debug("[SQL FAIL] {%s} %s", self.name, e)
            raise

@@ -553,9 +540,9 @@ class DatabasePool:
        desc: str,
        after_callbacks: List[_CallbackListEntry],
        exception_callbacks: List[_CallbackListEntry],
        func: Callable[Concatenate[LoggingTransaction, P], R],
        *args: P.args,
        **kwargs: P.kwargs,
        func: Callable[..., R],
        *args: Any,
        **kwargs: Any,
    ) -> R:
        """Start a new database transaction with the given connection.

@@ -585,10 +572,7 @@ class DatabasePool:
        # will fail if we have to repeat the transaction.
        # For now, we just log an error, and hope that it works on the first attempt.
        # TODO: raise an exception.

        # Type-ignore Mypy doesn't yet consider ParamSpec.args to be iterable; see
        # https://github.com/python/mypy/pull/12668
        for i, arg in enumerate(args):  # type: ignore[arg-type, var-annotated]
        for i, arg in enumerate(args):
            if inspect.isgenerator(arg):
                logger.error(
                    "Programming error: generator passed to new_transaction as "

@@ -596,9 +580,7 @@ class DatabasePool:
                    i,
                    func,
                )
        # Type-ignore Mypy doesn't yet consider ParamSpec.args to be a mapping; see
        # https://github.com/python/mypy/pull/12668
        for name, val in kwargs.items():  # type: ignore[attr-defined]
        for name, val in kwargs.items():
            if inspect.isgenerator(val):
                logger.error(
                    "Programming error: generator passed to new_transaction as "
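The inspect.isgenerator loop that survives on both sides of the hunk guards against a subtle bug: a generator passed into new_transaction would be consumed on the first attempt, so a retried transaction would see different data. A minimal illustration under invented names:

# Sketch: why passing a generator to retryable code is a programming error.
import inspect


def run_with_retries(func, *args, attempts: int = 2):
    for i, arg in enumerate(args):
        if inspect.isgenerator(arg):
            raise TypeError(f"argument {i} is a generator; a retry would see it exhausted")
    last_exc = None
    for _ in range(attempts):
        try:
            return func(*args)
        except Exception as e:  # stand-in for a transient DB error
            last_exc = e
    raise last_exc


rows = (n * n for n in range(3))  # a generator: consumed once, then empty
# run_with_retries(sum, rows)    # would raise TypeError under the check above
print(run_with_retries(sum, list(rows)))  # materialise first: prints 5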
@@ -1648,12 +1648,8 @@ class PersistEventsStore:
        txn.call_after(prefill)

    def _store_redaction(self, txn: LoggingTransaction, event: EventBase) -> None:
        """Invalidate the caches for the redacted event.

        Note that these caches are also cleared as part of event replication in
        _invalidate_caches_for_event.
        """
        assert event.redacts is not None
        # Invalidate the caches for the redacted event, note that these caches
        # are also cleared as part of event replication in _invalidate_caches_for_event.
        txn.call_after(self.store._invalidate_get_event_cache, event.redacts)
        txn.call_after(self.store.get_relations_for_event.invalidate, (event.redacts,))
        txn.call_after(self.store.get_applicable_edit.invalidate, (event.redacts,))
@@ -22,6 +22,7 @@ from typing import (
    Iterable,
    List,
    Optional,
    Set,
    Tuple,
    cast,
)

@@ -116,43 +117,39 @@ class ReceiptsWorkerStore(SQLBaseStore):
        """Get the current max stream ID for receipts stream"""
        return self._receipts_id_gen.get_current_token()

    async def get_last_receipt_event_id_for_user(
        self, user_id: str, room_id: str, receipt_types: Iterable[str]
    ) -> Optional[str]:
        """
        Fetch the event ID for the latest receipt in a room with one of the given receipt types.

        Args:
            user_id: The user to fetch receipts for.
            room_id: The room ID to fetch the receipt for.
            receipt_type: The receipt types to fetch. Earlier receipt types
                are given priority if multiple receipts point to the same event.

        Returns:
            The latest receipt, if one exists.
        """
        latest_event_id: Optional[str] = None
        latest_stream_ordering = 0
        for receipt_type in receipt_types:
            result = await self._get_last_receipt_event_id_for_user(
                user_id, room_id, receipt_type
            )
            if result is None:
                continue
            event_id, stream_ordering = result

            if latest_event_id is None or latest_stream_ordering < stream_ordering:
                latest_event_id = event_id
                latest_stream_ordering = stream_ordering

        return latest_event_id
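Note the strict "<" in the loop above: on a tie in stream ordering, the receipt from the earlier type in receipt_types is kept, which is what gives earlier types priority. A toy rerun of the same loop with invented data:

# Sketch: how the strict "<" gives earlier receipt types priority on ties.
from typing import Dict, Optional, Tuple

# Invented (event_id, stream_ordering) per receipt type.
receipts: Dict[str, Tuple[str, int]] = {
    "m.read": ("$public", 7),
    "org.matrix.msc2285.read.private": ("$private", 7),  # same ordering
}

latest_event_id: Optional[str] = None
latest_stream_ordering = 0
for receipt_type in ["m.read", "org.matrix.msc2285.read.private"]:
    result = receipts.get(receipt_type)
    if result is None:
        continue
    event_id, stream_ordering = result
    if latest_event_id is None or latest_stream_ordering < stream_ordering:
        latest_event_id = event_id
        latest_stream_ordering = stream_ordering

print(latest_event_id)  # "$public": a tie is not enough to displace it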
    @cached()
    async def get_users_with_read_receipts_in_room(self, room_id: str) -> Set[str]:
        receipts = await self.get_receipts_for_room(room_id, ReceiptTypes.READ)
        return {r["user_id"] for r in receipts}

    @cached()
    async def _get_last_receipt_event_id_for_user(
        self, user_id: str, room_id: str, receipt_type: str
    ) -> Optional[Tuple[str, int]]:
    async def get_receipts_for_room(
        self, room_id: str, receipt_type: str
    ) -> List[Dict[str, Any]]:
        """
        Fetch the event ID and stream ordering for the latest receipt.
        Fetch the event IDs for the latest receipt for all users in a room with the given receipt type.

        Args:
            room_id: The room ID to fetch the receipt for.
            receipt_type: The receipt type to fetch.

        Returns:
            A list of dictionaries, one for each user ID. Each dictionary
            contains a user ID and the event ID of that user's latest receipt.
        """
        return await self.db_pool.simple_select_list(
            table="receipts_linearized",
            keyvalues={"room_id": room_id, "receipt_type": receipt_type},
            retcols=("user_id", "event_id"),
            desc="get_receipts_for_room",
        )

    @cached()
    async def get_last_receipt_event_id_for_user(
        self, user_id: str, room_id: str, receipt_type: str
    ) -> Optional[str]:
        """
        Fetch the event ID for the latest receipt in a room with the given receipt type.

        Args:
            user_id: The user to fetch receipts for.

@@ -160,33 +157,30 @@ class ReceiptsWorkerStore(SQLBaseStore):
            receipt_type: The receipt type to fetch.

        Returns:
            The event ID and stream ordering of the latest receipt, if one exists;
            otherwise `None`.
        """
        sql = """
            SELECT event_id, stream_ordering
            FROM receipts_linearized
            INNER JOIN events USING (room_id, event_id)
            WHERE user_id = ?
            AND room_id = ?
            AND receipt_type = ?
            The event ID of the latest receipt, if one exists; otherwise `None`.
        """
        return await self.db_pool.simple_select_one_onecol(
            table="receipts_linearized",
            keyvalues={
                "room_id": room_id,
                "receipt_type": receipt_type,
                "user_id": user_id,
            },
            retcol="event_id",
            desc="get_own_receipt_for_user",
            allow_none=True,
        )

        def f(txn: LoggingTransaction) -> Optional[Tuple[str, int]]:
            txn.execute(sql, (user_id, room_id, receipt_type))
            return cast(Optional[Tuple[str, int]], txn.fetchone())

        return await self.db_pool.runInteraction("get_own_receipt_for_user", f)

    @cached()
    async def get_receipts_for_user(
        self, user_id: str, receipt_types: Iterable[str]
        self, user_id: str, receipt_type: str
    ) -> Dict[str, str]:
        """
        Fetch the event IDs for the latest receipts sent by the given user.

        Args:
            user_id: The user to fetch receipts for.
            receipt_types: The receipt types to check.
            receipt_type: The receipt type to fetch.

        Returns:
            A map of room ID to the event ID of the latest receipt for that room.

@@ -194,48 +188,16 @@ class ReceiptsWorkerStore(SQLBaseStore):
        If the user has not sent a receipt to a room then it will not appear
        in the returned dictionary.
        """
        results = await self.get_receipts_for_user_with_orderings(
            user_id, receipt_types
        rows = await self.db_pool.simple_select_list(
            table="receipts_linearized",
            keyvalues={"user_id": user_id, "receipt_type": receipt_type},
            retcols=("room_id", "event_id"),
            desc="get_receipts_for_user",
        )

        # Reduce the result to room ID -> event ID.
        return {
            room_id: room_result["event_id"] for room_id, room_result in results.items()
        }
        return {row["room_id"]: row["event_id"] for row in rows}

    async def get_receipts_for_user_with_orderings(
        self, user_id: str, receipt_types: Iterable[str]
    ) -> JsonDict:
        """
        Fetch receipts for all rooms that the given user is joined to.

        Args:
            user_id: The user to fetch receipts for.
            receipt_types: The receipt types to fetch. Earlier receipt types
                are given priority if multiple receipts point to the same event.

        Returns:
            A map of room ID to the latest receipt (for the given types).
        """
        results: JsonDict = {}
        for receipt_type in receipt_types:
            partial_result = await self._get_receipts_for_user_with_orderings(
                user_id, receipt_type
            )
            for room_id, room_result in partial_result.items():
                # If the room has not yet been seen, or the receipt is newer,
                # use it.
                if (
                    room_id not in results
                    or results[room_id]["stream_ordering"]
                    < room_result["stream_ordering"]
                ):
                    results[room_id] = room_result

        return results

    @cached()
    async def _get_receipts_for_user_with_orderings(
        self, user_id: str, receipt_type: str
    ) -> JsonDict:
        """

@@ -258,9 +220,8 @@ class ReceiptsWorkerStore(SQLBaseStore):
                " WHERE rl.room_id = e.room_id"
                " AND rl.event_id = e.event_id"
                " AND user_id = ?"
                " AND receipt_type = ?"
            )
            txn.execute(sql, (user_id, receipt_type))
            txn.execute(sql, (user_id,))
            return cast(List[Tuple[str, str, int, int]], txn.fetchall())

        rows = await self.db_pool.runInteraction(

@@ -571,14 +532,33 @@ class ReceiptsWorkerStore(SQLBaseStore):
            "get_all_updated_receipts", get_all_updated_receipts_txn
        )

    def _invalidate_get_users_with_receipts_in_room(
        self, room_id: str, receipt_type: str, user_id: str
    ) -> None:
        if receipt_type != ReceiptTypes.READ:
            return

        res = self.get_users_with_read_receipts_in_room.cache.get_immediate(
            room_id, None, update_metrics=False
        )

        if res and user_id in res:
            # We'd only be adding to the set, so no point invalidating if the
            # user is already there
            return

        self.get_users_with_read_receipts_in_room.invalidate((room_id,))

    def invalidate_caches_for_receipt(
        self, room_id: str, receipt_type: str, user_id: str
    ) -> None:
        self._get_receipts_for_user_with_orderings.invalidate((user_id, receipt_type))
        self.get_receipts_for_user.invalidate((user_id, receipt_type))
        self._get_linearized_receipts_for_room.invalidate((room_id,))
        self._get_last_receipt_event_id_for_user.invalidate(
        self.get_last_receipt_event_id_for_user.invalidate(
            (user_id, room_id, receipt_type)
        )
        self._invalidate_get_users_with_receipts_in_room(room_id, receipt_type, user_id)
        self.get_receipts_for_room.invalidate((room_id, receipt_type))

    def process_replication_rows(
        self,

@@ -610,8 +590,8 @@ class ReceiptsWorkerStore(SQLBaseStore):
        """Inserts a receipt into the database if it's newer than the current one.

        Returns:
            None if the receipt is older than the current receipt
            otherwise, the rx timestamp of the event that the receipt corresponds to
            None if the RR is older than the current RR
            otherwise, the rx timestamp of the event that the RR corresponds to
            (or 0 if the event is unknown)
        """
        assert self._can_write_to_receipts

@@ -632,7 +612,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
        if stream_ordering is not None:
            sql = (
                "SELECT stream_ordering, event_id FROM events"
                " INNER JOIN receipts_linearized AS r USING (event_id, room_id)"
                " INNER JOIN receipts_linearized as r USING (event_id, room_id)"
                " WHERE r.room_id = ? AND r.receipt_type = ? AND r.user_id = ?"
            )
            txn.execute(sql, (room_id, receipt_type, user_id))

@@ -673,10 +653,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
            lock=False,
        )

        if (
            receipt_type in (ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE)
            and stream_ordering is not None
        ):
        if receipt_type == ReceiptTypes.READ and stream_ordering is not None:
            self._remove_old_push_actions_before_txn(  # type: ignore[attr-defined]
                txn, room_id=room_id, user_id=user_id, stream_ordering=stream_ordering
            )

@@ -695,10 +672,6 @@ class ReceiptsWorkerStore(SQLBaseStore):

        Automatically does conversion between linearized and graph
        representations.

        Returns:
            The new receipts stream ID and token, if the receipt is newer than
            what was previously persisted. None, otherwise.
        """
        assert self._can_write_to_receipts

@@ -746,7 +719,6 @@ class ReceiptsWorkerStore(SQLBaseStore):
            stream_id=stream_id,
        )

        # If the receipt was older than the currently persisted one, nothing to do.
        if event_ts is None:
            return None

@@ -795,10 +767,14 @@ class ReceiptsWorkerStore(SQLBaseStore):
    ) -> None:
        assert self._can_write_to_receipts

        txn.call_after(self.get_receipts_for_room.invalidate, (room_id, receipt_type))
        txn.call_after(
            self._get_receipts_for_user_with_orderings.invalidate,
            (user_id, receipt_type),
            self._invalidate_get_users_with_receipts_in_room,
            room_id,
            receipt_type,
            user_id,
        )
        txn.call_after(self.get_receipts_for_user.invalidate, (user_id, receipt_type))
        # FIXME: This shouldn't invalidate the whole cache
        txn.call_after(self._get_linearized_receipts_for_room.invalidate, (room_id,))
@@ -215,8 +215,7 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):

    async def is_trial_user(self, user_id: str) -> bool:
        """Checks if user is in the "trial" period, i.e. within the first
        N days of registration defined by `mau_trial_days` config or the
        `mau_appservice_trial_days` config.
        N days of registration defined by `mau_trial_days` config

        Args:
            user_id: The user to check for trial status.

@@ -227,10 +226,7 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
            return False

        now = self._clock.time_msec()
        days = self.config.server.mau_appservice_trial_days.get(
            info["appservice_id"], self.config.server.mau_trial_days
        )
        trial_duration_ms = days * 24 * 60 * 60 * 1000
        trial_duration_ms = self.config.server.mau_trial_days * 24 * 60 * 60 * 1000
        is_trial = (now - info["creation_ts"] * 1000) < trial_duration_ms
        return is_trial
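The is_trial_user hunk adds a per-appservice override: mau_appservice_trial_days is consulted with mau_trial_days as the fallback. A small sketch of the arithmetic, with invented config values:

# Sketch: per-appservice trial window with a global fallback (invented values).
from typing import Dict, Optional

mau_trial_days = 3
mau_appservice_trial_days: Dict[str, int] = {"my_bridge": 30}  # keyed by appservice_id


def trial_duration_ms(appservice_id: Optional[str]) -> int:
    days = mau_appservice_trial_days.get(appservice_id, mau_trial_days)
    return days * 24 * 60 * 60 * 1000


print(trial_duration_ms("my_bridge"))  # 2592000000, i.e. 30 days
print(trial_duration_ms(None))         # 259200000, i.e. the 3-day default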
@@ -42,7 +42,7 @@ from typing import (
)

import attr
from typing_extensions import AsyncContextManager, Concatenate, Literal, ParamSpec
from typing_extensions import AsyncContextManager, Literal

from twisted.internet import defer
from twisted.internet.defer import CancelledError

@@ -237,16 +237,9 @@ async def concurrently_execute(
    )


P = ParamSpec("P")
R = TypeVar("R")


async def yieldable_gather_results(
    func: Callable[Concatenate[T, P], Awaitable[R]],
    iter: Iterable[T],
    *args: P.args,
    **kwargs: P.kwargs,
) -> List[R]:
    func: Callable[..., Awaitable[T]], iter: Iterable, *args: Any, **kwargs: Any
) -> List[T]:
    """Executes the function with each argument concurrently.

    Args:

@@ -262,15 +255,7 @@ async def yieldable_gather_results(
    try:
        return await make_deferred_yieldable(
            defer.gatherResults(
                # type-ignore: mypy reports two errors:
                # error: Argument 1 to "run_in_background" has incompatible type
                #     "Callable[[T, **P], Awaitable[R]]"; expected
                #     "Callable[[T, **P], Awaitable[R]]"  [arg-type]
                # error: Argument 2 to "run_in_background" has incompatible type
                #     "T"; expected "[T, **P.args]"  [arg-type]
                # The former looks like a mypy bug, and the latter looks like a
                # false positive.
                [run_in_background(func, item, *args, **kwargs) for item in iter],  # type: ignore[arg-type]
                [run_in_background(func, item, *args, **kwargs) for item in iter],
                consumeErrors=True,
            )
        )

@@ -592,6 +577,9 @@ class ReadWriteLock:
        return _ctx_manager()


R = TypeVar("R")


def timeout_deferred(
    deferred: "defer.Deferred[_T]", timeout: float, reactor: IReactorTime
) -> "defer.Deferred[_T]":
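Concatenate[T, P] in the richer signature above says: func's first positional parameter is the item taken from iter, and whatever parameters remain are captured by P and must be supplied via *args/**kwargs. A standalone sketch of the same shape, using plain asyncio in place of Twisted's deferreds:

# Sketch: Concatenate pins the first parameter, ParamSpec captures the rest.
import asyncio
from typing import Awaitable, Callable, Iterable, List, TypeVar

from typing_extensions import Concatenate, ParamSpec

P = ParamSpec("P")
T = TypeVar("T")
R = TypeVar("R")


async def gather_results(
    func: Callable[Concatenate[T, P], Awaitable[R]],
    items: Iterable[T],
    *args: P.args,
    **kwargs: P.kwargs,
) -> List[R]:
    # Each call gets one item as the first argument, plus the shared extras.
    return await asyncio.gather(*(func(item, *args, **kwargs) for item in items))


async def fetch(user_id: str, *, suffix: str) -> str:
    return user_id + suffix


print(asyncio.run(gather_results(fetch, ["@a", "@b"], suffix=":example.org")))
# ['@a:example.org', '@b:example.org']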
@@ -12,19 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import (
    Any,
    Awaitable,
    Callable,
    Dict,
    Generic,
    List,
    Optional,
    TypeVar,
    Union,
)

from typing_extensions import ParamSpec
from typing import Any, Callable, Dict, List

from twisted.internet import defer

@@ -87,11 +75,7 @@ class Distributor:
        run_as_background_process(name, self.signals[name].fire, *args, **kwargs)


P = ParamSpec("P")
R = TypeVar("R")


class Signal(Generic[P]):
class Signal:
    """A Signal is a dispatch point that stores a list of callables as
    observers of it.

@@ -103,16 +87,16 @@ class Signal(Generic[P]):

    def __init__(self, name: str):
        self.name: str = name
        self.observers: List[Callable[P, Any]] = []
        self.observers: List[Callable] = []

    def observe(self, observer: Callable[P, Any]) -> None:
    def observe(self, observer: Callable) -> None:
        """Adds a new callable to the observer list which will be invoked by
        the 'fire' method.

        Each observer callable may return a Deferred."""
        self.observers.append(observer)

    def fire(self, *args: P.args, **kwargs: P.kwargs) -> "defer.Deferred[List[Any]]":
    def fire(self, *args: Any, **kwargs: Any) -> "defer.Deferred[List[Any]]":
        """Invokes every callable in the observer list, passing in the args and
        kwargs. Exceptions thrown by observers are logged but ignored. It is
        not an error to fire a signal with no observers.

@@ -120,7 +104,7 @@ class Signal(Generic[P]):

        Returns a Deferred that will complete when all the observers have
        completed."""

        async def do(observer: Callable[P, Union[R, Awaitable[R]]]) -> Optional[R]:
        async def do(observer: Callable[..., Any]) -> Any:
            try:
                return await maybe_awaitable(observer(*args, **kwargs))
            except Exception as e:

@@ -130,7 +114,6 @@ class Signal(Generic[P]):
                    observer,
                    e,
                )
                return None

        deferreds = [run_in_background(do, o) for o in self.observers]
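The typed side of the Signal hunk makes the class generic over its observers' parameters, so observe and fire agree on one signature. A cut-down sketch without the Twisted run_in_background and logging plumbing (which is elided here):

# Sketch: a Signal generic over its observers' call signature (PEP 612).
import asyncio
import inspect
from typing import Any, Callable, Generic, List

from typing_extensions import ParamSpec

P = ParamSpec("P")


class Signal(Generic[P]):
    def __init__(self, name: str) -> None:
        self.name = name
        self.observers: List[Callable[P, Any]] = []

    def observe(self, observer: Callable[P, Any]) -> None:
        self.observers.append(observer)

    async def fire(self, *args: P.args, **kwargs: P.kwargs) -> List[Any]:
        # Invoke every observer; exceptions are logged-and-ignored in the
        # original, here simply swallowed.
        results: List[Any] = []
        for observer in self.observers:
            try:
                result = observer(*args, **kwargs)
                if inspect.isawaitable(result):
                    result = await result
                results.append(result)
            except Exception:
                results.append(None)
        return results


on_login = Signal("on_login")  # conceptually Signal[[str]]
on_login.observe(lambda user_id: f"saw {user_id}")
print(asyncio.run(on_login.fire("@alice:example.org")))  # ['saw @alice:example.org']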
Some files were not shown because too many files have changed in this diff.