Compare commits
29 Commits
anoa/get_u...anoa/testi
| SHA1 |
|---|
| 18cf53376d |
| e49a90899b |
| 6317eba770 |
| da51afdc6b |
| 2ccad9a1b6 |
| 714e75dc1b |
| 0c1b27ecd0 |
| ac1bbfdd2b |
| 7dd06332a9 |
| 76f15f4bf2 |
| 887ec58556 |
| b3b2da56b3 |
| cb56a51ada |
| 40042dec0d |
| 2c881bf8a4 |
| 667e9ca5be |
| 8490a8793c |
| 2ff55e02c1 |
| 0ac339cfe6 |
| f4064862b2 |
| 67851671e5 |
| a7dadf87be |
| c215378411 |
| e91373c1fa |
| 4e48515bbf |
| 70807c83c6 |
| 44cf7cf56e |
| d2a9b45df0 |
| 81e74fbae5 |
44 CHANGES.md
@@ -1,40 +1,15 @@
Next version
============

* New templates (`sso_auth_confirm.html`, `sso_auth_success.html`, and
  `sso_account_deactivated.html`) were added to Synapse. If your Synapse is
  configured to use SSO and a custom `sso_redirect_confirm_template_dir`
  configuration then these templates will need to be duplicated into that
  directory.
* Two new templates (`sso_auth_confirm.html` and `sso_account_deactivated.html`)
  were added to Synapse. If your Synapse is configured to use SSO and a custom
  `sso_redirect_confirm_template_dir` configuration then these templates will
  need to be duplicated into that directory.

* Plugins using the `complete_sso_login` method of `synapse.module_api.ModuleApi`
  should update to using the async/await version `complete_sso_login_async` which
  includes additional checks. The non-async version is considered deprecated.


Synapse 1.12.4 (2020-04-23)
===========================

No significant changes.


Synapse 1.12.4rc1 (2020-04-22)
==============================

Features
--------

- Always send users their own device updates. ([\#7160](https://github.com/matrix-org/synapse/issues/7160))
- Add support for handling GET requests for `account_data` on a worker. ([\#7311](https://github.com/matrix-org/synapse/issues/7311))


Bugfixes
--------

- Fix a bug that prevented cross-signing with users on worker-mode synapses. ([\#7255](https://github.com/matrix-org/synapse/issues/7255))
- Do not treat display names as globs in push rules. ([\#7271](https://github.com/matrix-org/synapse/issues/7271))
- Fix a bug with cross-signing devices belonging to remote users who did not share a room with any user on the local homeserver. ([\#7289](https://github.com/matrix-org/synapse/issues/7289))

Synapse 1.12.3 (2020-04-03)
===========================

@@ -67,19 +42,12 @@ Bugfixes
Synapse 1.12.0 (2020-03-23)
===========================

No significant changes since 1.12.0rc1.

Debian packages and Docker images are rebuilt using the latest versions of
dependency libraries, including Twisted 20.3.0. **Please see security advisory
below**.

Potential slow database update during upgrade
---------------------------------------------

Synapse 1.12.0 includes a database update which is run as part of the upgrade,
and which may take some time (several hours in the case of a large
server). Synapse will not respond to HTTP requests while this update is taking
place. For information on seeing if you are affected, and a workaround if you
are, see the [upgrade notes](UPGRADE.rst#upgrading-to-v1120).

Security advisory
-----------------
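The `complete_sso_login` deprecation above amounts to a one-line change in plugin code. A minimal sketch (the method and arguments follow the changelog entry; the surrounding plugin method is hypothetical):

```python
# Hypothetical plugin method illustrating the migration described above.
# `self.module_api` is assumed to be a synapse.module_api.ModuleApi instance.
async def finish_sso(self, registered_user_id, request, client_redirect_url):
    # Deprecated, synchronous variant:
    #   self.module_api.complete_sso_login(
    #       registered_user_id, request, client_redirect_url
    #   )
    # Preferred async variant, which performs additional checks:
    await self.module_api.complete_sso_login_async(
        registered_user_id, request, client_redirect_url
    )
```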
13 MANIFEST.in
@@ -30,24 +30,23 @@ recursive-include synapse/static *.gif
recursive-include synapse/static *.html
recursive-include synapse/static *.js

exclude .codecov.yml
exclude .coveragerc
exclude .dockerignore
exclude .editorconfig
exclude Dockerfile
exclude mypy.ini
exclude sytest-blacklist
exclude .dockerignore
exclude test_postgresql.sh
exclude .editorconfig
exclude sytest-blacklist

include pyproject.toml
recursive-include changelog.d *

prune .buildkite
prune .circleci
prune .codecov.yml
prune .coveragerc
prune .github
prune contrib
prune debian
prune demo/etc
prune docker
prune mypy.ini
prune snap
prune stubs
96 UPGRADE.rst
@@ -75,102 +75,6 @@ for example:
    wget https://packages.matrix.org/debian/pool/main/m/matrix-synapse-py3/matrix-synapse-py3_1.3.0+stretch1_amd64.deb
    dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb

Upgrading to v1.13.0
====================

Incorrect database migration in old synapse versions
----------------------------------------------------

A bug was introduced in Synapse 1.4.0 which could cause the room directory to
be incomplete or empty if Synapse was upgraded directly from v1.2.1 or earlier,
to versions between v1.4.0 and v1.12.x.

This will *not* be a problem for Synapse installations which were:
 * created at v1.4.0 or later,
 * upgraded via v1.3.x, or
 * upgraded straight from v1.2.1 or earlier to v1.13.0 or later.

If completeness of the room directory is a concern, installations which are
affected can be repaired as follows:

1. Run the following sql from a `psql` or `sqlite3` console:

   .. code:: sql

     INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES
       ('populate_stats_process_rooms', '{}', 'current_state_events_membership');

     INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES
       ('populate_stats_process_users', '{}', 'populate_stats_process_rooms');

2. Restart synapse.
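For SQLite deployments, step 1 can equally be run from Python's standard
``sqlite3`` module. A minimal sketch, assuming the database lives at
``homeserver.db`` (PostgreSQL users would run the same SQL from ``psql``):

.. code:: python

   import sqlite3

   conn = sqlite3.connect("homeserver.db")  # path is an assumption
   for name, depends_on in [
       ("populate_stats_process_rooms", "current_state_events_membership"),
       ("populate_stats_process_users", "populate_stats_process_rooms"),
   ]:
       # Queue each background update exactly as in the SQL above.
       conn.execute(
           "INSERT INTO background_updates (update_name, progress_json, depends_on) "
           "VALUES (?, '{}', ?)",
           (name, depends_on),
       )
   conn.commit()
   conn.close()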
Upgrading to v1.12.0
====================

This version includes a database update which is run as part of the upgrade,
and which may take some time (several hours in the case of a large
server). Synapse will not respond to HTTP requests while this update is taking
place.

This is only likely to be a problem in the case of a server which is
participating in many rooms.

0. As with all upgrades, it is recommended that you have a recent backup of
   your database which can be used for recovery in the event of any problems.

1. As an initial check to see if you will be affected, you can try running the
   following query from the `psql` or `sqlite3` console. It is safe to run it
   while Synapse is still running.

   .. code:: sql

      SELECT MAX(q.v) FROM (
        SELECT (
          SELECT ej.json AS v
          FROM state_events se INNER JOIN event_json ej USING (event_id)
          WHERE se.room_id=rooms.room_id AND se.type='m.room.create' AND se.state_key=''
          LIMIT 1
        ) FROM rooms WHERE rooms.room_version IS NULL
      ) q;

   This query will take about the same amount of time as the upgrade process: ie,
   if it takes 5 minutes, then it is likely that Synapse will be unresponsive for
   5 minutes during the upgrade.

   If you consider an outage of this duration to be acceptable, no further
   action is necessary and you can simply start Synapse 1.12.0.

   If you would prefer to reduce the downtime, continue with the steps below.

2. The easiest workaround for this issue is to manually
   create a new index before upgrading. On PostgreSQL, this can be done as follows:

   .. code:: sql

      CREATE INDEX CONCURRENTLY tmp_upgrade_1_12_0_index
      ON state_events(room_id) WHERE type = 'm.room.create';

   The above query may take some time, but is also safe to run while Synapse is
   running.

   We assume that no SQLite users have databases large enough to be
   affected. If you *are* affected, you can run a similar query, omitting the
   ``CONCURRENTLY`` keyword. Note however that this operation may in itself cause
   Synapse to stop running for some time. Synapse admins are reminded that
   `SQLite is not recommended for use outside a test
   environment <https://github.com/matrix-org/synapse/blob/master/README.rst#using-postgresql>`_.

3. Once the index has been created, the ``SELECT`` query in step 1 above should
   complete quickly. It is therefore safe to upgrade to Synapse 1.12.0.

4. Once Synapse 1.12.0 has successfully started and is responding to HTTP
   requests, the temporary index can be removed:

   .. code:: sql

      DROP INDEX tmp_upgrade_1_12_0_index;

Upgrading to v1.10.0
====================
@@ -1 +0,0 @@
Return total number of users and profile attributes in admin users endpoint. Contributed by Awesome Technologies Innovationslabor GmbH.
@@ -1 +0,0 @@
Add support for running replication over Redis when using workers.
@@ -1 +0,0 @@
Improve the documentation of application service configuration files.
@@ -1 +0,0 @@
Run replication streamers on workers.
1 changelog.d/7160.feature Normal file
@@ -0,0 +1 @@
Always send users their own device updates.
@@ -1 +0,0 @@
Use `stream.current_token()` and remove `stream_positions()`.
@@ -1 +0,0 @@
Add explicit Python build tooling as dependencies for the snapcraft build.
@@ -1 +1 @@
Add typing annotations in `synapse.federation`.
Add typing information to federation server code.
@@ -1 +0,0 @@
Extend room admin api (`GET /_synapse/admin/v1/rooms`) with additional attributes.
1 changelog.d/7255.bugfix Normal file
@@ -0,0 +1 @@
Fix a bug that prevented cross-signing with users on worker-mode synapses.
@@ -1 +0,0 @@
Reject unknown session IDs during user interactive authentication instead of silently creating a new session.
@@ -1 +0,0 @@
Documentation of media_storage_providers options updated to avoid misunderstandings. Contributed by Tristan Lins.
@@ -1 +0,0 @@
Add some unit tests for replication.
@@ -1 +0,0 @@
Support SSO in the user interactive authentication workflow.
@@ -1 +0,0 @@
Add MultiWriterIdGenerator to support multiple concurrent writers of streams.
@@ -1 +0,0 @@
Move catchup of replication streams logic to worker.
1 changelog.d/7289.bugfix Normal file
@@ -0,0 +1 @@
Fix a bug with cross-signing devices with remote users when they did not share a room with any user on the local homeserver.
@@ -1 +0,0 @@
Move catchup of replication streams logic to worker.
@@ -1 +0,0 @@
Improve typing annotations in `synapse.replication.tcp.streams.Stream`.
@@ -1 +0,0 @@
Reduce log verbosity of url cache cleanup tasks.
@@ -1 +0,0 @@
Fix sample SAML Service Provider configuration. Contributed by @frcl.
@@ -1 +0,0 @@
Persist user interactive authentication sessions across workers and Synapse restarts.
@@ -1 +0,0 @@
Fix StreamChangeCache to work with multiple entities changing on the same stream id.
@@ -1 +0,0 @@
Allow `/requestToken` endpoints to hide the existence (or lack thereof) of 3PID associations on the homeserver.
@@ -1 +0,0 @@
Fixed backwards compatibility logic of the first value of `trusted_third_party_id_servers` being used for `account_threepid_delegates.email`, which occurs when the former, deprecated option is set and the latter is not.
@@ -1 +0,0 @@
Move catchup of replication streams logic to worker.
@@ -1 +0,0 @@
Fix an incorrect import in IdentityHandler.
@@ -1 +0,0 @@
Reduce logging verbosity for successful federation requests.
@@ -1 +0,0 @@
Add support for running replication over Redis when using workers.
@@ -1 +0,0 @@
Move catchup of replication streams logic to worker.
@@ -1 +0,0 @@
Fix a bug where event updates might not be sent over replication to worker processes after the stream falls behind.
@@ -1 +0,0 @@
Convert some federation handler code to async/await.
@@ -1 +0,0 @@
Fix bad error handling that would cause Synapse to crash if it's provided with a YAML configuration file that's either empty or doesn't parse into a key-value map.
@@ -1 +0,0 @@
Support SSO in the user interactive authentication workflow.
@@ -1 +0,0 @@
Fix incorrect metrics reporting for `renew_attestations` background task.
@@ -1 +0,0 @@
Add support for running replication over Redis when using workers.
@@ -1 +0,0 @@
Add documentation on monitoring workers with Prometheus.
@@ -1 +0,0 @@
Fix a bug where event updates might not be sent over replication to worker processes after the stream falls behind.
@@ -1 +0,0 @@
Fix collation for postgres for unit tests.
@@ -1 +0,0 @@
Clarify endpoint usage in the users admin api documentation.
@@ -1 +0,0 @@
Convert RegistrationWorkerStore.is_server_admin and dependent code to async/await.
@@ -1 +0,0 @@
Add an `instance_name` to `RDATA` and `POSITION` replication commands.
@@ -1 +0,0 @@
Prevent non-federating rooms from appearing in responses to federated `POST /publicRoom` requests when a filter was included.
@@ -1 +0,0 @@
Improve error responses when accessing remote public room lists.
@@ -1 +0,0 @@
Thread through instance name to replication client.
@@ -1 +0,0 @@
Move catchup of replication streams logic to worker.
@@ -1 +0,0 @@
Move catchup of replication streams logic to worker.
@@ -1 +0,0 @@
Add typing annotations in `synapse.federation`.
@@ -1 +0,0 @@
Fix a bug which would cause the room directory to be incorrectly populated if Synapse was upgraded directly from v1.2.1 or earlier to v1.4.0 or later. Note that this fix does not apply retrospectively; see the [upgrade notes](UPGRADE.rst#upgrading-to-v1130) for more information.
@@ -1 +0,0 @@
Fix bug in `EventContext.deserialize`.
@@ -1 +0,0 @@
Convert synapse.server_notices to async/await.
@@ -1 +0,0 @@
Convert synapse.notifier to async/await.
@@ -1 +0,0 @@
Convert the room handler to async/await.
@@ -1 +0,0 @@
Update docker runtime image to Alpine v3.11. Contributed by @Starbix.
@@ -1 +0,0 @@
Add support for running replication over Redis when using workers.
@@ -1 +0,0 @@
Fix issues with the Python package manifest.
@@ -1 +0,0 @@
Clean up some LoggingContext code.
@@ -1 +0,0 @@
Prevent methods in `synapse.handlers.auth` from polling the homeserver config every request.
@@ -1 +0,0 @@
Move catchup of replication streams logic to worker.
@@ -1 +0,0 @@
Speed up fetching device lists changes when handling `/sync` requests.
@@ -1 +0,0 @@
Add support for running replication over Redis when using workers.
@@ -1 +0,0 @@
Improve performance of `get_e2e_cross_signing_key`.
@@ -1 +0,0 @@
Improve performance of `mark_as_sent_devices_by_remote`.
8 debian/changelog vendored
@@ -1,16 +1,8 @@
<<<<<<< HEAD
matrix-synapse-py3 (1.12.3ubuntu1) UNRELEASED; urgency=medium

  * Add information about .well-known files to Debian installation scripts.

 -- Patrick Cloke <patrickc@matrix.org>  Mon, 06 Apr 2020 10:10:38 -0400
=======
matrix-synapse-py3 (1.12.4) stable; urgency=medium

  * New synapse release 1.12.4.

 -- Synapse Packaging team <packages@matrix.org>  Thu, 23 Apr 2020 10:58:14 -0400
>>>>>>> master

matrix-synapse-py3 (1.12.3) stable; urgency=medium
@@ -55,7 +55,7 @@ RUN pip install --prefix="/install" --no-warn-script-location \
### Stage 1: runtime
###

FROM docker.io/python:${PYTHON_VERSION}-alpine3.11
FROM docker.io/python:${PYTHON_VERSION}-alpine3.10

# xmlsec is required for saml support
RUN apk add --no-cache --virtual .runtime_deps \
@@ -11,21 +11,8 @@ The following query parameters are available:
* `from` - Offset in the returned list. Defaults to `0`.
* `limit` - Maximum number of rooms to return. Defaults to `100`.
* `order_by` - The method in which to sort the returned list of rooms. Valid values are:
  - `alphabetical` - Same as `name`. This is deprecated.
  - `size` - Same as `joined_members`. This is deprecated.
  - `name` - Rooms are ordered alphabetically by room name. This is the default.
  - `canonical_alias` - Rooms are ordered alphabetically by main alias address of the room.
  - `joined_members` - Rooms are ordered by the number of members. Largest to smallest.
  - `joined_local_members` - Rooms are ordered by the number of local members. Largest to smallest.
  - `version` - Rooms are ordered by room version. Largest to smallest.
  - `creator` - Rooms are ordered alphabetically by creator of the room.
  - `encryption` - Rooms are ordered alphabetically by the end-to-end encryption algorithm.
  - `federatable` - Rooms are ordered by whether the room is federatable.
  - `public` - Rooms are ordered by visibility in room list.
  - `join_rules` - Rooms are ordered alphabetically by join rules of the room.
  - `guest_access` - Rooms are ordered alphabetically by guest access option of the room.
  - `history_visibility` - Rooms are ordered alphabetically by visibility of history of the room.
  - `state_events` - Rooms are ordered by number of state events. Largest to smallest.
  - `alphabetical` - Rooms are ordered alphabetically by room name. This is the default.
  - `size` - Rooms are ordered by the number of members. Largest to smallest.
* `dir` - Direction of room order. Either `f` for forwards or `b` for backwards. Setting
  this value to `b` will reverse the above sort order. Defaults to `f`.
* `search_term` - Filter rooms by their room name. Search term can be contained in any
@@ -39,16 +26,6 @@ The following fields are possible in the JSON response body:
- `name` - The name of the room.
- `canonical_alias` - The canonical (main) alias address of the room.
- `joined_members` - How many users are currently in the room.
- `joined_local_members` - How many local users are currently in the room.
- `version` - The version of the room as a string.
- `creator` - The `user_id` of the room creator.
- `encryption` - Algorithm of end-to-end encryption of messages. Is `null` if encryption is not active.
- `federatable` - Whether users on other servers can join this room.
- `public` - Whether the room is visible in room directory.
- `join_rules` - The type of rules used for users wishing to join this room. One of: ["public", "knock", "invite", "private"].
- `guest_access` - Whether guests can join the room. One of: ["can_join", "forbidden"].
- `history_visibility` - Who can see the room history. One of: ["invited", "joined", "shared", "world_readable"].
- `state_events` - Total number of state_events of a room. Complexity of the room.
* `offset` - The current pagination offset in rooms. This parameter should be
  used instead of `next_token` for room offset as `next_token` is
  not intended to be parsed.
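As an illustration of the parameters and fields above, a hedged sketch of calling the endpoint from Python (`GET /_synapse/admin/v1/rooms` is the path named in the changelog; the base URL and token are placeholders):

```python
import requests

resp = requests.get(
    "https://localhost:8448/_synapse/admin/v1/rooms",  # base URL is an assumption
    params={"from": 0, "limit": 10, "order_by": "joined_members", "dir": "b"},
    headers={"Authorization": "Bearer <admin_access_token>"},
)
resp.raise_for_status()
body = resp.json()
for room in body["rooms"]:
    # `room_id` and `joined_members` are among the documented response fields.
    print(room["room_id"], room["joined_members"])
print("offset:", body["offset"])
```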
@@ -83,34 +60,14 @@ Response:
"room_id": "!OGEhHVWSdvArJzumhm:matrix.org",
"name": "Matrix HQ",
"canonical_alias": "#matrix:matrix.org",
"joined_members": 8326,
"joined_local_members": 2,
"version": "1",
"creator": "@foo:matrix.org",
"encryption": null,
"federatable": true,
"public": true,
"join_rules": "invite",
"guest_access": null,
"history_visibility": "shared",
"state_events": 93534
"joined_members": 8326
},
... (8 hidden items) ...
{
"room_id": "!xYvNcQPhnkrdUmYczI:matrix.org",
"name": "This Week In Matrix (TWIM)",
"canonical_alias": "#twim:matrix.org",
"joined_members": 314,
"joined_local_members": 20,
"version": "4",
"creator": "@foo:matrix.org",
"encryption": "m.megolm.v1.aes-sha2",
"federatable": true,
"public": false,
"join_rules": "invite",
"guest_access": null,
"history_visibility": "shared",
"state_events": 8345
"joined_members": 314
}
],
"offset": 0,
@@ -135,17 +92,7 @@ Response:
"room_id": "!xYvNcQPhnkrdUmYczI:matrix.org",
"name": "This Week In Matrix (TWIM)",
"canonical_alias": "#twim:matrix.org",
"joined_members": 314,
"joined_local_members": 20,
"version": "4",
"creator": "@foo:matrix.org",
"encryption": "m.megolm.v1.aes-sha2",
"federatable": true,
"public": false,
"join_rules": "invite",
"guest_access": null,
"history_visibility": "shared",
"state_events": 8
"joined_members": 314
}
],
"offset": 0,
@@ -170,34 +117,14 @@ Response:
"room_id": "!OGEhHVWSdvArJzumhm:matrix.org",
"name": "Matrix HQ",
"canonical_alias": "#matrix:matrix.org",
"joined_members": 8326,
"joined_local_members": 2,
"version": "1",
"creator": "@foo:matrix.org",
"encryption": null,
"federatable": true,
"public": true,
"join_rules": "invite",
"guest_access": null,
"history_visibility": "shared",
"state_events": 93534
"joined_members": 8326
},
... (98 hidden items) ...
{
"room_id": "!xYvNcQPhnkrdUmYczI:matrix.org",
"name": "This Week In Matrix (TWIM)",
"canonical_alias": "#twim:matrix.org",
"joined_members": 314,
"joined_local_members": 20,
"version": "4",
"creator": "@foo:matrix.org",
"encryption": "m.megolm.v1.aes-sha2",
"federatable": true,
"public": false,
"join_rules": "invite",
"guest_access": null,
"history_visibility": "shared",
"state_events": 8345
"joined_members": 314
}
],
"offset": 0,
@@ -227,16 +154,6 @@ Response:
"name": "Music Theory",
"canonical_alias": "#musictheory:matrix.org",
"joined_members": 127,
"joined_local_members": 2,
"version": "1",
"creator": "@foo:matrix.org",
"encryption": null,
"federatable": true,
"public": true,
"join_rules": "invite",
"guest_access": null,
"history_visibility": "shared",
"state_events": 93534
},
... (48 hidden items) ...
{
@@ -244,16 +161,6 @@ Response:
"name": "weechat-matrix",
"canonical_alias": "#weechat-matrix:termina.org.uk",
"joined_members": 137,
"joined_local_members": 20,
"version": "4",
"creator": "@foo:termina.org.uk",
"encryption": null,
"federatable": true,
"public": true,
"join_rules": "invite",
"guest_access": null,
"history_visibility": "shared",
"state_events": 8345
}
],
"offset": 100,
@@ -33,22 +33,12 @@ with a body of:
including an ``access_token`` of a server admin.

The parameter ``displayname`` is optional and defaults to the value of
``user_id``.

The parameter ``threepids`` is optional and allows setting the third-party IDs
(email, msisdn) belonging to a user.

The parameter ``avatar_url`` is optional. Must be a [MXC
URI](https://matrix.org/docs/spec/client_server/r0.6.0#matrix-content-mxc-uris).

The parameter ``admin`` is optional and defaults to ``false``.

The parameter ``deactivated`` is optional and defaults to ``false``.

The parameter ``password`` is optional. If provided, the user's password is
updated and all devices are logged out.

The parameter ``displayname`` is optional and defaults to ``user_id``.
The parameter ``threepids`` is optional.
The parameter ``avatar_url`` is optional.
The parameter ``admin`` is optional and defaults to 'false'.
The parameter ``deactivated`` is optional and defaults to 'false'.
The parameter ``password`` is optional. If provided the user's password is updated and all devices are logged out.
If the user already exists then optional parameters default to the current value.
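As a concrete illustration of the request described above, a hedged sketch; the
endpoint shape ``PUT /_synapse/admin/v2/users/<user_id>`` is inferred from the
surrounding docs, and the homeserver URL and token are placeholders:

.. code:: python

   import requests

   resp = requests.put(
       "https://localhost:8448/_synapse/admin/v2/users/@user:example.com",
       headers={"Authorization": "Bearer <admin_access_token>"},
       json={
           "password": "s3cret",   # optional; updating it logs out all devices
           "displayname": "User",  # optional; defaults to the user_id
           "admin": False,         # optional; defaults to false
           "deactivated": False,   # optional; defaults to false
       },
   )
   resp.raise_for_status()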
List Accounts
@@ -61,25 +51,16 @@ The api is::
    GET /_synapse/admin/v2/users?from=0&limit=10&guests=false

including an ``access_token`` of a server admin.

The parameter ``from`` is optional but used for pagination, denoting the
offset in the returned results. This should be treated as an opaque value and
not explicitly set to anything other than the return value of ``next_token``
from a previous call.

The parameter ``limit`` is optional but is used for pagination, denoting the
maximum number of items to return in this call. Defaults to ``100``.

The parameter ``user_id`` is optional and filters to only users with user IDs
that contain this value.

The parameter ``guests`` is optional and if ``false`` will **exclude** guest users.
Defaults to ``true`` to include guest users.

The parameter ``deactivated`` is optional and if ``true`` will **include** deactivated users.
Defaults to ``false`` to exclude deactivated users.

A JSON body is returned with the following shape:
The parameters ``from`` and ``limit`` are required only for pagination.
By default, a ``limit`` of 100 is used.
The parameter ``user_id`` can be used to select only users with user ids that
contain this value.
The parameter ``guests=false`` can be used to exclude guest users,
default is to include guest users.
The parameter ``deactivated=true`` can be used to include deactivated users,
default is to exclude deactivated users.
If the endpoint does not return a ``next_token`` then there are no more users left.
It returns a JSON body like the following:

.. code:: json
@@ -91,29 +72,19 @@ A JSON body is returned with the following shape:
"is_guest": 0,
"admin": 0,
"user_type": null,
"deactivated": 0,
"displayname": "<User One>",
"avatar_url": null
"deactivated": 0
}, {
"name": "<user_id2>",
"password_hash": "<password_hash2>",
"is_guest": 0,
"admin": 1,
"user_type": null,
"deactivated": 0,
"displayname": "<User Two>",
"avatar_url": "<avatar_url>"
"deactivated": 0
}
],
"next_token": "100",
"total": 200
"next_token": "100"
}

To paginate, check for ``next_token`` and if present, call the endpoint again
with ``from`` set to the value of ``next_token``. This will return a new page.

If the endpoint does not return a ``next_token`` then there are no more users
to paginate through.
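The pagination contract above translates into a simple loop. A sketch, with an
assumed base URL and token:

.. code:: python

   import requests

   BASE = "https://localhost:8448"  # assumption; adjust to your homeserver
   HEADERS = {"Authorization": "Bearer <admin_access_token>"}

   users = []
   params = {"from": 0, "limit": 100, "guests": "false"}
   while True:
       body = requests.get(
           BASE + "/_synapse/admin/v2/users", params=params, headers=HEADERS
       ).json()
       users.extend(body["users"])
       # No next_token means there are no more users to paginate through.
       if "next_token" not in body:
           break
       params["from"] = body["next_token"]
   print("fetched %d users" % len(users))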
Query Account
=============
@@ -23,13 +23,9 @@ namespaces:
    users:  # List of users we're interested in
      - exclusive: <bool>
        regex: <regex>
        group_id: <group>
      - ...
  aliases: []  # List of aliases we're interested in
  rooms: []  # List of room ids we're interested in
```

`exclusive`: If enabled, only this application service is allowed to register users in its namespace(s).
`group_id`: All users of this application service are dynamically joined to this group. This is useful for e.g. user organisation or flairs.

See the [spec](https://matrix.org/docs/spec/application_service/unstable.html) for further details on how application services work.
@@ -60,31 +60,6 @@
1. Restart Prometheus.

## Monitoring workers

To monitor a Synapse installation using
[workers](https://github.com/matrix-org/synapse/blob/master/docs/workers.md),
every worker needs to be monitored independently, in addition to
the main homeserver process. This is because workers don't send
their metrics to the main homeserver process, but expose them
directly (if they are configured to do so).

To allow collecting metrics from a worker, you need to add a
`metrics` listener to its configuration, by adding the following
under `worker_listeners`:

```yaml
- type: metrics
  bind_address: ''
  port: 9101
```

The `bind_address` and `port` parameters should be set so that
the resulting listener can be reached by Prometheus, and they
don't clash with an existing worker.
With this example, the worker's metrics would then be available
on `http://127.0.0.1:9101`.
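Before pointing Prometheus at the new listener, it can help to confirm it is reachable. A quick sketch, assuming the example port above and the usual Synapse metrics path (`/_synapse/metrics` is an assumption; verify it against your Prometheus scrape config):

```python
import requests

# Port matches the example listener above; the path is an assumption based on
# how Synapse normally exposes Prometheus metrics.
resp = requests.get("http://127.0.0.1:9101/_synapse/metrics", timeout=5)
resp.raise_for_status()
print(resp.text.splitlines()[0])  # first exposition line, e.g. a HELP comment
```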
## Renaming of metrics & deprecation of old names in 1.2

Synapse 1.2 updates the Prometheus metrics to match the naming
@@ -414,16 +414,6 @@ retention:
#  longest_max_lifetime: 1y
#  interval: 1d

# Inhibits the /requestToken endpoints from returning an error that might leak
# information about whether an e-mail address is in use or not on this
# homeserver.
# Note that for some endpoints the error situation is the e-mail already being
# used, and for others the error is entering the e-mail being unused.
# If this option is enabled, instead of returning an error, these endpoints will
# act as if no error happened and return a fake session ID ('sid') to clients.
#
#request_token_inhibit_3pid_errors: true


## TLS ##
@@ -745,11 +735,12 @@ media_store_path: "DATADIR/media_store"
#
#media_storage_providers:
#  - module: file_system
#    # Whether to store newly uploaded local files
#    # Whether to write new local files.
#    store_local: false
#    # Whether to store newly downloaded remote files
#    # Whether to write new remote media
#    store_remote: false
#    # Whether to wait for successful storage for local uploads
#    # Whether to block upload requests waiting for write to this
#    # provider to complete
#    store_synchronous: false
#    config:
#       directory: /mnt/some/other/directory
@@ -1349,32 +1340,32 @@ saml2_config:
#   remote:
#     - url: https://our_idp/metadata.xml
#
#   # By default, the user has to go to our login page first. If you'd like
#   # to allow IdP-initiated login, set 'allow_unsolicited: true' in a
#   # 'service.sp' section:
#   #
#   #service:
#   #  sp:
#   #    allow_unsolicited: true
#   # By default, the user has to go to our login page first. If you'd like
#   # to allow IdP-initiated login, set 'allow_unsolicited: true' in a
#   # 'service.sp' section:
#   #
#   #service:
#   #  sp:
#   #    allow_unsolicited: true
#
#   # The examples below are just used to generate our metadata xml, and you
#   # may well not need them, depending on your setup. Alternatively you
#   # may need a whole lot more detail - see the pysaml2 docs!
#   # The examples below are just used to generate our metadata xml, and you
#   # may well not need them, depending on your setup. Alternatively you
#   # may need a whole lot more detail - see the pysaml2 docs!
#
#   description: ["My awesome SP", "en"]
#   name: ["Test SP", "en"]
#   description: ["My awesome SP", "en"]
#   name: ["Test SP", "en"]
#
#   organization:
#     name: Example com
#     display_name:
#       - ["Example co", "en"]
#     url: "http://example.com"
#   organization:
#     name: Example com
#     display_name:
#       - ["Example co", "en"]
#     url: "http://example.com"
#
#   contact_person:
#     - given_name: Bob
#       sur_name: "the Sysadmin"
#       email_address": ["admin@example.com"]
#       contact_type": technical
#   contact_person:
#     - given_name: Bob
#       sur_name: "the Sysadmin"
#       email_address": ["admin@example.com"]
#       contact_type": technical

# Instead of putting the config inline as above, you can specify a
# separate pysaml2 configuration file:
@@ -1518,30 +1509,6 @@ sso:
#
# * server_name: the homeserver's name.
#
# * HTML page which notifies the user that they are authenticating to confirm
#   an operation on their account during the user interactive authentication
#   process: 'sso_auth_confirm.html'.
#
#   When rendering, this template is given the following variables:
#     * redirect_url: the URL the user is about to be redirected to. Needs
#                     manual escaping (see
#                     https://jinja.palletsprojects.com/en/2.11.x/templates/#html-escaping).
#
#     * description: the operation which the user is being asked to confirm
#
# * HTML page shown after a successful user interactive authentication session:
#   'sso_auth_success.html'.
#
#   Note that this page must include the JavaScript which notifies of a successful authentication
#   (see https://matrix.org/docs/spec/client_server/r0.6.0#fallback).
#
#   This template has no additional variables.
#
# * HTML page shown during single sign-on if a deactivated user (according to Synapse's database)
#   attempts to login: 'sso_account_deactivated.html'.
#
#   This template has no additional variables.
#
# You can see the default templates at:
# https://github.com/matrix-org/synapse/tree/master/synapse/res/templates
#
@@ -15,17 +15,15 @@ example flow would be (where '>' indicates master to worker and
    > SERVER example.com
    < REPLICATE
    > POSITION events master 53
    > RDATA events master 54 ["$foo1:bar.com", ...]
    > RDATA events master 55 ["$foo4:bar.com", ...]
    > POSITION events 53
    > RDATA events 54 ["$foo1:bar.com", ...]
    > RDATA events 55 ["$foo4:bar.com", ...]

The example shows the server accepting a new connection and sending its identity
with the `SERVER` command, followed by the client server to respond with the
position of all streams. The server then periodically sends `RDATA` commands
which have the format `RDATA <stream_name> <instance_name> <token> <row>`, where
the format of `<row>` is defined by the individual streams. The
`<instance_name>` is the name of the Synapse process that generated the data
(usually "master").
which have the format `RDATA <stream_name> <token> <row>`, where the format of
`<row>` is defined by the individual streams.

Error reporting happens by either the client or server sending an ERROR
command, and usually the connection will be closed.
@@ -54,7 +52,7 @@ The basic structure of the protocol is line based, where the initial
word of each line specifies the command. The rest of the line is parsed
based on the command. For example, the RDATA command is defined as:

    RDATA <stream_name> <instance_name> <token> <row_json>
    RDATA <stream_name> <token> <row_json>

(Note that <row_json> may contain spaces, but cannot contain
newlines.)
@@ -138,11 +136,11 @@ the wire:
    < NAME synapse.app.appservice
    < PING 1490197665618
    < REPLICATE
    > POSITION events master 1
    > POSITION backfill master 1
    > POSITION caches master 1
    > RDATA caches master 2 ["get_user_by_id",["@01register-user:localhost:8823"],1490197670513]
    > RDATA events master 14 ["$149019767112vOHxz:localhost:8823",
    > POSITION events 1
    > POSITION backfill 1
    > POSITION caches 1
    > RDATA caches 2 ["get_user_by_id",["@01register-user:localhost:8823"],1490197670513]
    > RDATA events 14 ["$149019767112vOHxz:localhost:8823",
        "!AFDCvgApUmpdfVjIXm:localhost:8823","m.room.guest_access","",null]
    < PING 1490197675618
    > ERROR server stopping
@@ -153,10 +151,10 @@ position without needing to send data with the `RDATA` command.
An example of a batched set of `RDATA` is:

    > RDATA caches master batch ["get_user_by_id",["@test:localhost:8823"],1490197670513]
    > RDATA caches master batch ["get_user_by_id",["@test2:localhost:8823"],1490197670513]
    > RDATA caches master batch ["get_user_by_id",["@test3:localhost:8823"],1490197670513]
    > RDATA caches master 54 ["get_user_by_id",["@test4:localhost:8823"],1490197670513]
    > RDATA caches batch ["get_user_by_id",["@test:localhost:8823"],1490197670513]
    > RDATA caches batch ["get_user_by_id",["@test2:localhost:8823"],1490197670513]
    > RDATA caches batch ["get_user_by_id",["@test3:localhost:8823"],1490197670513]
    > RDATA caches 54 ["get_user_by_id",["@test4:localhost:8823"],1490197670513]

In this case the client shouldn't advance their caches token until it
sees the last `RDATA`.
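To make the framing concrete, here is an illustrative parser for the new `RDATA` form described above (a sketch, not the actual Synapse implementation; the function name is made up):

```python
import json

def parse_rdata(line: str):
    # New format: RDATA <stream_name> <instance_name> <token> <row_json>
    command, stream_name, instance_name, rest = line.split(" ", 3)
    assert command == "RDATA"
    token, row_json = rest.split(" ", 1)
    # A "batch" token means more rows for the same position follow; clients
    # should not advance their token until the final RDATA of the batch.
    parsed_token = None if token == "batch" else int(token)
    return stream_name, instance_name, parsed_token, json.loads(row_json)

print(parse_rdata('RDATA caches master 54 ["get_user_by_id", null, 1490197670513]'))
```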
@@ -180,11 +178,6 @@ client (C):
updates, and if so then fetch them out of band. Sent in response to a
REPLICATE command (but can happen at any time).

The POSITION command includes the source of the stream. Currently all streams
are written by a single process (usually "master"). If fetching missing
updates via HTTP API, rather than via the DB, then processes should make the
request to the appropriate process.

#### ERROR (S, C)

There was an error
@@ -203,7 +196,7 @@ Asks the server for the current position of all streams.
#### USER_SYNC (C)

A user has started or stopped syncing on this process.
A user has started or stopped syncing

#### CLEAR_USER_SYNC (C)

@@ -223,6 +216,10 @@ Asks the server for the current position of all streams.

Inform the server a cache should be invalidated

#### SYNC (S, C)

Used exclusively in tests

### REMOTE_SERVER_UP (S, C)

Inform other processes that a remote server may have come back online.
@@ -241,12 +238,12 @@ Each individual cache invalidation results in a row being sent down
replication, which includes the cache name (the name of the function)
and the key to invalidate. For example:

    > RDATA caches master 550953771 ["get_user_by_id", ["@bob:example.com"], 1550574873251]
    > RDATA caches 550953771 ["get_user_by_id", ["@bob:example.com"], 1550574873251]

Alternatively, an entire cache can be invalidated by sending down a `null`
instead of the key. For example:

    > RDATA caches master 550953772 ["get_user_by_id", null, 1550574873252]
    > RDATA caches 550953772 ["get_user_by_id", null, 1550574873252]

However, there are times when a number of caches need to be invalidated
at the same time with the same key. To reduce traffic we batch those
@@ -120,7 +120,7 @@ Your home server configuration file needs the following extra keys:
As an example, here is the relevant section of the config file for matrix.org:

    turn_uris: [ "turn:turn.matrix.org:3478?transport=udp", "turn:turn.matrix.org:3478?transport=tcp" ]
    turn_shared_secret: "n0t4ctuAllymatr1Xd0TorgSshar3d5ecret4obvIousreAsons"
    turn_shared_secret: n0t4ctuAllymatr1Xd0TorgSshar3d5ecret4obvIousreAsons
    turn_user_lifetime: 86400000
    turn_allow_guests: True
@@ -286,8 +286,6 @@ Additionally, the following REST endpoints can be handled for GET requests:
    ^/_matrix/client/(api/v1|r0|unstable)/pushrules/.*$
    ^/_matrix/client/(api/v1|r0|unstable)/groups/.*$
    ^/_matrix/client/(api/v1|r0|unstable)/user/[^/]*/account_data/
    ^/_matrix/client/(api/v1|r0|unstable)/user/[^/]*/rooms/[^/]*/account_data/

Additionally, the following REST endpoints can be handled, but all requests must
be routed to the same instance:
@@ -33,10 +33,6 @@ parts:
    python-version: python3
    python-packages:
      - '.[all]'
      - pip
      - setuptools
      - setuptools-scm
      - wheel
    build-packages:
      - libffi-dev
      - libturbojpeg0-dev
@@ -1,13 +0,0 @@
from .sorteddict import (
    SortedDict,
    SortedKeysView,
    SortedItemsView,
    SortedValuesView,
)

__all__ = [
    "SortedDict",
    "SortedKeysView",
    "SortedItemsView",
    "SortedValuesView",
]
@@ -1,124 +0,0 @@
# stub for SortedDict. This is a lightly edited copy of
# https://github.com/grantjenks/python-sortedcontainers/blob/eea42df1f7bad2792e8da77335ff888f04b9e5ae/sortedcontainers/sorteddict.pyi
# (from https://github.com/grantjenks/python-sortedcontainers/pull/107)

from typing import (
    Any,
    Callable,
    Dict,
    Hashable,
    Iterator,
    Iterable,
    ItemsView,
    KeysView,
    List,
    Mapping,
    Optional,
    Sequence,
    Type,
    TypeVar,
    Tuple,
    Union,
    ValuesView,
    overload,
)

_T = TypeVar("_T")
_S = TypeVar("_S")
_T_h = TypeVar("_T_h", bound=Hashable)
_KT = TypeVar("_KT", bound=Hashable)  # Key type.
_VT = TypeVar("_VT")  # Value type.
_KT_co = TypeVar("_KT_co", covariant=True, bound=Hashable)
_VT_co = TypeVar("_VT_co", covariant=True)
_SD = TypeVar("_SD", bound=SortedDict)
_Key = Callable[[_T], Any]

class SortedDict(Dict[_KT, _VT]):
    @overload
    def __init__(self, **kwargs: _VT) -> None: ...
    @overload
    def __init__(self, __map: Mapping[_KT, _VT], **kwargs: _VT) -> None: ...
    @overload
    def __init__(
        self, __iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT
    ) -> None: ...
    @overload
    def __init__(self, __key: _Key[_KT], **kwargs: _VT) -> None: ...
    @overload
    def __init__(
        self, __key: _Key[_KT], __map: Mapping[_KT, _VT], **kwargs: _VT
    ) -> None: ...
    @overload
    def __init__(
        self, __key: _Key[_KT], __iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT
    ) -> None: ...
    @property
    def key(self) -> Optional[_Key[_KT]]: ...
    @property
    def iloc(self) -> SortedKeysView[_KT]: ...
    def clear(self) -> None: ...
    def __delitem__(self, key: _KT) -> None: ...
    def __iter__(self) -> Iterator[_KT]: ...
    def __reversed__(self) -> Iterator[_KT]: ...
    def __setitem__(self, key: _KT, value: _VT) -> None: ...
    def _setitem(self, key: _KT, value: _VT) -> None: ...
    def copy(self: _SD) -> _SD: ...
    def __copy__(self: _SD) -> _SD: ...
    @classmethod
    @overload
    def fromkeys(cls, seq: Iterable[_T_h]) -> SortedDict[_T_h, None]: ...
    @classmethod
    @overload
    def fromkeys(cls, seq: Iterable[_T_h], value: _S) -> SortedDict[_T_h, _S]: ...
    def keys(self) -> SortedKeysView[_KT]: ...
    def items(self) -> SortedItemsView[_KT, _VT]: ...
    def values(self) -> SortedValuesView[_VT]: ...
    @overload
    def pop(self, key: _KT) -> _VT: ...
    @overload
    def pop(self, key: _KT, default: _T = ...) -> Union[_VT, _T]: ...
    def popitem(self, index: int = ...) -> Tuple[_KT, _VT]: ...
    def peekitem(self, index: int = ...) -> Tuple[_KT, _VT]: ...
    def setdefault(self, key: _KT, default: Optional[_VT] = ...) -> _VT: ...
    @overload
    def update(self, __map: Mapping[_KT, _VT], **kwargs: _VT) -> None: ...
    @overload
    def update(self, __iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT) -> None: ...
    @overload
    def update(self, **kwargs: _VT) -> None: ...
    def __reduce__(
        self,
    ) -> Tuple[
        Type[SortedDict[_KT, _VT]], Tuple[Callable[[_KT], Any], List[Tuple[_KT, _VT]]],
    ]: ...
    def __repr__(self) -> str: ...
    def _check(self) -> None: ...
    def islice(
        self, start: Optional[int] = ..., stop: Optional[int] = ..., reverse=bool,
    ) -> Iterator[_KT]: ...
    def bisect_left(self, value: _KT) -> int: ...
    def bisect_right(self, value: _KT) -> int: ...

class SortedKeysView(KeysView[_KT_co], Sequence[_KT_co]):
    @overload
    def __getitem__(self, index: int) -> _KT_co: ...
    @overload
    def __getitem__(self, index: slice) -> List[_KT_co]: ...
    def __delitem__(self, index: Union[int, slice]) -> None: ...

class SortedItemsView(  # type: ignore
    ItemsView[_KT_co, _VT_co], Sequence[Tuple[_KT_co, _VT_co]]
):
    def __iter__(self) -> Iterator[Tuple[_KT_co, _VT_co]]: ...
    @overload
    def __getitem__(self, index: int) -> Tuple[_KT_co, _VT_co]: ...
    @overload
    def __getitem__(self, index: slice) -> List[Tuple[_KT_co, _VT_co]]: ...
    def __delitem__(self, index: Union[int, slice]) -> None: ...

class SortedValuesView(ValuesView[_VT_co], Sequence[_VT_co]):
    @overload
    def __getitem__(self, index: int) -> _VT_co: ...
    @overload
    def __getitem__(self, index: slice) -> List[_VT_co]: ...
    def __delitem__(self, index: Union[int, slice]) -> None: ...
@@ -1,43 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Contains *incomplete* type hints for txredisapi.
"""

from typing import List, Optional, Union

class RedisProtocol:
    def publish(self, channel: str, message: bytes): ...

class SubscriberProtocol:
    password: Optional[str]
    def subscribe(self, channels: Union[str, List[str]]): ...
    def connectionMade(self): ...
    def connectionLost(self, reason): ...

def lazyConnection(
    host: str = ...,
    port: int = ...,
    dbid: Optional[int] = ...,
    reconnect: bool = ...,
    charset: str = ...,
    password: Optional[str] = ...,
    connectTimeout: Optional[int] = ...,
    replyTimeout: Optional[int] = ...,
    convertNumbers: bool = ...,
) -> RedisProtocol: ...

class SubscriberFactory:
    def buildProtocol(self, addr): ...
@@ -36,7 +36,7 @@ try:
except ImportError:
    pass

__version__ = "1.12.4"
__version__ = "1.12.3"

if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
    # We import here so that we don't have to install a bunch of deps when
@@ -26,15 +26,16 @@ from twisted.internet import defer
import synapse.logging.opentracing as opentracing
import synapse.types
from synapse import event_auth
from synapse.api.auth_blocking import AuthBlocking
from synapse.api.constants import EventTypes, Membership
from synapse.api.constants import EventTypes, LimitBlockingTypes, Membership, UserTypes
from synapse.api.errors import (
    AuthError,
    Codes,
    InvalidClientTokenError,
    MissingClientTokenError,
    ResourceLimitError,
)
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.config.server import is_threepid_reserved
from synapse.events import EventBase
from synapse.types import StateMap, UserID
from synapse.util.caches import CACHE_SIZE_FACTOR, register_cache
@@ -76,11 +77,7 @@ class Auth(object):
        self.token_cache = LruCache(CACHE_SIZE_FACTOR * 10000)
        register_cache("cache", "token_cache", self.token_cache)

        self._auth_blocking = AuthBlocking(self.hs)

        self._account_validity = hs.config.account_validity
        self._track_appservice_user_ips = hs.config.track_appservice_user_ips
        self._macaroon_secret_key = hs.config.macaroon_secret_key

    @defer.inlineCallbacks
    def check_from_context(self, room_version: str, event, context, do_sig_check=True):
@@ -194,7 +191,7 @@ class Auth(object):
            opentracing.set_tag("authenticated_entity", user_id)
            opentracing.set_tag("appservice_id", app_service.id)

            if ip_addr and self._track_appservice_user_ips:
            if ip_addr and self.hs.config.track_appservice_user_ips:
                yield self.store.insert_client_ip(
                    user_id=user_id,
                    access_token=access_token,
@@ -457,7 +454,7 @@ class Auth(object):
        # access_tokens include a nonce for uniqueness: any value is acceptable
        v.satisfy_general(lambda c: c.startswith("nonce = "))

        v.verify(macaroon, self._macaroon_secret_key)
        v.verify(macaroon, self.hs.config.macaroon_secret_key)

    def _verify_expiry(self, caveat):
        prefix = "time < "
@@ -540,7 +537,8 @@ class Auth(object):
        return defer.succeed(auth_ids)

    async def check_can_change_room_list(self, room_id: str, user: UserID):
    @defer.inlineCallbacks
    def check_can_change_room_list(self, room_id: str, user: UserID):
        """Determine whether the user is allowed to edit the room's entry in the
        published room list.
@@ -549,17 +547,17 @@ class Auth(object):
            user
        """

        is_admin = await self.is_server_admin(user)
        is_admin = yield self.is_server_admin(user)
        if is_admin:
            return True

        user_id = user.to_string()
        await self.check_user_in_room(room_id, user_id)
        yield self.check_user_in_room(room_id, user_id)

        # We currently require the user is a "moderator" in the room. We do this
        # by checking if they would (theoretically) be able to change the
        # m.room.canonical_alias events
        power_level_event = await self.state.get_current_state(
        power_level_event = yield self.state.get_current_state(
            room_id, EventTypes.PowerLevels, ""
        )
@@ -666,5 +664,71 @@ class Auth(object):
                % (user_id, room_id),
            )

    def check_auth_blocking(self, *args, **kwargs):
        return self._auth_blocking.check_auth_blocking(*args, **kwargs)
    @defer.inlineCallbacks
    def check_auth_blocking(self, user_id=None, threepid=None, user_type=None):
        """Checks if the user should be rejected for some external reason,
        such as monthly active user limiting or global disable flag

        Args:
            user_id(str|None): If present, checks for presence against existing
                MAU cohort

            threepid(dict|None): If present, checks for presence against configured
                reserved threepid. Used in cases where the user is trying to register
                with a MAU blocked server, normally they would be rejected but their
                threepid is on the reserved list. user_id and
                threepid should never be set at the same time.

            user_type(str|None): If present, is used to decide whether to check against
                certain blocking reasons like MAU.
        """

        # Never fail an auth check for the server notices users or support user
        # This can be a problem where event creation is prohibited due to blocking
        if user_id is not None:
            if user_id == self.hs.config.server_notices_mxid:
                return
            if (yield self.store.is_support_user(user_id)):
                return

        if self.hs.config.hs_disabled:
            raise ResourceLimitError(
                403,
                self.hs.config.hs_disabled_message,
                errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
                admin_contact=self.hs.config.admin_contact,
                limit_type=LimitBlockingTypes.HS_DISABLED,
            )
        if self.hs.config.limit_usage_by_mau is True:
            assert not (user_id and threepid)

            # If the user is already part of the MAU cohort or a trial user
            if user_id:
                timestamp = yield self.store.user_last_seen_monthly_active(user_id)
                if timestamp:
                    return

                is_trial = yield self.store.is_trial_user(user_id)
                if is_trial:
                    return
            elif threepid:
                # If the user does not exist yet, but is signing up with a
                # reserved threepid then pass auth check
                if is_threepid_reserved(
                    self.hs.config.mau_limits_reserved_threepids, threepid
                ):
                    return
            elif user_type == UserTypes.SUPPORT:
                # If the user does not exist yet and is of type "support",
                # allow registration. Support users are excluded from MAU checks.
                return
            # Else if there is no room in the MAU bucket, bail
            current_mau = yield self.store.get_monthly_active_count()
            if current_mau >= self.hs.config.max_mau_value:
                raise ResourceLimitError(
                    403,
                    "Monthly Active User Limit Exceeded",
                    admin_contact=self.hs.config.admin_contact,
                    errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
                    limit_type=LimitBlockingTypes.MONTHLY_ACTIVE_USER,
                )
@@ -1,104 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2020 The Matrix.org Foundation C.I.C.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-
-from twisted.internet import defer
-
-from synapse.api.constants import LimitBlockingTypes, UserTypes
-from synapse.api.errors import Codes, ResourceLimitError
-from synapse.config.server import is_threepid_reserved
-
-logger = logging.getLogger(__name__)
-
-
-class AuthBlocking(object):
-    def __init__(self, hs):
-        self.store = hs.get_datastore()
-
-        self._server_notices_mxid = hs.config.server_notices_mxid
-        self._hs_disabled = hs.config.hs_disabled
-        self._hs_disabled_message = hs.config.hs_disabled_message
-        self._admin_contact = hs.config.admin_contact
-        self._max_mau_value = hs.config.max_mau_value
-        self._limit_usage_by_mau = hs.config.limit_usage_by_mau
-        self._mau_limits_reserved_threepids = hs.config.mau_limits_reserved_threepids
-
-    @defer.inlineCallbacks
-    def check_auth_blocking(self, user_id=None, threepid=None, user_type=None):
-        """Checks if the user should be rejected for some external reason,
-        such as monthly active user limiting or global disable flag
-
-        Args:
-            user_id(str|None): If present, checks for presence against existing
-                MAU cohort
-
-            threepid(dict|None): If present, checks for presence against configured
-                reserved threepid. Used in cases where the user is trying register
-                with a MAU blocked server, normally they would be rejected but their
-                threepid is on the reserved list. user_id and
-                threepid should never be set at the same time.
-
-            user_type(str|None): If present, is used to decide whether to check against
-                certain blocking reasons like MAU.
-        """
-        # Never fail an auth check for the server notices users or support user
-        # This can be a problem where event creation is prohibited due to blocking
-        if user_id is not None:
-            if user_id == self._server_notices_mxid:
-                return
-            if (yield self.store.is_support_user(user_id)):
-                return
-
-        if self._hs_disabled:
-            raise ResourceLimitError(
-                403,
-                self._hs_disabled_message,
-                errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
-                admin_contact=self._admin_contact,
-                limit_type=LimitBlockingTypes.HS_DISABLED,
-            )
-        if self._limit_usage_by_mau is True:
-            assert not (user_id and threepid)
-
-            # If the user is already part of the MAU cohort or a trial user
-            if user_id:
-                timestamp = yield self.store.user_last_seen_monthly_active(user_id)
-                if timestamp:
-                    return
-
-                is_trial = yield self.store.is_trial_user(user_id)
-                if is_trial:
-                    return
-            elif threepid:
-                # If the user does not exist yet, but is signing up with a
-                # reserved threepid then pass auth check
-                if is_threepid_reserved(self._mau_limits_reserved_threepids, threepid):
-                    return
-            elif user_type == UserTypes.SUPPORT:
-                # If the user does not exist yet and is of type "support",
-                # allow registration. Support users are excluded from MAU checks.
-                return
-            # Else if there is no room in the MAU bucket, bail
-            current_mau = yield self.store.get_monthly_active_count()
-            if current_mau >= self._max_mau_value:
-                raise ResourceLimitError(
-                    403,
-                    "Monthly Active User Limit Exceeded",
-                    admin_contact=self._admin_contact,
-                    errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
-                    limit_type=LimitBlockingTypes.MONTHLY_ACTIVE_USER,
-                )
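A caller of this (now deleted) class would drive the check from a Twisted inlineCallbacks generator and turn a refusal into an HTTP error. A sketch under those assumptions; the handler is hypothetical and the stand-in exception only mirrors the fields the real ResourceLimitError is constructed with above:

    from twisted.internet import defer


    class ResourceLimitError(Exception):
        """Stand-in for synapse.api.errors.ResourceLimitError (sketch only)."""

        def __init__(self, code, msg, errcode=None, limit_type=None):
            super(ResourceLimitError, self).__init__(msg)
            self.code, self.msg = code, msg
            self.errcode, self.limit_type = errcode, limit_type


    @defer.inlineCallbacks
    def handle_request(auth, user_id):
        # check_auth_blocking returns None when the user may proceed, and
        # raises ResourceLimitError (403, errcode/limit_type set) otherwise.
        try:
            yield auth.check_auth_blocking(user_id=user_id)
        except ResourceLimitError as e:
            # limit_type distinguishes HS_DISABLED from MONTHLY_ACTIVE_USER.
            defer.returnValue((e.code, {"errcode": e.errcode, "error": e.msg}))
        defer.returnValue((200, {}))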
@@ -97,8 +97,6 @@ class EventTypes(object):
 
     Retention = "m.room.retention"
 
-    Presence = "m.presence"
-
 
 class RejectedReason(object):
     AUTH_ERROR = "auth_error"
@@ -270,7 +270,7 @@ def start(hs, listeners=None):
 
         # Start the tracer
         synapse.logging.opentracing.init_tracer(  # type: ignore[attr-defined] # noqa
-            hs
+            hs.config
         )
 
         # It is now safe to start your Synapse.
@@ -316,7 +316,7 @@ def setup_sentry(hs):
         scope.set_tag("matrix_server_name", hs.config.server_name)
 
         app = hs.config.worker_app if hs.config.worker_app else "synapse.app.homeserver"
-        name = hs.get_instance_name()
+        name = hs.config.worker_name if hs.config.worker_name else "master"
         scope.set_tag("worker_app", app)
         scope.set_tag("worker_name", name)
 
@@ -17,9 +17,6 @@
 import contextlib
 import logging
 import sys
-from typing import Dict, Iterable
-
-from typing_extensions import ContextManager
 
 from twisted.internet import defer, reactor
 from twisted.web.resource import NoResource
@@ -41,14 +38,14 @@ from synapse.config.homeserver import HomeServerConfig
 from synapse.config.logger import setup_logging
 from synapse.federation import send_queue
 from synapse.federation.transport.server import TransportLayerServer
-from synapse.handlers.presence import BasePresenceHandler, get_interested_parties
+from synapse.handlers.presence import PresenceHandler, get_interested_parties
 from synapse.http.server import JsonResource
 from synapse.http.servlet import RestServlet, parse_json_object_from_request
 from synapse.http.site import SynapseSite
 from synapse.logging.context import LoggingContext
 from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
 from synapse.metrics.background_process_metrics import run_as_background_process
-from synapse.replication.slave.storage._base import BaseSlavedStore
+from synapse.replication.slave.storage._base import BaseSlavedStore, __func__
 from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
 from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
 from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
@@ -113,10 +110,6 @@ from synapse.rest.client.v1.voip import VoipRestServlet
 from synapse.rest.client.v2_alpha import groups, sync, user_directory
 from synapse.rest.client.v2_alpha._base import client_patterns
 from synapse.rest.client.v2_alpha.account import ThreepidRestServlet
-from synapse.rest.client.v2_alpha.account_data import (
-    AccountDataServlet,
-    RoomAccountDataServlet,
-)
 from synapse.rest.client.v2_alpha.keys import KeyChangesServlet, KeyQueryServlet
 from synapse.rest.client.v2_alpha.register import RegisterRestServlet
 from synapse.rest.client.versions import VersionsRestServlet
@@ -127,7 +120,6 @@ from synapse.storage.data_stores.main.monthly_active_users import (
     MonthlyActiveUsersWorkerStore,
 )
 from synapse.storage.data_stores.main.presence import UserPresenceState
-from synapse.storage.data_stores.main.ui_auth import UIAuthWorkerStore
 from synapse.storage.data_stores.main.user_directory import UserDirectoryStore
 from synapse.types import ReadReceipt
 from synapse.util.async_helpers import Linearizer
@@ -229,32 +221,23 @@ class KeyUploadServlet(RestServlet):
         return 200, {"one_time_key_counts": result}
 
 
-class _NullContextManager(ContextManager[None]):
-    """A context manager which does nothing."""
-
-    def __exit__(self, exc_type, exc_val, exc_tb):
-        pass
-
-
 UPDATE_SYNCING_USERS_MS = 10 * 1000
 
 
-class GenericWorkerPresence(BasePresenceHandler):
+class GenericWorkerPresence(object):
     def __init__(self, hs):
-        super().__init__(hs)
         self.hs = hs
         self.is_mine_id = hs.is_mine_id
         self.http_client = hs.get_simple_http_client()
-
-        self._presence_enabled = hs.config.use_presence
-
-        # The number of ongoing syncs on this process, by user id.
-        # Empty if _presence_enabled is false.
-        self._user_to_num_current_syncs = {}  # type: Dict[str, int]
-
+        self.store = hs.get_datastore()
+        self.user_to_num_current_syncs = {}
         self.clock = hs.get_clock()
         self.notifier = hs.get_notifier()
         self.instance_id = hs.get_instance_id()
 
+        active_presence = self.store.take_presence_startup_info()
+        self.user_to_current_state = {state.user_id: state for state in active_presence}
+
         # user_id -> last_sync_ms. Lists the users that have stopped syncing
         # but we haven't notified the master of that yet
         self.users_going_offline = {}
@@ -272,13 +255,13 @@ class GenericWorkerPresence(BasePresenceHandler):
         )
 
     def _on_shutdown(self):
-        if self._presence_enabled:
+        if self.hs.config.use_presence:
            self.hs.get_tcp_replication().send_command(
                ClearUserSyncsCommand(self.instance_id)
            )
 
     def send_user_sync(self, user_id, is_syncing, last_sync_ms):
-        if self._presence_enabled:
+        if self.hs.config.use_presence:
            self.hs.get_tcp_replication().send_user_sync(
                self.instance_id, user_id, is_syncing, last_sync_ms
            )
@@ -320,33 +303,28 @@ class GenericWorkerPresence(BasePresenceHandler):
         # TODO Hows this supposed to work?
         return defer.succeed(None)
 
-    async def user_syncing(
-        self, user_id: str, affect_presence: bool
-    ) -> ContextManager[None]:
-        """Record that a user is syncing.
+    get_states = __func__(PresenceHandler.get_states)
+    get_state = __func__(PresenceHandler.get_state)
+    current_state_for_users = __func__(PresenceHandler.current_state_for_users)
 
-        Called by the sync and events servlets to record that a user has connected to
-        this worker and is waiting for some events.
-        """
-        if not affect_presence or not self._presence_enabled:
-            return _NullContextManager()
-
-        curr_sync = self._user_to_num_current_syncs.get(user_id, 0)
-        self._user_to_num_current_syncs[user_id] = curr_sync + 1
-
-        # If we went from no in flight sync to some, notify replication
-        if self._user_to_num_current_syncs[user_id] == 1:
-            self.mark_as_coming_online(user_id)
+    def user_syncing(self, user_id, affect_presence):
+        if affect_presence:
+            curr_sync = self.user_to_num_current_syncs.get(user_id, 0)
+            self.user_to_num_current_syncs[user_id] = curr_sync + 1
+
+            # If we went from no in flight sync to some, notify replication
+            if self.user_to_num_current_syncs[user_id] == 1:
+                self.mark_as_coming_online(user_id)
 
         def _end():
             # We check that the user_id is in user_to_num_current_syncs because
             # user_to_num_current_syncs may have been cleared if we are
             # shutting down.
-            if user_id in self._user_to_num_current_syncs:
-                self._user_to_num_current_syncs[user_id] -= 1
+            if affect_presence and user_id in self.user_to_num_current_syncs:
+                self.user_to_num_current_syncs[user_id] -= 1
 
                 # If we went from one in flight sync to non, notify replication
-                if self._user_to_num_current_syncs[user_id] == 0:
+                if self.user_to_num_current_syncs[user_id] == 0:
                     self.mark_as_going_offline(user_id)
 
         @contextlib.contextmanager
@@ -356,7 +334,7 @@ class GenericWorkerPresence(BasePresenceHandler):
             finally:
                 _end()
 
-        return _user_syncing()
+        return defer.succeed(_user_syncing())
 
     @defer.inlineCallbacks
     def notify_from_replication(self, states, stream_id):
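The user_syncing machinery in these hunks is reference counting around in-flight /sync requests: the transition 0 -> 1 marks the user as coming online, 1 -> 0 as going offline. A self-contained sketch of the same pattern (invented names, plain Python):

    import contextlib


    class SyncCounter:
        """Sketch: count in-flight syncs per user, firing edge transitions."""

        def __init__(self, on_online, on_offline):
            self._counts = {}  # user_id -> number of in-flight syncs
            self._on_online = on_online
            self._on_offline = on_offline

        @contextlib.contextmanager
        def syncing(self, user_id):
            self._counts[user_id] = self._counts.get(user_id, 0) + 1
            if self._counts[user_id] == 1:
                self._on_online(user_id)  # 0 -> 1: user came online
            try:
                yield
            finally:
                self._counts[user_id] -= 1
                if self._counts[user_id] == 0:
                    self._on_offline(user_id)  # 1 -> 0: user went offline


    # Usage:
    counter = SyncCounter(print, print)
    with counter.syncing("@alice:example.com"):
        pass  # handle the long-poll here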
@@ -391,12 +369,15 @@ class GenericWorkerPresence(BasePresenceHandler):
         stream_id = token
         yield self.notify_from_replication(states, stream_id)
 
-    def get_currently_syncing_users_for_replication(self) -> Iterable[str]:
-        return [
-            user_id
-            for user_id, count in self._user_to_num_current_syncs.items()
-            if count > 0
-        ]
+    def get_currently_syncing_users(self):
+        if self.hs.config.use_presence:
+            return [
+                user_id
+                for user_id, count in self.user_to_num_current_syncs.items()
+                if count > 0
+            ]
+        else:
+            return set()
 
 
 class GenericWorkerTyping(object):
@@ -413,6 +394,12 @@ class GenericWorkerTyping(object):
         # map room IDs to sets of users currently typing
         self._room_typing = {}
 
+    def stream_positions(self):
+        # We must update this typing token from the response of the previous
+        # sync. In particular, the stream id may "reset" back to zero/a low
+        # value which we *must* use for the next replication request.
+        return {"typing": self._latest_room_serial}
+
     def process_replication_rows(self, token, rows):
         if self._latest_room_serial > token:
             # The master has gone backwards. To prevent inconsistent data, just
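stream_positions is the worker's half of the replication handshake: a map of stream name to the last token it has seen, sent with the next replication request so the master only returns newer rows. A toy model of that cycle (in-memory stand-ins for the real TCP protocol):

    # Sketch: how per-stream tokens drive an incremental replication poll.
    def replicate_once(master_streams, positions):
        """Fetch rows newer than `positions` and advance the tokens."""
        updates = {}
        for name, rows in master_streams.items():
            last_seen = positions.get(name, 0)
            new_rows = [(token, row) for token, row in rows if token > last_seen]
            if new_rows:
                updates[name] = new_rows
                positions[name] = new_rows[-1][0]  # advance to the newest token
        return updates


    master = {"typing": [(1, "room-a"), (2, "room-b")]}
    positions = {"typing": 1}
    print(replicate_once(master, positions))  # {'typing': [(2, 'room-b')]}
    print(positions)                          # {'typing': 2}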
@@ -434,7 +421,6 @@ class GenericWorkerSlavedStore(
     # FIXME(#3714): We need to add UserDirectoryStore as we write directly
     # rather than going via the correct worker.
     UserDirectoryStore,
-    UIAuthWorkerStore,
     SlavedDeviceInboxStore,
     SlavedDeviceStore,
     SlavedReceiptsStore,
@@ -515,8 +501,6 @@ class GenericWorkerServer(HomeServer):
                     ProfileDisplaynameRestServlet(self).register(resource)
                     ProfileRestServlet(self).register(resource)
                     KeyUploadServlet(self).register(resource)
-                    AccountDataServlet(self).register(resource)
-                    RoomAccountDataServlet(self).register(resource)
 
                     sync.register_servlets(self, resource)
                     events.register_servlets(self, resource)
@@ -635,7 +619,8 @@ class GenericWorkerReplicationHandler(ReplicationDataHandler):
 
         self.store = hs.get_datastore()
         self.typing_handler = hs.get_typing_handler()
-        self.presence_handler = hs.get_presence_handler()  # type: GenericWorkerPresence
+        # NB this is a SynchrotronPresence, not a normal PresenceHandler
+        self.presence_handler = hs.get_presence_handler()
         self.notifier = hs.get_notifier()
 
         self.notify_pushers = hs.config.start_pushers
@@ -646,11 +631,20 @@ class GenericWorkerReplicationHandler(ReplicationDataHandler):
         else:
             self.send_handler = None
 
-    async def on_rdata(self, stream_name, instance_name, token, rows):
-        await super().on_rdata(stream_name, instance_name, token, rows)
-        await self._process_and_notify(stream_name, instance_name, token, rows)
+    async def on_rdata(self, stream_name, token, rows):
+        await super(GenericWorkerReplicationHandler, self).on_rdata(
+            stream_name, token, rows
+        )
+        await self.process_and_notify(stream_name, token, rows)
 
-    async def _process_and_notify(self, stream_name, instance_name, token, rows):
+    def get_streams_to_replicate(self):
+        args = super(GenericWorkerReplicationHandler, self).get_streams_to_replicate()
+        args.update(self.typing_handler.stream_positions())
+        if self.send_handler:
+            args.update(self.send_handler.stream_positions())
+        return args
+
+    async def process_and_notify(self, stream_name, token, rows):
         try:
             if self.send_handler:
                 await self.send_handler.process_replication_rows(
@@ -784,6 +778,9 @@ class FederationSenderHandler(object):
     def wake_destination(self, server: str):
         self.federation_sender.wake_destination(server)
 
+    def stream_positions(self):
+        return {"federation": self.federation_position}
+
     async def process_replication_rows(self, stream_name, token, rows):
         # The federation stream contains things that we want to send out, e.g.
         # presence, typing, etc.
@@ -944,22 +941,17 @@ def start(config_options):
 
     synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts
 
-    hs = GenericWorkerServer(
+    ss = GenericWorkerServer(
         config.server_name,
         config=config,
         version_string="Synapse/" + get_version_string(synapse),
     )
 
-    setup_logging(hs, config, use_worker_options=True)
-
-    hs.setup()
-
-    # Ensure the replication streamer is always started in case we write to any
-    # streams. Will no-op if no streams can be written to by this worker.
-    hs.get_replication_streamer()
+    setup_logging(ss, config, use_worker_options=True)
+
+    ss.setup()
     reactor.addSystemEventTrigger(
-        "before", "startup", _base.start, hs, config.worker_listeners
+        "before", "startup", _base.start, ss, config.worker_listeners
     )
 
     _base.start_worker_reactor("synapse-generic-worker", config)
@@ -273,12 +273,6 @@ class SynapseHomeServer(HomeServer):
     def start_listening(self, listeners):
         config = self.get_config()
 
-        if config.redis_enabled:
-            # If redis is enabled we connect via the replication command handler
-            # in the same way as the workers (since we're effectively a client
-            # rather than a server).
-            self.get_tcp_replication().start_replication(self)
-
         for listener in listeners:
             if listener["type"] == "http":
                 self._listening_services.extend(self._listener_http(config, listener))
@@ -657,12 +657,6 @@ def read_config_files(config_files):
     for config_file in config_files:
         with open(config_file) as file_stream:
             yaml_config = yaml.safe_load(file_stream)
 
-            if not isinstance(yaml_config, dict):
-                err = "File %r is empty or doesn't parse into a key-value map. IGNORING."
-                print(err % (config_file,))
-                continue
-
             specified_config.update(yaml_config)
 
     if "server_name" not in specified_config:
@@ -138,7 +138,7 @@ class DatabaseConfig(Config):
         database_path = config.get("database_path")
 
         if multi_database_config and database_config:
-            raise ConfigError("Can't specify both 'database' and 'databases' in config")
+            raise ConfigError("Can't specify both 'database' and 'datbases' in config")
 
         if multi_database_config:
             if database_path:
@@ -108,14 +108,9 @@ class EmailConfig(Config):
         if self.trusted_third_party_id_servers:
             # XXX: It's a little confusing that account_threepid_delegate_email is modified
             # both in RegistrationConfig and here. We should factor this bit out
-
-            first_trusted_identity_server = self.trusted_third_party_id_servers[0]
-
-            # trusted_third_party_id_servers does not contain a scheme whereas
-            # account_threepid_delegate_email is expected to. Presume https
-            self.account_threepid_delegate_email = (
-                "https://" + first_trusted_identity_server
-            )  # type: Optional[str]
+            self.account_threepid_delegate_email = self.trusted_third_party_id_servers[
+                0
+            ]  # type: Optional[str]
             self.using_identity_server_from_trusted_list = True
         else:
             raise ConfigError(
@@ -31,7 +31,6 @@ from .password import PasswordConfig
 from .password_auth_providers import PasswordAuthProviderConfig
 from .push import PushConfig
 from .ratelimiting import RatelimitConfig
-from .redis import RedisConfig
 from .registration import RegistrationConfig
 from .repository import ContentRepositoryConfig
 from .room_directory import RoomDirectoryConfig
@@ -83,5 +82,4 @@ class HomeServerConfig(RootConfig):
         RoomDirectoryConfig,
         ThirdPartyRulesConfig,
         TracerConfig,
-        RedisConfig,
     ]
@@ -1,35 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2020 The Matrix.org Foundation C.I.C.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from synapse.config._base import Config
-from synapse.python_dependencies import check_requirements
-
-
-class RedisConfig(Config):
-    section = "redis"
-
-    def read_config(self, config, **kwargs):
-        redis_config = config.get("redis", {})
-        self.redis_enabled = redis_config.get("enabled", False)
-
-        if not self.redis_enabled:
-            return
-
-        check_requirements("redis")
-
-        self.redis_host = redis_config.get("host", "localhost")
-        self.redis_port = redis_config.get("port", 6379)
-        self.redis_dbid = redis_config.get("dbid")
-        self.redis_password = redis_config.get("password")
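For reference, the deleted class reads a `redis` section out of the parsed homeserver config. The same logic as a standalone function, applied to a plain dict such as yaml.safe_load would produce (sketch only):

    # Sketch: the parsing logic of RedisConfig.read_config over a plain dict.
    def parse_redis_section(config):
        redis = config.get("redis", {})
        if not redis.get("enabled", False):
            return None  # replication over redis stays switched off
        return {
            "host": redis.get("host", "localhost"),
            "port": redis.get("port", 6379),
            "dbid": redis.get("dbid"),          # optional: no default db id
            "password": redis.get("password"),  # optional
        }


    print(parse_redis_section({"redis": {"enabled": True, "port": 6380}}))
    # {'host': 'localhost', 'port': 6380, 'dbid': None, 'password': None}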
@@ -224,11 +224,12 @@ class ContentRepositoryConfig(Config):
         #
         #media_storage_providers:
         #  - module: file_system
-        #    # Whether to store newly uploaded local files
+        #    # Whether to write new local files.
         #    store_local: false
-        #    # Whether to store newly downloaded remote files
+        #    # Whether to write new remote media
         #    store_remote: false
-        #    # Whether to wait for successful storage for local uploads
+        #    # Whether to block upload requests waiting for write to this
+        #    # provider to complete
         #    store_synchronous: false
         #    config:
         #       directory: /mnt/some/other/directory
@@ -248,32 +248,32 @@ class SAML2Config(Config):
         #    remote:
         #      - url: https://our_idp/metadata.xml
         #
-        #  # By default, the user has to go to our login page first. If you'd like
-        #  # to allow IdP-initiated login, set 'allow_unsolicited: true' in a
-        #  # 'service.sp' section:
-        #  #
-        #  #service:
-        #  #  sp:
-        #  #    allow_unsolicited: true
+        #    # By default, the user has to go to our login page first. If you'd like
+        #    # to allow IdP-initiated login, set 'allow_unsolicited: true' in a
+        #    # 'service.sp' section:
+        #    #
+        #    #service:
+        #    #  sp:
+        #    #    allow_unsolicited: true
         #
-        #  # The examples below are just used to generate our metadata xml, and you
-        #  # may well not need them, depending on your setup. Alternatively you
-        #  # may need a whole lot more detail - see the pysaml2 docs!
+        #    # The examples below are just used to generate our metadata xml, and you
+        #    # may well not need them, depending on your setup. Alternatively you
+        #    # may need a whole lot more detail - see the pysaml2 docs!
         #
-        #  description: ["My awesome SP", "en"]
-        #  name: ["Test SP", "en"]
+        #    description: ["My awesome SP", "en"]
+        #    name: ["Test SP", "en"]
         #
-        #  organization:
-        #    name: Example com
-        #    display_name:
-        #      - ["Example co", "en"]
-        #    url: "http://example.com"
+        #    organization:
+        #      name: Example com
+        #      display_name:
+        #        - ["Example co", "en"]
+        #      url: "http://example.com"
         #
-        #  contact_person:
-        #    - given_name: Bob
-        #      sur_name: "the Sysadmin"
-        #      email_address": ["admin@example.com"]
-        #      contact_type": technical
+        #    contact_person:
+        #      - given_name: Bob
+        #        sur_name: "the Sysadmin"
+        #        email_address": ["admin@example.com"]
+        #        contact_type": technical
 
         # Instead of putting the config inline as above, you can specify a
         # separate pysaml2 configuration file:
@@ -507,17 +507,6 @@ class ServerConfig(Config):
 
         self.enable_ephemeral_messages = config.get("enable_ephemeral_messages", False)
 
-        # Inhibits the /requestToken endpoints from returning an error that might leak
-        # information about whether an e-mail address is in use or not on this
-        # homeserver, and instead return a 200 with a fake sid if this kind of error is
-        # met, without sending anything.
-        # This is a compromise between sending an email, which could be a spam vector,
-        # and letting the client know which email address is bound to an account and
-        # which one isn't.
-        self.request_token_inhibit_3pid_errors = config.get(
-            "request_token_inhibit_3pid_errors", False,
-        )
-
     def has_tls_listener(self) -> bool:
         return any(l["tls"] for l in self.listeners)
 
@@ -983,16 +972,6 @@ class ServerConfig(Config):
         #  - shortest_max_lifetime: 3d
         #    longest_max_lifetime: 1y
         #    interval: 1d
-
-        # Inhibits the /requestToken endpoints from returning an error that might leak
-        # information about whether an e-mail address is in use or not on this
-        # homeserver.
-        # Note that for some endpoints the error situation is the e-mail already being
-        # used, and for others the error is entering the e-mail being unused.
-        # If this option is enabled, instead of returning an error, these endpoints will
-        # act as if no error happened and return a fake session ID ('sid') to clients.
-        #
-        #request_token_inhibit_3pid_errors: true
         """
         % locals()
         )
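The option removed here trades error transparency for resistance to e-mail enumeration: with it enabled, a /requestToken call that would otherwise reveal whether an address is registered instead reports success with a throwaway session ID. A minimal sketch of that behaviour (illustrative, not Synapse's servlet code):

    import uuid


    def request_token(email, known_emails, inhibit_3pid_errors):
        """Sketch: return (status, body) for a /requestToken-style endpoint."""
        if email in known_emails:
            return 200, {"sid": "real-session-id"}  # token e-mail actually sent
        if inhibit_3pid_errors:
            # Pretend it worked: a fake sid leaks nothing about the address.
            return 200, {"sid": uuid.uuid4().hex}
        return 400, {"errcode": "M_THREEPID_NOT_FOUND"}


    print(request_token("a@b.c", {"x@y.z"}, inhibit_3pid_errors=True)[0])  # 200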
@@ -43,12 +43,6 @@ class SSOConfig(Config):
             ),
             "sso_account_deactivated_template",
         )
-        self.sso_auth_success_template = self.read_file(
-            os.path.join(
-                self.sso_redirect_confirm_template_dir, "sso_auth_success.html"
-            ),
-            "sso_auth_success_template",
-        )
 
         self.sso_client_whitelist = sso_config.get("client_whitelist") or []
@@ -113,30 +107,6 @@ class SSOConfig(Config):
         #
         # * server_name: the homeserver's name.
         #
-        # * HTML page which notifies the user that they are authenticating to confirm
-        #   an operation on their account during the user interactive authentication
-        #   process: 'sso_auth_confirm.html'.
-        #
-        #   When rendering, this template is given the following variables:
-        #     * redirect_url: the URL the user is about to be redirected to. Needs
-        #                     manual escaping (see
-        #                     https://jinja.palletsprojects.com/en/2.11.x/templates/#html-escaping).
-        #
-        #     * description: the operation which the user is being asked to confirm
-        #
-        # * HTML page shown after a successful user interactive authentication session:
-        #   'sso_auth_success.html'.
-        #
-        #   Note that this page must include the JavaScript which notifies of a successful authentication
-        #   (see https://matrix.org/docs/spec/client_server/r0.6.0#fallback).
-        #
-        #   This template has no additional variables.
-        #
-        # * HTML page shown during single sign-on if a deactivated user (according to Synapse's database)
-        #   attempts to login: 'sso_account_deactivated.html'.
-        #
-        #   This template has no additional variables.
-        #
         # You can see the default templates at:
         # https://github.com/matrix-org/synapse/tree/master/synapse/res/templates
         #
@@ -322,14 +322,11 @@ class _AsyncEventContextImpl(EventContext):
             self._current_state_ids = yield self._storage.state.get_state_ids_for_group(
                 self.state_group
             )
-            if self._event_state_key is not None:
+            if self._prev_state_id and self._event_state_key is not None:
                 self._prev_state_ids = dict(self._current_state_ids)
 
                 key = (self._event_type, self._event_state_key)
-                if self._prev_state_id:
-                    self._prev_state_ids[key] = self._prev_state_id
-                else:
-                    self._prev_state_ids.pop(key, None)
+                self._prev_state_ids[key] = self._prev_state_id
             else:
                 self._prev_state_ids = self._current_state_ids
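Both sides of this hunk derive prev_state_ids from current_state_ids by rewriting the entry for the event's own (type, state_key); the removed version also handled the case of no prior event by dropping the key. The fuller behaviour as a self-contained sketch over plain dicts:

    # Sketch: compute prev_state_ids for a state event from current_state_ids.
    def prev_state_ids(current_state_ids, event_type, state_key, prev_event_id):
        if state_key is None:
            # Not a state event: state before == state after.
            return current_state_ids
        prev = dict(current_state_ids)
        key = (event_type, state_key)
        if prev_event_id:
            prev[key] = prev_event_id  # point at the event being replaced
        else:
            prev.pop(key, None)        # no prior event for this key at all
        return prev


    current = {("m.room.name", ""): "$ev2"}
    print(prev_state_ids(current, "m.room.name", "", "$ev1"))  # {...: '$ev1'}
    print(prev_state_ids(current, "m.room.name", "", None))    # {}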
@@ -883,37 +883,18 @@ class FederationClient(FederationBase):
 
     def get_public_rooms(
         self,
-        remote_server: str,
-        limit: Optional[int] = None,
-        since_token: Optional[str] = None,
-        search_filter: Optional[Dict] = None,
-        include_all_networks: bool = False,
-        third_party_instance_id: Optional[str] = None,
+        destination,
+        limit=None,
+        since_token=None,
+        search_filter=None,
+        include_all_networks=False,
+        third_party_instance_id=None,
     ):
-        """Get the list of public rooms from a remote homeserver
+        if destination == self.server_name:
+            return
 
-        Args:
-            remote_server: The name of the remote server
-            limit: Maximum amount of rooms to return
-            since_token: Used for result pagination
-            search_filter: A filter dictionary to send the remote homeserver
-                and filter the result set
-            include_all_networks: Whether to include results from all third party instances
-            third_party_instance_id: Whether to only include results from a specific third
-                party instance
-
-        Returns:
-            Deferred[Dict[str, Any]]: The response from the remote server, or None if
-            `remote_server` is the same as the local server_name
-
-        Raises:
-            HttpResponseException: There was an exception returned from the remote server
-            SynapseException: M_FORBIDDEN when the remote server has disallowed publicRoom
-                requests over federation
-
-        """
         return self.transport_layer.get_public_rooms(
-            remote_server,
+            destination,
             limit,
             since_token,
             search_filter,
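Either version of get_public_rooms is a thin pass-through to the federation transport, returning None when asked about the local server. A hypothetical paginating caller, assuming the chunk/next_batch response shape of the publicRooms API:

    # Sketch: paging through a remote server's public room list.
    # `client` stands in for a FederationClient; fields as documented above.
    async def fetch_all_public_rooms(client, remote_server):
        rooms, since_token = [], None
        while True:
            resp = await client.get_public_rooms(
                remote_server, limit=100, since_token=since_token
            )
            if resp is None:  # asked ourselves: nothing to do
                break
            rooms.extend(resp.get("chunk", []))
            since_token = resp.get("next_batch")
            if not since_token:
                break
        return rooms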
@@ -976,13 +957,14 @@ class FederationClient(FederationBase):
 
         return signed_events
 
-    async def forward_third_party_invite(self, destinations, room_id, event_dict):
+    @defer.inlineCallbacks
+    def forward_third_party_invite(self, destinations, room_id, event_dict):
         for destination in destinations:
             if destination == self.server_name:
                 continue
 
             try:
-                await self.transport_layer.exchange_third_party_invite(
+                yield self.transport_layer.exchange_third_party_invite(
                     destination=destination, room_id=room_id, event_dict=event_dict
                 )
                 return None
@@ -31,7 +31,6 @@ Events are replicated via a separate events stream.
 
 import logging
 from collections import namedtuple
-from typing import Dict, List, Tuple, Type
 
 from six import iteritems
 
@@ -57,35 +56,21 @@ class FederationRemoteSendQueue(object):
         self.notifier = hs.get_notifier()
         self.is_mine_id = hs.is_mine_id
 
-        # Pending presence map user_id -> UserPresenceState
-        self.presence_map = {}  # type: Dict[str, UserPresenceState]
-
-        # Stream position -> list[user_id]
-        self.presence_changed = SortedDict()  # type: SortedDict[int, List[str]]
+        self.presence_map = {}  # Pending presence map user_id -> UserPresenceState
+        self.presence_changed = SortedDict()  # Stream position -> list[user_id]
 
         # Stores the destinations we need to explicitly send presence to about a
         # given user.
         # Stream position -> (user_id, destinations)
-        self.presence_destinations = (
-            SortedDict()
-        )  # type: SortedDict[int, Tuple[str, List[str]]]
+        self.presence_destinations = SortedDict()
 
-        # (destination, key) -> EDU
-        self.keyed_edu = {}  # type: Dict[Tuple[str, tuple], Edu]
+        self.keyed_edu = {}  # (destination, key) -> EDU
+        self.keyed_edu_changed = SortedDict()  # stream position -> (destination, key)
 
-        # stream position -> (destination, key)
-        self.keyed_edu_changed = (
-            SortedDict()
-        )  # type: SortedDict[int, Tuple[str, tuple]]
+        self.edus = SortedDict()  # stream position -> Edu
 
-        self.edus = SortedDict()  # type: SortedDict[int, Edu]
-
         # stream ID for the next entry into presence_changed/keyed_edu_changed/edus.
         self.pos = 1
 
         # map from stream ID to the time that stream entry was generated, so that we
         # can clear out entries after a while
-        self.pos_time = SortedDict()  # type: SortedDict[int, int]
+        self.pos_time = SortedDict()
 
         # EVERYTHING IS SAD. In particular, python only makes new scopes when
         # we make a new function, so we need to make a new function so the inner
@@ -173,10 +158,8 @@ class FederationRemoteSendQueue(object):
         for edu_key in self.keyed_edu_changed.values():
             live_keys.add(edu_key)
 
-        keys_to_del = [
-            edu_key for edu_key in self.keyed_edu if edu_key not in live_keys
-        ]
-        for edu_key in keys_to_del:
+        to_del = [edu_key for edu_key in self.keyed_edu if edu_key not in live_keys]
+        for edu_key in to_del:
             del self.keyed_edu[edu_key]
 
         # Delete things out of edu map
@@ -267,23 +250,19 @@ class FederationRemoteSendQueue(object):
         self._clear_queue_before_pos(token)
 
-    async def get_replication_rows(
-        self, instance_name: str, from_token: int, to_token: int, target_row_count: int
-    ) -> Tuple[List[Tuple[int, Tuple]], int, bool]:
+    def get_replication_rows(
+        self, from_token, to_token, limit, federation_ack=None
+    ):
         """Get rows to be sent over federation between the two tokens
 
         Args:
-            instance_name: the name of the current process
-            from_token: the previous stream token: the starting point for fetching the
-                updates
-            to_token: the new stream token: the point to get updates up to
-            target_row_count: a target for the number of rows to be returned.
-
-        Returns: a triplet `(updates, new_last_token, limited)`, where:
-            * `updates` is a list of `(token, row)` entries.
-            * `new_last_token` is the new position in stream.
-            * `limited` is whether there are more updates to fetch.
+            from_token (int)
+            to_token(int)
+            limit (int)
+            federation_ack (int): Optional. The position where the worker is
+                explicitly acknowledged it has handled. Allows us to drop
+                data from before that point
         """
-        # TODO: Handle target_row_count.
+        # TODO: Handle limit.
 
         # To handle restarts where we wrap around
         if from_token > self.pos:
@@ -291,7 +270,12 @@ class FederationRemoteSendQueue(object):
 
         # list of tuple(int, BaseFederationRow), where the first is the position
         # of the federation stream.
-        rows = []  # type: List[Tuple[int, BaseFederationRow]]
+        rows = []
+
+        # There should be only one reader, so lets delete everything its
+        # acknowledged its seen.
+        if federation_ack:
+            self._clear_queue_before_pos(federation_ack)
 
         # Fetch changed presence
         i = self.presence_changed.bisect_right(from_token)
@@ -348,11 +332,7 @@ class FederationRemoteSendQueue(object):
         # Sort rows based on pos
         rows.sort()
 
-        return (
-            [(pos, (row.TypeId, row.to_data())) for pos, row in rows],
-            to_token,
-            False,
-        )
+        return [(pos, row.TypeId, row.to_data()) for pos, row in rows]
 
 
 class BaseFederationRow(object):
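Note the return shape: the removed version hands back a (updates, new_last_token, limited) triple so the caller can keep paging, while the restored one returns a flat list of (position, type, data) rows. A toy consumer showing what the triple buys (hand-rolled fetcher, not the real stream classes):

    # Sketch: paging a stream whose fetcher returns (updates, new_token, limited).
    def fetch(from_token, to_token, target_row_count):
        rows = [(t, ("edu", {"pos": t})) for t in range(from_token + 1, to_token + 1)]
        limited = len(rows) > target_row_count
        rows = rows[:target_row_count]
        new_token = rows[-1][0] if rows else to_token
        return rows, new_token, limited


    token, upto = 0, 250
    while True:
        updates, token, limited = fetch(token, upto, target_row_count=100)
        # ... process `updates` ...
        if not limited:
            break  # caught up: the flat-list form cannot signal this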
@@ -361,7 +341,7 @@ class BaseFederationRow(object):
     Specifies how to identify, serialize and deserialize the different types.
     """
 
-    TypeId = ""  # Unique string that ids the type. Must be overriden in sub classes.
+    TypeId = None  # Unique string that ids the type. Must be overriden in sub classes.
 
     @staticmethod
     def from_data(data):
@@ -474,14 +454,10 @@ class EduRow(BaseFederationRow, namedtuple("EduRow", ("edu",))):  # Edu
         buff.edus.setdefault(self.edu.destination, []).append(self.edu)
 
 
-_rowtypes = (
-    PresenceRow,
-    PresenceDestinationsRow,
-    KeyedEduRow,
-    EduRow,
-)  # type: Tuple[Type[BaseFederationRow], ...]
-
-TypeToRow = {Row.TypeId: Row for Row in _rowtypes}
+TypeToRow = {
+    Row.TypeId: Row
+    for Row in (PresenceRow, PresenceDestinationsRow, KeyedEduRow, EduRow,)
+}
 
 
 ParsedFederationStreamData = namedtuple(
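In both forms, TypeToRow is a registry keyed by each row class's TypeId, so rows read off the federation stream can be revived by their type tag. A minimal model of that dispatch with a stand-in row class (the real ones are the namedtuple subclasses above):

    from collections import namedtuple


    class EduRowSketch(namedtuple("EduRowSketch", ("edu",))):
        TypeId = "e"  # unique tag carried on the wire

        @staticmethod
        def from_data(data):
            return EduRowSketch(edu=data)

        def to_data(self):
            return self.edu


    TYPE_TO_ROW = {Row.TypeId: Row for Row in (EduRowSketch,)}

    # Round trip: serialize for the stream, then revive by type tag.
    wire = ("e", EduRowSketch(edu={"destination": "remote.example"}).to_data())
    row = TYPE_TO_ROW[wire[0]].from_data(wire[1])
    print(row.edu["destination"])  # remote.example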
Some files were not shown because too many files have changed in this diff.