Compare commits

**41 Commits** (`anoa/modul...rei/userdi`)

| Author | SHA1 | Date |
|---|---|---|
|  | 4fdd64799e |  |
|  | d4df71d857 |  |
|  | fc6c5ae213 |  |
|  | 0db4dc8132 |  |
|  | d49f230249 |  |
|  | 48a637a6ff |  |
|  | 1552fa44db |  |
|  | d448469ea7 |  |
|  | 461cdb631f |  |
|  | 04d091fbcb |  |
|  | ac566f45f6 |  |
|  | 1f5473465d |  |
|  | 4953cd71df |  |
|  | f54f877f27 |  |
|  | 3bf973edc7 |  |
|  | 121fce7500 |  |
|  | 63d87c08c8 |  |
|  | d0fe417f5c |  |
|  | de92fb6a28 |  |
|  | 003a25ae5c |  |
|  | 8b1af08c6e |  |
|  | e7b559d2ca |  |
|  | a1c9869394 |  |
|  | 5e21e15f96 |  |
|  | edcf938173 |  |
|  | c071cd5a0e |  |
|  | d4eba4409f |  |
|  | 408f60540f |  |
|  | 023f215c68 |  |
|  | f167b35de9 |  |
|  | 6326d744c9 |  |
|  | ff155f7891 |  |
|  | 4bb26c95a9 |  |
|  | e157c63f68 |  |
|  | ce54477f6f |  |
|  | caf43c3d7c |  |
|  | 3d060eae6c |  |
|  | e7c3832ba6 |  |
|  | be4ea209e8 |  |
|  | 88efc75bab |  |
|  | 9418344db4 |  |
**.github/workflows/release-artifacts.yml** (vendored, 4 changes)

```diff
@@ -4,13 +4,15 @@ name: Build release artifacts
 on:
   # we build on PRs and develop to (hopefully) get early warning
-  # of things breaking (but only build one set of debs)
+  # of things breaking (but only build one set of debs). PRs skip
+  # building wheels on macOS & ARM.
   pull_request:
   push:
     branches: ["develop", "release-*"]
 
     # we do the full build on tags.
     tags: ["v*"]
+  merge_group:
   workflow_dispatch:
 
 concurrency:
```
**.github/workflows/tests.yml** (vendored, 1 change)

```diff
@@ -4,6 +4,7 @@ on:
   push:
     branches: ["develop", "release-*"]
   pull_request:
+  merge_group:
   workflow_dispatch:
 
 concurrency:
```
**CHANGES.md** (24 changes)

```diff
@@ -1,3 +1,25 @@
+Synapse 1.79.0 (2023-03-14)
+===========================
+
+No significant changes since 1.79.0rc2.
+
+
+Synapse 1.79.0rc2 (2023-03-13)
+==============================
+
+Bugfixes
+--------
+
+- Fix a bug introduced in Synapse 1.79.0rc1 where attempting to register a `on_remove_user_third_party_identifier` module API callback would be a no-op. ([\#15227](https://github.com/matrix-org/synapse/issues/15227))
+- Fix a rare bug introduced in Synapse 1.73 where events could remain unsent to other homeservers after a faster-join to a room. ([\#15248](https://github.com/matrix-org/synapse/issues/15248))
+
+
+Internal Changes
+----------------
+
+- Refactor `filter_events_for_server`. ([\#15240](https://github.com/matrix-org/synapse/issues/15240))
+
+
 Synapse 1.79.0rc1 (2023-03-07)
 ==============================
 
@@ -47,7 +69,7 @@ Improved Documentation
 Deprecations and Removals
 -------------------------
 
-- Deprecate the `on_threepid_bind` module callback, to be replaced by [`on_add_user_third_party_identifier`](https://matrix-org.github.io/synapse/v1.79/modules/third_party_rules_callbacks.html#on_add_user_third_party_identifier). See [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.79/docs/upgrade.md#upgrading-to-v1790). ([\#15044]
+- Deprecate the `on_threepid_bind` module callback, to be replaced by [`on_add_user_third_party_identifier`](https://matrix-org.github.io/synapse/v1.79/modules/third_party_rules_callbacks.html#on_add_user_third_party_identifier). See [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.79/docs/upgrade.md#upgrading-to-v1790). ([\#15044](https://github.com/matrix-org/synapse/issues/15044))
 - Remove the unspecced `room_alias` field from the [`/createRoom`](https://spec.matrix.org/v1.6/client-server-api/#post_matrixclientv3createroom) response. ([\#15093](https://github.com/matrix-org/synapse/issues/15093))
 - Remove the unspecced `PUT` on the `/knock/{roomIdOrAlias}` endpoint. ([\#15189](https://github.com/matrix-org/synapse/issues/15189))
 - Remove the undocumented and unspecced `type` parameter to the `/thumbnail` endpoint. ([\#15137](https://github.com/matrix-org/synapse/issues/15137))
```
**Cargo.lock** (generated, 8 changes)

```diff
@@ -323,18 +323,18 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
 
 [[package]]
 name = "serde"
-version = "1.0.152"
+version = "1.0.155"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb"
+checksum = "71f2b4817415c6d4210bfe1c7bfcf4801b2d904cb4d0e1a8fdb651013c9e86b8"
 dependencies = [
  "serde_derive",
 ]
 
 [[package]]
 name = "serde_derive"
-version = "1.0.152"
+version = "1.0.155"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "af487d118eecd09402d70a5d72551860e788df87b464af30e5ea6a38c75c541e"
+checksum = "d071a94a3fac4aff69d023a7f411e33f40f3483f8c5190b1953822b6b76d7630"
 dependencies = [
  "proc-macro2",
  "quote",
```
**changelog.d/14755.bugfix** (new file)

```diff
@@ -0,0 +1 @@
+Fix a long-standing bug in which the user directory would assume any remote membership state events represent a profile change.
```

**changelog.d/14756.bugfix** (new file)

```diff
@@ -0,0 +1 @@
+Fix a long-standing bug in which the user directory would assume any remote membership state events represent a profile change.
```

**changelog.d/14921.misc** (new file)

```diff
@@ -0,0 +1 @@
+Add additional functionality to declaring worker types when starting Complement in worker mode.
```

**changelog.d/15091.bugfix** (new file)

```diff
@@ -0,0 +1 @@
+Fix a long-standing bug in which the user directory would assume any remote membership state events represent a profile change.
```

**changelog.d/15222.misc** (new file)

```diff
@@ -0,0 +1 @@
+Improve log lines when purging rooms.
```

**changelog.d/15229.misc** (new file)

```diff
@@ -0,0 +1 @@
+Add topic and name events to group of events that are batch persisted when creating a room.
```

**changelog.d/15230.misc** (new file)

```diff
@@ -0,0 +1 @@
+Improve type hints.
```

**changelog.d/15231.misc** (new file)

```diff
@@ -0,0 +1 @@
+Improve type hints.
```

**changelog.d/15232.bugfix** (new file)

```diff
@@ -0,0 +1 @@
+Faster joins: Fix a bug introduced in Synapse 1.66 where spurious "Failed to find memberships ..." errors would be logged.
```

**changelog.d/15237.misc** (new file)

```diff
@@ -0,0 +1 @@
+Move various module API callback registration methods to a dedicated class.
```

**changelog.d/15238.misc** (new file)

```diff
@@ -0,0 +1 @@
+Improve type hints.
```

**changelog.d/15239.docker** (new file)

```diff
@@ -0,0 +1 @@
+Ensure the Dockerfile builds on platforms that don't have a `cryptography` wheel.
```

**changelog.d/15244.misc** (new file)

```diff
@@ -0,0 +1 @@
+Configure GitHub Actions for merge queues.
```

**changelog.d/15247.misc** (new file)

```diff
@@ -0,0 +1 @@
+Add schema comments about the `destinations` and `destination_rooms` tables.
```

**changelog.d/15252.misc** (new file)

```diff
@@ -0,0 +1 @@
+Bump hiredis from 2.2.1 to 2.2.2.
```

**changelog.d/15253.misc** (new file)

```diff
@@ -0,0 +1 @@
+Bump serde from 1.0.152 to 1.0.155.
```

**changelog.d/15254.misc** (new file)

```diff
@@ -0,0 +1 @@
+Bump pysaml2 from 7.2.1 to 7.3.1.
```

**changelog.d/15255.misc** (new file)

```diff
@@ -0,0 +1 @@
+Bump msgpack from 1.0.4 to 1.0.5.
```

**changelog.d/15256.misc** (new file)

```diff
@@ -0,0 +1 @@
+Bump gitpython from 3.1.30 to 3.1.31.
```

**changelog.d/15257.misc** (new file)

```diff
@@ -0,0 +1 @@
+Bump cryptography from 39.0.1 to 39.0.2.
```
**changelog.d/15262.misc** (new file)

```diff
@@ -0,0 +1 @@
+Skip processing of auto-join room behaviour if there are no auto-join rooms configured.
```
**changelog.d/15266.misc** (new file)

```diff
@@ -0,0 +1 @@
+Remove unused store method `_set_destination_retry_timings_emulated`.
```

**changelog.d/15272.misc** (new file)

```diff
@@ -0,0 +1 @@
+Remove unused class `DirectTcpReplicationClientFactory`.
```
**debian/changelog** (vendored, 12 changes)

```diff
@@ -1,3 +1,15 @@
+matrix-synapse-py3 (1.79.0) stable; urgency=medium
+
+  * New Synapse release 1.79.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 14 Mar 2023 16:14:50 +0100
+
+matrix-synapse-py3 (1.79.0~rc2) stable; urgency=medium
+
+  * New Synapse release 1.79.0rc2.
+
+ -- Synapse Packaging team <packages@matrix.org>  Mon, 13 Mar 2023 12:54:21 +0000
+
 matrix-synapse-py3 (1.79.0~rc1) stable; urgency=medium
 
   * New Synapse release 1.79.0rc1.
```
**docker/Dockerfile** (filename not preserved by the page extraction; inferred from the hunk's content and the `15239.docker` changelog entry above)

```diff
@@ -37,9 +37,24 @@ RUN \
    --mount=type=cache,target=/var/cache/apt,sharing=locked \
    --mount=type=cache,target=/var/lib/apt,sharing=locked \
   apt-get update -qq && apt-get install -yqq \
-      build-essential git libffi-dev libssl-dev \
+      build-essential curl git libffi-dev libssl-dev \
       && rm -rf /var/lib/apt/lists/*
 
+# Install rust and ensure its in the PATH.
+# (Rust may be needed to compile `cryptography`---which is one of poetry's
+# dependencies---on platforms that don't have a `cryptography` wheel.
+ENV RUSTUP_HOME=/rust
+ENV CARGO_HOME=/cargo
+ENV PATH=/cargo/bin:/rust/bin:$PATH
+RUN mkdir /rust /cargo
+
+RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable --profile minimal
+
+# arm64 builds consume a lot of memory if `CARGO_NET_GIT_FETCH_WITH_CLI` is not
+# set to true, so we expose it as a build-arg.
+ARG CARGO_NET_GIT_FETCH_WITH_CLI=false
+ENV CARGO_NET_GIT_FETCH_WITH_CLI=$CARGO_NET_GIT_FETCH_WITH_CLI
+
 # We install poetry in its own build stage to avoid its dependencies conflicting with
 # synapse's dependencies.
 RUN --mount=type=cache,target=/root/.cache/pip \
```
**docker/complement/conf/start_for_complement.sh** (filename not preserved by the page extraction; inferred from the `SYNAPSE_COMPLEMENT_USE_WORKERS` context)

```diff
@@ -51,8 +51,7 @@ if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then
   # -z True if the length of string is zero.
   if [[ -z "$SYNAPSE_WORKER_TYPES" ]]; then
     export SYNAPSE_WORKER_TYPES="\
-      event_persister, \
-      event_persister, \
+      event_persister:2, \
       background_worker, \
       frontend_proxy, \
       event_creator, \
@@ -64,7 +63,8 @@ if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then
       synchrotron, \
       client_reader, \
       appservice, \
-      pusher"
+      pusher, \
+      stream_writers=account_data+presence+receipts+to_device+typing"
 
   fi
   log "Workers requested: $SYNAPSE_WORKER_TYPES"
```
**docker/configure_workers_and_start.py** (filename not preserved by the page extraction; inferred from the script's content)

```diff
@@ -19,8 +19,15 @@
 # The environment variables it reads are:
 #   * SYNAPSE_SERVER_NAME: The desired server_name of the homeserver.
 #   * SYNAPSE_REPORT_STATS: Whether to report stats.
-#   * SYNAPSE_WORKER_TYPES: A comma separated list of worker names as specified in WORKER_CONFIG
-#         below. Leave empty for no workers.
+#   * SYNAPSE_WORKER_TYPES: A comma separated list of worker names as specified in WORKERS_CONFIG
+#         below. Leave empty for no workers. Add a ':' and a number at the end to
+#         multiply that worker. Append multiple worker types with '+' to merge the
+#         worker types into a single worker. Add a name and a '=' to the front of a
+#         worker type to give this instance a name in logs and nginx.
+#         Examples:
+#         SYNAPSE_WORKER_TYPES='event_persister, federation_sender, client_reader'
+#         SYNAPSE_WORKER_TYPES='event_persister:2, federation_sender:2, client_reader'
+#         SYNAPSE_WORKER_TYPES='stream_writers=account_data+presence+typing'
 #   * SYNAPSE_AS_REGISTRATION_DIR: If specified, a directory in which .yaml and .yml files
 #         will be treated as Application Service registration files.
 #   * SYNAPSE_TLS_CERT: Path to a TLS certificate in PEM format.
```
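The three syntax extensions documented above compose into one grammar per entry. As a standalone illustration (not code from this diff; the real parsing is done by `parse_worker_types` and its helpers further down this file), a single entry decomposes like this:

```python
# Illustrative decomposition of one SYNAPSE_WORKER_TYPES entry under the new
# grammar: [name=]worker_type[+worker_type...][:multiplier]
entry = "rooms=client_reader+federation_reader:2"

# Peel off the optional "name=" override.
name, _, rest = entry.partition("=") if "=" in entry else ("", "", entry)
# Peel off the optional ":N" multiplier.
rest, _, count = rest.partition(":") if ":" in rest else (rest, "", "1")
# "+"-joined worker types become a deduplicated set.
types = {t.strip() for t in rest.split("+")}

print(name or "(auto-named)", sorted(types), int(count))
# -> rooms ['client_reader', 'federation_reader'] 2
```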
```diff
@@ -40,16 +47,33 @@
 
 import os
 import platform
+import re
 import subprocess
 import sys
+from collections import defaultdict
+from itertools import chain
 from pathlib import Path
-from typing import Any, Dict, List, Mapping, MutableMapping, NoReturn, Optional, Set
+from typing import (
+    Any,
+    Dict,
+    List,
+    Mapping,
+    MutableMapping,
+    NoReturn,
+    Optional,
+    Set,
+    SupportsIndex,
+)
 
 import yaml
 from jinja2 import Environment, FileSystemLoader
 
 MAIN_PROCESS_HTTP_LISTENER_PORT = 8080
 
+# A simple name used as a placeholder in the WORKERS_CONFIG below. This will be replaced
+# during processing with the name of the worker.
+WORKER_PLACEHOLDER_NAME = "placeholder_name"
+
 # Workers with exposed endpoints needs either "client", "federation", or "media" listener_resources
 # Watching /_matrix/client needs a "client" listener
 # Watching /_matrix/federation needs a "federation" listener
```
```diff
@@ -70,11 +94,13 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
         "endpoint_patterns": [
             "^/_matrix/client/(api/v1|r0|v3|unstable)/user_directory/search$"
         ],
-        "shared_extra_conf": {"update_user_directory_from_worker": "user_dir1"},
+        "shared_extra_conf": {
+            "update_user_directory_from_worker": WORKER_PLACEHOLDER_NAME
+        },
         "worker_extra_conf": "",
     },
     "media_repository": {
-        "app": "synapse.app.media_repository",
+        "app": "synapse.app.generic_worker",
         "listener_resources": ["media"],
         "endpoint_patterns": [
             "^/_matrix/media/",
```
```diff
@@ -87,7 +113,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
         # The first configured media worker will run the media background jobs
         "shared_extra_conf": {
             "enable_media_repo": False,
-            "media_instance_running_background_jobs": "media_repository1",
+            "media_instance_running_background_jobs": WORKER_PLACEHOLDER_NAME,
         },
         "worker_extra_conf": "enable_media_repo: true",
     },
```
```diff
@@ -95,7 +121,9 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
         "app": "synapse.app.generic_worker",
         "listener_resources": [],
         "endpoint_patterns": [],
-        "shared_extra_conf": {"notify_appservices_from_worker": "appservice1"},
+        "shared_extra_conf": {
+            "notify_appservices_from_worker": WORKER_PLACEHOLDER_NAME
+        },
         "worker_extra_conf": "",
     },
     "federation_sender": {
```
```diff
@@ -192,9 +220,9 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
         "app": "synapse.app.generic_worker",
         "listener_resources": [],
         "endpoint_patterns": [],
-        # This worker cannot be sharded. Therefore there should only ever be one background
-        # worker, and it should be named background_worker1
-        "shared_extra_conf": {"run_background_tasks_on": "background_worker1"},
+        # This worker cannot be sharded. Therefore, there should only ever be one
+        # background worker. This is enforced for the safety of your database.
+        "shared_extra_conf": {"run_background_tasks_on": WORKER_PLACEHOLDER_NAME},
         "worker_extra_conf": "",
     },
     "event_creator": {
```
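The `WORKER_PLACEHOLDER_NAME` substitution introduced above is what lets one template serve many instances. A minimal sketch of the idea (illustrative values only; the real substitution is `insert_worker_name_for_worker_config`, added later in this diff):

```python
# Templates keep a generic placeholder; the concrete instance name is
# swapped in once the worker's numbered name is known.
WORKER_PLACEHOLDER_NAME = "placeholder_name"

template = {"shared_extra_conf": {"run_background_tasks_on": WORKER_PLACEHOLDER_NAME}}

worker_name = "background_worker1"
resolved = {
    key: (worker_name if value == WORKER_PLACEHOLDER_NAME else value)
    for key, value in template["shared_extra_conf"].items()
}
print(resolved)  # -> {'run_background_tasks_on': 'background_worker1'}
```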
```diff
@@ -275,7 +303,7 @@ NGINX_LOCATION_CONFIG_BLOCK = """
 """
 
 NGINX_UPSTREAM_CONFIG_BLOCK = """
-upstream {upstream_worker_type} {{
+upstream {upstream_worker_base_name} {{
 {body}
 }}
 """
```
```diff
@@ -326,7 +354,7 @@ def convert(src: str, dst: str, **template_vars: object) -> None:
 
 def add_worker_roles_to_shared_config(
     shared_config: dict,
-    worker_type: str,
+    worker_types_set: Set[str],
     worker_name: str,
     worker_port: int,
 ) -> None:
```
```diff
@@ -334,22 +362,36 @@
     append appropriate worker information to it for the current worker_type instance.
 
     Args:
-        shared_config: The config dict that all worker instances share (after being converted to YAML)
-        worker_type: The type of worker (one of those defined in WORKERS_CONFIG).
+        shared_config: The config dict that all worker instances share (after being
+            converted to YAML)
+        worker_types_set: The type of worker (one of those defined in WORKERS_CONFIG).
+            This list can be a single worker type or multiple.
         worker_name: The name of the worker instance.
         worker_port: The HTTP replication port that the worker instance is listening on.
     """
-    # The instance_map config field marks the workers that write to various replication streams
+    # The instance_map config field marks the workers that write to various replication
+    # streams
     instance_map = shared_config.setdefault("instance_map", {})
 
-    # Worker-type specific sharding config
-    if worker_type == "pusher":
+    # This is a list of the stream_writers that there can be only one of. Events can be
+    # sharded, and therefore doesn't belong here.
+    singular_stream_writers = [
+        "account_data",
+        "presence",
+        "receipts",
+        "to_device",
+        "typing",
+    ]
+
+    # Worker-type specific sharding config. Now a single worker can fulfill multiple
+    # roles, check each.
+    if "pusher" in worker_types_set:
         shared_config.setdefault("pusher_instances", []).append(worker_name)
 
-    elif worker_type == "federation_sender":
+    if "federation_sender" in worker_types_set:
         shared_config.setdefault("federation_sender_instances", []).append(worker_name)
 
-    elif worker_type == "event_persister":
+    if "event_persister" in worker_types_set:
         # Event persisters write to the events stream, so we need to update
         # the list of event stream writers
         shared_config.setdefault("stream_writers", {}).setdefault("events", []).append(
```
```diff
@@ -362,19 +404,154 @@ def add_worker_roles_to_shared_config(
             "port": worker_port,
         }
 
-    elif worker_type in ["account_data", "presence", "receipts", "to_device", "typing"]:
-        # Update the list of stream writers
-        # It's convenient that the name of the worker type is the same as the stream to write
-        shared_config.setdefault("stream_writers", {}).setdefault(
-            worker_type, []
-        ).append(worker_name)
+    # Update the list of stream writers. It's convenient that the name of the worker
+    # type is the same as the stream to write. Iterate over the whole list in case there
+    # is more than one.
+    for worker in worker_types_set:
+        if worker in singular_stream_writers:
+            shared_config.setdefault("stream_writers", {}).setdefault(
+                worker, []
+            ).append(worker_name)
 
-        # Map of stream writer instance names to host/ports combos
-        # For now, all stream writers need http replication ports
-        instance_map[worker_name] = {
-            "host": "localhost",
-            "port": worker_port,
-        }
+            # Map of stream writer instance names to host/ports combos
+            # For now, all stream writers need http replication ports
+            instance_map[worker_name] = {
+                "host": "localhost",
+                "port": worker_port,
+            }
```
```diff
+
+
+def merge_worker_template_configs(
+    existing_dict: Dict[str, Any] | None,
+    to_be_merged_dict: Dict[str, Any],
+) -> Dict[str, Any]:
+    """When given an existing dict of worker template configuration consisting with both
+    dicts and lists, merge new template data from WORKERS_CONFIG(or create) and
+    return new dict.
+
+    Args:
+        existing_dict: Either an existing worker template or a fresh blank one.
+        to_be_merged_dict: The template from WORKERS_CONFIGS to be merged into
+            existing_dict.
+    Returns: The newly merged together dict values.
+    """
+    new_dict: Dict[str, Any] = {}
+    if not existing_dict:
+        # It doesn't exist yet, just use the new dict(but take a copy not a reference)
+        new_dict = to_be_merged_dict.copy()
+    else:
+        for i in to_be_merged_dict.keys():
+            if (i == "endpoint_patterns") or (i == "listener_resources"):
+                # merge the two lists, remove duplicates
+                new_dict[i] = list(set(existing_dict[i] + to_be_merged_dict[i]))
+            elif i == "shared_extra_conf":
+                # merge dictionary's, the worker name will be replaced later
+                new_dict[i] = {**existing_dict[i], **to_be_merged_dict[i]}
+            elif i == "worker_extra_conf":
+                # There is only one worker type that has a 'worker_extra_conf' and it is
+                # the media_repo. Since duplicate worker types on the same worker don't
+                # work, this is fine.
+                new_dict[i] = existing_dict[i] + to_be_merged_dict[i]
+            else:
+                # Everything else should be identical, like "app", which only works
+                # because all apps are now generic_workers.
+                new_dict[i] = to_be_merged_dict[i]
+    return new_dict
```
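A toy demonstration of the merge rules this helper implements (hypothetical templates, trimmed to the relevant fields; not the real `WORKERS_CONFIG` entries): list fields are unioned, the `shared_extra_conf` dicts are merged, and `worker_extra_conf` strings are concatenated.

```python
# Two hypothetical worker templates being merged into one worker.
a = {
    "app": "synapse.app.generic_worker",
    "listener_resources": ["client"],
    "endpoint_patterns": ["^/_matrix/client/(r0|v3)/sync$"],
    "shared_extra_conf": {},
    "worker_extra_conf": "",
}
b = {
    "app": "synapse.app.generic_worker",
    "listener_resources": ["federation"],
    "endpoint_patterns": ["^/_matrix/federation/"],
    "shared_extra_conf": {},
    "worker_extra_conf": "",
}

merged = {
    # Lists are unioned with duplicates removed.
    "listener_resources": list(set(a["listener_resources"] + b["listener_resources"])),
    "endpoint_patterns": list(set(a["endpoint_patterns"] + b["endpoint_patterns"])),
    # Dicts are merged; placeholder worker names are substituted later.
    "shared_extra_conf": {**a["shared_extra_conf"], **b["shared_extra_conf"]},
    # Strings are concatenated (only media_repository actually sets one).
    "worker_extra_conf": a["worker_extra_conf"] + b["worker_extra_conf"],
    # Everything else is taken verbatim, which works because all apps are generic.
    "app": b["app"],
}
print(sorted(merged["listener_resources"]))  # -> ['client', 'federation']
```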
```diff
+
+
+def insert_worker_name_for_worker_config(
+    existing_dict: Dict[str, Any], worker_name: str
+) -> Dict[str, Any]:
+    """Insert a given worker name into the worker's configuration dict.
+
+    Args:
+        existing_dict: The worker_config dict that is imported into shared_config.
+        worker_name: The name of the worker to insert.
+    Returns: Copy of the dict with newly inserted worker name
+    """
+    dict_to_edit = existing_dict.copy()
+    for k, v in dict_to_edit["shared_extra_conf"].items():
+        # Only proceed if it's the placeholder name string
+        if v == WORKER_PLACEHOLDER_NAME:
+            dict_to_edit["shared_extra_conf"][k] = worker_name
+    return dict_to_edit
```
```diff
+
+
+def apply_requested_multiplier_for_worker(worker_types: List[str]) -> List[str]:
+    """
+    Apply multiplier(if found) by returning a new expanded list with some basic error
+    checking.
+
+    Args:
+        worker_types: The unprocessed List of requested workers
+    Returns:
+        A new list with all requested workers expanded.
+    """
+    # Checking performed:
+    # 1. if worker:2 or more is declared, it will create additional workers up to number
+    # 2. if worker:1, it will create a single copy of this worker as if no number was
+    #    given
+    # 3. if worker:0 is declared, this worker will be ignored. This is to allow for
+    #    scripting and automated expansion and is intended behaviour.
+    # 4. if worker:NaN or is a negative number, it will error and log it.
+    new_worker_types = []
+    for worker_type in worker_types:
+        if ":" in worker_type:
+            worker_type_components = split_and_strip_string(worker_type, ":", 1)
+            worker_count = 0
+            # Should only be 2 components, a type of worker(s) and an integer as a
+            # string. Cast the number as an int then it can be used as a counter.
+            try:
+                worker_count = int(worker_type_components[1])
+            except ValueError:
+                error(
+                    f"Bad number in worker count for '{worker_type}': "
+                    f"'{worker_type_components[1]}' is not an integer"
+                )
+
+            # As long as there are more than 0, we add one to the list to make below.
+            for _ in range(worker_count):
+                new_worker_types.append(worker_type_components[0])
+
+        else:
+            # If it's not a real worker_type, it will error out later.
+            new_worker_types.append(worker_type)
+    return new_worker_types
```
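A worked example of the expansion rules enumerated above (a standalone sketch that mirrors the function rather than importing it):

```python
# Mirrors the multiplier expansion described above: "type:N" becomes N copies,
# "type:0" drops the worker, and a plain entry passes through unchanged.
requested = ["event_persister:2", "synchrotron", "pusher:0"]

expanded = []
for worker_type in requested:
    if ":" in worker_type:
        kind, count = (part.strip() for part in worker_type.split(":", maxsplit=1))
        # A non-integer count triggers the error() path in the real script.
        expanded.extend([kind] * int(count))
    else:
        expanded.append(worker_type)

print(expanded)  # -> ['event_persister', 'event_persister', 'synchrotron']
```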
```diff
+
+
+def is_sharding_allowed_for_worker_type(worker_type: str) -> bool:
+    """Helper to check to make sure worker types that cannot have multiples do not.
+
+    Args:
+        worker_type: The type of worker to check against.
+    Returns: True if allowed, False if not
+    """
+    return worker_type not in [
+        "background_worker",
+        "account_data",
+        "presence",
+        "receipts",
+        "typing",
+        "to_device",
+    ]
```
```diff
+
+
+def split_and_strip_string(
+    given_string: str, split_char: str, max_split: SupportsIndex = -1
+) -> List[str]:
+    """
+    Helper to split a string on split_char and strip whitespace from each end of each
+    element.
+    Args:
+        given_string: The string to split
+        split_char: The character to split the string on
+        max_split: kwarg for split() to limit how many times the split() happens
+    Returns:
+        A List of strings
+    """
+    # Removes whitespace from ends of result strings before adding to list. Allow for
+    # overriding 'maxsplit' kwarg, default being -1 to signify no maximum.
+    return [x.strip() for x in given_string.split(split_char, maxsplit=max_split)]
 
 
 def generate_base_homeserver_config() -> None:
```
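For reference, the helper's behaviour on the two delimiters the script uses (function body copied from the diff above so the snippet runs standalone):

```python
def split_and_strip_string(given_string: str, split_char: str, max_split: int = -1) -> list:
    # Copied from the diff above so this example is self-contained.
    return [x.strip() for x in given_string.split(split_char, maxsplit=max_split)]

assert split_and_strip_string(" event_persister , pusher ", ",") == ["event_persister", "pusher"]
assert split_and_strip_string("rooms = a+b", "=", 1) == ["rooms", "a+b"]
```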
```diff
@@ -389,29 +566,153 @@ generate_base_homeserver_config() -> None:
     subprocess.run(["/usr/local/bin/python", "/start.py", "migrate_config"], check=True)
 
 
+def parse_worker_types(
+    requested_worker_types: List[str],
+) -> Dict[str, Set[str]]:
+    """Read the desired list of requested workers and prepare the data for use in
+    generating worker config files while also checking for potential gotchas.
+
+    Args:
+        requested_worker_types: The list formed from the split environment variable
+            containing the unprocessed requests for workers.
+
+    Returns: A dict of worker names to set of worker types. Format:
+        {'worker_name':
+            {'worker_type', 'worker_type2'}
+        }
+    """
+    # A counter of worker_base_name -> int. Used for determining the name for a given
+    # worker when generating its config file, as each worker's name is just
+    # worker_base_name followed by instance number
+    worker_base_name_counter: Dict[str, int] = defaultdict(int)
+
+    # Similar to above, but more finely grained. This is used to determine we don't have
+    # more than a single worker for cases where multiples would be bad(e.g. presence).
+    worker_type_shard_counter: Dict[str, int] = defaultdict(int)
+
+    # The final result of all this processing
+    dict_to_return: Dict[str, Set[str]] = {}
+
+    # Handle any multipliers requested for given workers.
+    multiple_processed_worker_types = apply_requested_multiplier_for_worker(
+        requested_worker_types
+    )
+
+    # Process each worker_type_string
+    # Examples of expected formats:
+    #  - requested_name=type1+type2+type3
+    #  - synchrotron
+    #  - event_creator+event_persister
+    for worker_type_string in multiple_processed_worker_types:
+        # First, if a name is requested, use that — otherwise generate one.
+        worker_base_name: str = ""
+        if "=" in worker_type_string:
+            # Split on "=", remove extra whitespace from ends then make list
+            worker_type_split = split_and_strip_string(worker_type_string, "=")
+            if len(worker_type_split) > 2:
+                error(
+                    "There should only be one '=' in the worker type string. "
+                    f"Please fix: {worker_type_string}"
+                )
+
+            # Assign the name
+            worker_base_name = worker_type_split[0]
+
+            if not re.match(r"^[a-zA-Z0-9_+-]*[a-zA-Z_+-]$", worker_base_name):
+                # Apply a fairly narrow regex to the worker names. Some characters
+                # aren't safe for use in file paths or nginx configurations.
+                # Don't allow to end with a number because we'll add a number
+                # ourselves in a moment.
+                error(
+                    "Invalid worker name; please choose a name consisting of "
+                    "alphanumeric letters, _ + -, but not ending with a digit: "
+                    f"{worker_base_name!r}"
+                )
+
+            # Continue processing the remainder of the worker_type string
+            # with the name override removed.
+            worker_type_string = worker_type_split[1]
+
+        # Split the worker_type_string on "+", remove whitespace from ends then make
+        # the list a set so it's deduplicated.
+        worker_types_set: Set[str] = set(
+            split_and_strip_string(worker_type_string, "+")
+        )
+
+        if not worker_base_name:
+            # No base name specified: generate one deterministically from set of
+            # types
+            worker_base_name = "+".join(sorted(worker_types_set))
+
+        # At this point, we have:
+        #   worker_base_name which is the name for the worker, without counter.
+        #   worker_types_set which is the set of worker types for this worker.
+
+        # Validate worker_type and make sure we don't allow sharding for a worker type
+        # that doesn't support it. Will error and stop if it is a problem,
+        # e.g. 'background_worker'.
+        for worker_type in worker_types_set:
+            # Verify this is a real defined worker type. If it's not, stop everything so
+            # it can be fixed.
+            if worker_type not in WORKERS_CONFIG:
+                error(
+                    f"{worker_type} is an unknown worker type! Was found in "
+                    f"'{worker_type_string}'. Please fix!"
+                )
+
+            if worker_type in worker_type_shard_counter:
+                if not is_sharding_allowed_for_worker_type(worker_type):
+                    error(
+                        f"There can be only a single worker with {worker_type} "
+                        "type. Please recount and remove."
+                    )
+            # Not in shard counter, must not have seen it yet, add it.
+            worker_type_shard_counter[worker_type] += 1
+
+        # Generate the number for the worker using incrementing counter
+        worker_base_name_counter[worker_base_name] += 1
+        worker_number = worker_base_name_counter[worker_base_name]
+        worker_name = f"{worker_base_name}{worker_number}"
+
+        if worker_number > 1:
+            # If this isn't the first worker, check that we don't have a confusing
+            # mixture of worker types with the same base name.
+            first_worker_with_base_name = dict_to_return[f"{worker_base_name}1"]
+            if first_worker_with_base_name != worker_types_set:
+                error(
+                    f"Can not use worker_name: '{worker_name}' for worker_type(s): "
+                    f"{worker_types_set!r}. It is already in use by "
+                    f"worker_type(s): {first_worker_with_base_name!r}"
+                )
+
+        dict_to_return[worker_name] = worker_types_set
+
+    return dict_to_return
```
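Following the docstring's format, a request such as `event_persister:2, rooms=client_reader+federation_reader` would, per the logic above, map out as shown below (a hypothetical expected value for illustration, not output captured from the script):

```python
# Illustration of parse_worker_types' return value for
#   SYNAPSE_WORKER_TYPES="event_persister:2, rooms=client_reader+federation_reader"
expected = {
    "event_persister1": {"event_persister"},
    "event_persister2": {"event_persister"},
    "rooms1": {"client_reader", "federation_reader"},
}
```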
```diff
 def generate_worker_files(
-    environ: Mapping[str, str], config_path: str, data_dir: str
+    environ: Mapping[str, str],
+    config_path: str,
+    data_dir: str,
+    requested_worker_types: Dict[str, Set[str]],
 ) -> None:
-    """Read the desired list of workers from environment variables and generate
-    shared homeserver, nginx and supervisord configs.
+    """Read the desired workers(if any) that is passed in and generate shared
+    homeserver, nginx and supervisord configs.
 
     Args:
         environ: os.environ instance.
         config_path: The location of the generated Synapse main worker config file.
         data_dir: The location of the synapse data directory. Where log and
             user-facing config files live.
+        requested_worker_types: A Dict containing requested workers in the format of
+            {'worker_name1': {'worker_type', ...}}
     """
     # Note that yaml cares about indentation, so care should be taken to insert lines
     # into files at the correct indentation below.
 
     # shared_config is the contents of a Synapse config file that will be shared amongst
     # the main Synapse process as well as all workers.
     # It is intended mainly for disabling functionality when certain workers are spun up,
     # and adding a replication listener.
 
-    # First read the original config file and extract the listeners block. Then we'll add
-    # another listener for replication. Later we'll write out the result to the shared
-    # config file.
+    # First read the original config file and extract the listeners block. Then we'll
+    # add another listener for replication. Later we'll write out the result to the
+    # shared config file.
     listeners = [
         {
             "port": 9093,
```
```diff
@@ -427,9 +728,9 @@ def generate_worker_files(
     listeners += original_listeners
 
     # The shared homeserver config. The contents of which will be inserted into the
-    # base shared worker jinja2 template.
-    #
-    # This config file will be passed to all workers, included Synapse's main process.
+    # base shared worker jinja2 template. This config file will be passed to all
+    # workers, included Synapse's main process. It is intended mainly for disabling
+    # functionality when certain workers are spun up, and adding a replication listener.
     shared_config: Dict[str, Any] = {"listeners": listeners}
 
     # List of dicts that describe workers.
```
```diff
@@ -437,31 +738,20 @@
     # program blocks.
     worker_descriptors: List[Dict[str, Any]] = []
 
-    # Upstreams for load-balancing purposes. This dict takes the form of a worker type to the
-    # ports of each worker. For example:
+    # Upstreams for load-balancing purposes. This dict takes the form of the worker
+    # type to the ports of each worker. For example:
     # {
     #     worker_type: {1234, 1235, ...}}
     # }
     # and will be used to construct 'upstream' nginx directives.
     nginx_upstreams: Dict[str, Set[int]] = {}
 
-    # A map of: {"endpoint": "upstream"}, where "upstream" is a str representing what will be
-    # placed after the proxy_pass directive. The main benefit to representing this data as a
-    # dict over a str is that we can easily deduplicate endpoints across multiple instances
-    # of the same worker.
-    #
-    # An nginx site config that will be amended to depending on the workers that are
-    # spun up. To be placed in /etc/nginx/conf.d.
-    nginx_locations = {}
-
-    # Read the desired worker configuration from the environment
-    worker_types_env = environ.get("SYNAPSE_WORKER_TYPES", "").strip()
-    if not worker_types_env:
-        # No workers, just the main process
-        worker_types = []
-    else:
-        # Split type names by comma, ignoring whitespace.
-        worker_types = [x.strip() for x in worker_types_env.split(",")]
+    # A map of: {"endpoint": "upstream"}, where "upstream" is a str representing what
+    # will be placed after the proxy_pass directive. The main benefit to representing
+    # this data as a dict over a str is that we can easily deduplicate endpoints
+    # across multiple instances of the same worker. The final rendering will be combined
+    # with nginx_upstreams and placed in /etc/nginx/conf.d.
+    nginx_locations: Dict[str, str] = {}
 
     # Create the worker configuration directory if it doesn't already exist
     os.makedirs("/conf/workers", exist_ok=True)
```
```diff
@@ -469,66 +759,57 @@
     # Start worker ports from this arbitrary port
     worker_port = 18009
 
-    # A counter of worker_type -> int. Used for determining the name for a given
-    # worker type when generating its config file, as each worker's name is just
-    # worker_type + instance #
-    worker_type_counter: Dict[str, int] = {}
-
     # A list of internal endpoints to healthcheck, starting with the main process
     # which exists even if no workers do.
     healthcheck_urls = ["http://localhost:8080/health"]
 
-    # For each worker type specified by the user, create config values
-    for worker_type in worker_types:
-        worker_config = WORKERS_CONFIG.get(worker_type)
-        if worker_config:
-            worker_config = worker_config.copy()
-        else:
-            error(worker_type + " is an unknown worker type! Please fix!")
+    # Get the set of all worker types that we have configured
+    all_worker_types_in_use = set(chain(*requested_worker_types.values()))
+    # Map locations to upstreams (corresponding to worker types) in Nginx
+    # but only if we use the appropriate worker type
+    for worker_type in all_worker_types_in_use:
+        for endpoint_pattern in WORKERS_CONFIG[worker_type]["endpoint_patterns"]:
+            nginx_locations[endpoint_pattern] = f"http://{worker_type}"
 
-        new_worker_count = worker_type_counter.setdefault(worker_type, 0) + 1
-        worker_type_counter[worker_type] = new_worker_count
+    # For each worker type specified by the user, create config values and write it's
+    # yaml config file
+    for worker_name, worker_types_set in requested_worker_types.items():
+        # The collected and processed data will live here.
+        worker_config: Dict[str, Any] = {}
+
+        # Merge all worker config templates for this worker into a single config
+        for worker_type in worker_types_set:
+            copy_of_template_config = WORKERS_CONFIG[worker_type].copy()
+
+            # Merge worker type template configuration data. It's a combination of lists
+            # and dicts, so use this helper.
+            worker_config = merge_worker_template_configs(
+                worker_config, copy_of_template_config
+            )
+
+        # Replace placeholder names in the config template with the actual worker name.
+        worker_config = insert_worker_name_for_worker_config(worker_config, worker_name)
 
-        # Name workers by their type concatenated with an incrementing number
-        # e.g. federation_reader1
-        worker_name = worker_type + str(new_worker_count)
         worker_config.update(
             {"name": worker_name, "port": str(worker_port), "config_path": config_path}
         )
 
-        # Update the shared config with any worker-type specific options
-        shared_config.update(worker_config["shared_extra_conf"])
+        # Update the shared config with any worker_type specific options. The first of a
+        # given worker_type needs to stay assigned and not be replaced.
+        worker_config["shared_extra_conf"].update(shared_config)
+        shared_config = worker_config["shared_extra_conf"]
 
         healthcheck_urls.append("http://localhost:%d/health" % (worker_port,))
 
-        # Check if more than one instance of this worker type has been specified
-        worker_type_total_count = worker_types.count(worker_type)
-
         # Update the shared config with sharding-related options if necessary
         add_worker_roles_to_shared_config(
-            shared_config, worker_type, worker_name, worker_port
+            shared_config, worker_types_set, worker_name, worker_port
         )
 
         # Enable the worker in supervisord
         worker_descriptors.append(worker_config)
 
-        # Add nginx location blocks for this worker's endpoints (if any are defined)
-        for pattern in worker_config["endpoint_patterns"]:
-            # Determine whether we need to load-balance this worker
-            if worker_type_total_count > 1:
-                # Create or add to a load-balanced upstream for this worker
-                nginx_upstreams.setdefault(worker_type, set()).add(worker_port)
-
-                # Upstreams are named after the worker_type
-                upstream = "http://" + worker_type
-            else:
-                upstream = "http://localhost:%d" % (worker_port,)
-
-            # Note that this endpoint should proxy to this upstream
-            nginx_locations[pattern] = upstream
-
         # Write out the worker's logging config file
 
         log_config_filepath = generate_worker_log_config(environ, worker_name, data_dir)
 
         # Then a worker config file
```
```diff
@@ -539,6 +820,10 @@ def generate_worker_files(
             worker_log_config_filepath=log_config_filepath,
         )
 
+        # Save this worker's port number to the correct nginx upstreams
+        for worker_type in worker_types_set:
+            nginx_upstreams.setdefault(worker_type, set()).add(worker_port)
+
         worker_port += 1
 
     # Build the nginx location config blocks
```
```diff
@@ -551,15 +836,14 @@ def generate_worker_files(
 
     # Determine the load-balancing upstreams to configure
     nginx_upstream_config = ""
-
-    for upstream_worker_type, upstream_worker_ports in nginx_upstreams.items():
+    for upstream_worker_base_name, upstream_worker_ports in nginx_upstreams.items():
         body = ""
         for port in upstream_worker_ports:
-            body += "    server localhost:%d;\n" % (port,)
+            body += f"    server localhost:{port};\n"
 
         # Add to the list of configured upstreams
         nginx_upstream_config += NGINX_UPSTREAM_CONFIG_BLOCK.format(
-            upstream_worker_type=upstream_worker_type,
+            upstream_worker_base_name=upstream_worker_base_name,
             body=body,
         )
 
```
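Rendering the renamed template with sample values shows the shape of the generated nginx config; upstreams are now keyed by the worker's base name rather than its type. A standalone sketch (port numbers and the base name are hypothetical, and the joining here differs trivially from the script's loop):

```python
# Standalone rendering of NGINX_UPSTREAM_CONFIG_BLOCK (copied from the diff
# above) with hypothetical values.
NGINX_UPSTREAM_CONFIG_BLOCK = """
upstream {upstream_worker_base_name} {{
{body}
}}
"""

ports = [18009, 18010]
body = "\n".join(f"    server localhost:{port};" for port in ports)
print(NGINX_UPSTREAM_CONFIG_BLOCK.format(
    upstream_worker_base_name="client_reader", body=body
))
# upstream client_reader {
#     server localhost:18009;
#     server localhost:18010;
# }
```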
```diff
@@ -580,7 +864,7 @@ def generate_worker_files(
         if reg_path.suffix.lower() in (".yaml", ".yml")
     ]
 
-    workers_in_use = len(worker_types) > 0
+    workers_in_use = len(requested_worker_types) > 0
 
     # Shared homeserver config
     convert(
```
```diff
@@ -678,13 +962,26 @@ def main(args: List[str], environ: MutableMapping[str, str]) -> None:
         generate_base_homeserver_config()
     else:
         log("Base homeserver config exists—not regenerating")
-    # This script may be run multiple times (mostly by Complement, see note at top of file).
-    # Don't re-configure workers in this instance.
+    # This script may be run multiple times (mostly by Complement, see note at top of
+    # file). Don't re-configure workers in this instance.
     mark_filepath = "/conf/workers_have_been_configured"
     if not os.path.exists(mark_filepath):
+        # Collect and validate worker_type requests
+        # Read the desired worker configuration from the environment
+        worker_types_env = environ.get("SYNAPSE_WORKER_TYPES", "").strip()
+        # Only process worker_types if they exist
+        if not worker_types_env:
+            # No workers, just the main process
+            worker_types = []
+            requested_worker_types: Dict[str, Any] = {}
+        else:
+            # Split type names by comma, ignoring whitespace.
+            worker_types = split_and_strip_string(worker_types_env, ",")
+            requested_worker_types = parse_worker_types(worker_types)
+
         # Always regenerate all other config files
         log("Generating worker config files")
-        generate_worker_files(environ, config_path, data_dir)
+        generate_worker_files(environ, config_path, data_dir, requested_worker_types)
 
         # Mark workers as being configured
         with open(mark_filepath, "w") as f:
```
**mypy.ini** (8 changes)

```diff
@@ -48,9 +48,6 @@ warn_unused_ignores = False
 [mypy-synapse.util.caches.treecache]
 disallow_untyped_defs = False
 
-[mypy-synapse.storage.database]
-disallow_untyped_defs = False
-
 [mypy-tests.util.caches.test_descriptors]
 disallow_untyped_defs = False
 
@@ -74,11 +71,6 @@ ignore_missing_imports = True
 [mypy-msgpack]
 ignore_missing_imports = True
 
-# Note: WIP stubs available at
-# https://github.com/microsoft/python-type-stubs/tree/64934207f523ad6b611e6cfe039d85d7175d7d0d/netaddr
-[mypy-netaddr]
-ignore_missing_imports = True
-
 [mypy-parameterized.*]
 ignore_missing_imports = True
```
**poetry.lock** (generated, 382 changes)

```diff
@@ -352,35 +352,35 @@ files = [
 
 [[package]]
 name = "cryptography"
-version = "39.0.1"
+version = "39.0.2"
 description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
 category = "main"
 optional = false
 python-versions = ">=3.6"
 files = [
-    {file = "cryptography-39.0.1-cp36-abi3-macosx_10_12_universal2.whl", hash = "sha256:6687ef6d0a6497e2b58e7c5b852b53f62142cfa7cd1555795758934da363a965"},
-    {file = "cryptography-39.0.1-cp36-abi3-macosx_10_12_x86_64.whl", hash = "sha256:706843b48f9a3f9b9911979761c91541e3d90db1ca905fd63fee540a217698bc"},
-    {file = "cryptography-39.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:5d2d8b87a490bfcd407ed9d49093793d0f75198a35e6eb1a923ce1ee86c62b41"},
-    {file = "cryptography-39.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:83e17b26de248c33f3acffb922748151d71827d6021d98c70e6c1a25ddd78505"},
-    {file = "cryptography-39.0.1-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e124352fd3db36a9d4a21c1aa27fd5d051e621845cb87fb851c08f4f75ce8be6"},
-    {file = "cryptography-39.0.1-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:5aa67414fcdfa22cf052e640cb5ddc461924a045cacf325cd164e65312d99502"},
-    {file = "cryptography-39.0.1-cp36-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:35f7c7d015d474f4011e859e93e789c87d21f6f4880ebdc29896a60403328f1f"},
-    {file = "cryptography-39.0.1-cp36-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f24077a3b5298a5a06a8e0536e3ea9ec60e4c7ac486755e5fb6e6ea9b3500106"},
-    {file = "cryptography-39.0.1-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:f0c64d1bd842ca2633e74a1a28033d139368ad959872533b1bab8c80e8240a0c"},
-    {file = "cryptography-39.0.1-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:0f8da300b5c8af9f98111ffd512910bc792b4c77392a9523624680f7956a99d4"},
-    {file = "cryptography-39.0.1-cp36-abi3-win32.whl", hash = "sha256:fe913f20024eb2cb2f323e42a64bdf2911bb9738a15dba7d3cce48151034e3a8"},
-    {file = "cryptography-39.0.1-cp36-abi3-win_amd64.whl", hash = "sha256:ced4e447ae29ca194449a3f1ce132ded8fcab06971ef5f618605aacaa612beac"},
-    {file = "cryptography-39.0.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:807ce09d4434881ca3a7594733669bd834f5b2c6d5c7e36f8c00f691887042ad"},
-    {file = "cryptography-39.0.1-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c5caeb8188c24888c90b5108a441c106f7faa4c4c075a2bcae438c6e8ca73cef"},
-    {file = "cryptography-39.0.1-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:4789d1e3e257965e960232345002262ede4d094d1a19f4d3b52e48d4d8f3b885"},
-    {file = "cryptography-39.0.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:96f1157a7c08b5b189b16b47bc9db2332269d6680a196341bf30046330d15388"},
-    {file = "cryptography-39.0.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e422abdec8b5fa8462aa016786680720d78bdce7a30c652b7fadf83a4ba35336"},
-    {file = "cryptography-39.0.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:b0afd054cd42f3d213bf82c629efb1ee5f22eba35bf0eec88ea9ea7304f511a2"},
-    {file = "cryptography-39.0.1-pp39-pypy39_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:6f8ba7f0328b79f08bdacc3e4e66fb4d7aab0c3584e0bd41328dce5262e26b2e"},
-    {file = "cryptography-39.0.1-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:ef8b72fa70b348724ff1218267e7f7375b8de4e8194d1636ee60510aae104cd0"},
-    {file = "cryptography-39.0.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:aec5a6c9864be7df2240c382740fcf3b96928c46604eaa7f3091f58b878c0bb6"},
-    {file = "cryptography-39.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:fdd188c8a6ef8769f148f88f859884507b954cc64db6b52f66ef199bb9ad660a"},
-    {file = "cryptography-39.0.1.tar.gz", hash = "sha256:d1f6198ee6d9148405e49887803907fe8962a23e6c6f83ea7d98f1c0de375695"},
+    {file = "cryptography-39.0.2-cp36-abi3-macosx_10_12_universal2.whl", hash = "sha256:2725672bb53bb92dc7b4150d233cd4b8c59615cd8288d495eaa86db00d4e5c06"},
+    {file = "cryptography-39.0.2-cp36-abi3-macosx_10_12_x86_64.whl", hash = "sha256:23df8ca3f24699167daf3e23e51f7ba7334d504af63a94af468f468b975b7dd7"},
+    {file = "cryptography-39.0.2-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:eb40fe69cfc6f5cdab9a5ebd022131ba21453cf7b8a7fd3631f45bbf52bed612"},
+    {file = "cryptography-39.0.2-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bc0521cce2c1d541634b19f3ac661d7a64f9555135e9d8af3980965be717fd4a"},
+    {file = "cryptography-39.0.2-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffd394c7896ed7821a6d13b24657c6a34b6e2650bd84ae063cf11ccffa4f1a97"},
+    {file = "cryptography-39.0.2-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:e8a0772016feeb106efd28d4a328e77dc2edae84dfbac06061319fdb669ff828"},
+    {file = "cryptography-39.0.2-cp36-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:8f35c17bd4faed2bc7797d2a66cbb4f986242ce2e30340ab832e5d99ae60e011"},
+    {file = "cryptography-39.0.2-cp36-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:b49a88ff802e1993b7f749b1eeb31134f03c8d5c956e3c125c75558955cda536"},
+    {file = "cryptography-39.0.2-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:5f8c682e736513db7d04349b4f6693690170f95aac449c56f97415c6980edef5"},
+    {file = "cryptography-39.0.2-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:d7d84a512a59f4412ca8549b01f94be4161c94efc598bf09d027d67826beddc0"},
+    {file = "cryptography-39.0.2-cp36-abi3-win32.whl", hash = "sha256:c43ac224aabcbf83a947eeb8b17eaf1547bce3767ee2d70093b461f31729a480"},
+    {file = "cryptography-39.0.2-cp36-abi3-win_amd64.whl", hash = "sha256:788b3921d763ee35dfdb04248d0e3de11e3ca8eb22e2e48fef880c42e1f3c8f9"},
+    {file = "cryptography-39.0.2-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:d15809e0dbdad486f4ad0979753518f47980020b7a34e9fc56e8be4f60702fac"},
+    {file = "cryptography-39.0.2-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:50cadb9b2f961757e712a9737ef33d89b8190c3ea34d0fb6675e00edbe35d074"},
+    {file = "cryptography-39.0.2-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:103e8f7155f3ce2ffa0049fe60169878d47a4364b277906386f8de21c9234aa1"},
+    {file = "cryptography-39.0.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:6236a9610c912b129610eb1a274bdc1350b5df834d124fa84729ebeaf7da42c3"},
+    {file = "cryptography-39.0.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e944fe07b6f229f4c1a06a7ef906a19652bdd9fd54c761b0ff87e83ae7a30354"},
+    {file = "cryptography-39.0.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:35d658536b0a4117c885728d1a7032bdc9a5974722ae298d6c533755a6ee3915"},
+    {file = "cryptography-39.0.2-pp39-pypy39_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:30b1d1bfd00f6fc80d11300a29f1d8ab2b8d9febb6ed4a38a76880ec564fae84"},
+    {file = "cryptography-39.0.2-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:e029b844c21116564b8b61216befabca4b500e6816fa9f0ba49527653cae2108"},
+    {file = "cryptography-39.0.2-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:fa507318e427169ade4e9eccef39e9011cdc19534f55ca2f36ec3f388c1f70f3"},
+    {file = "cryptography-39.0.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:8bc0008ef798231fac03fe7d26e82d601d15bd16f3afaad1c6113771566570f3"},
+    {file = "cryptography-39.0.2.tar.gz", hash = "sha256:bc5b871e977c8ee5a1bbc42fa8d19bcc08baf0c51cbf1586b0e87a2694dde42f"},
 ]
 
 [package.dependencies]
```
```diff
@@ -497,14 +497,14 @@ smmap = ">=3.0.1,<6"
 
 [[package]]
 name = "gitpython"
-version = "3.1.30"
-description = "GitPython is a python library used to interact with Git repositories"
+version = "3.1.31"
+description = "GitPython is a Python library used to interact with Git repositories"
 category = "dev"
 optional = false
 python-versions = ">=3.7"
 files = [
-    {file = "GitPython-3.1.30-py3-none-any.whl", hash = "sha256:cd455b0000615c60e286208ba540271af9fe531fa6a87cc590a7298785ab2882"},
-    {file = "GitPython-3.1.30.tar.gz", hash = "sha256:769c2d83e13f5d938b7688479da374c4e3d49f71549aaf462b646db9602ea6f8"},
+    {file = "GitPython-3.1.31-py3-none-any.whl", hash = "sha256:f04893614f6aa713a60cbbe1e6a97403ef633103cdd0ef5eb6efe0deb98dbe8d"},
+    {file = "GitPython-3.1.31.tar.gz", hash = "sha256:8ce3bcf69adfdf7c7d503e78fd3b1c492af782d58893b650adb2ac8912ddd573"},
 ]
 
 [package.dependencies]
```
```diff
@@ -513,101 +513,101 @@ typing-extensions = {version = ">=3.7.4.3", markers = "python_version < \"3.8\""
 
 [[package]]
 name = "hiredis"
-version = "2.2.1"
+version = "2.2.2"
 description = "Python wrapper for hiredis"
 category = "main"
 optional = true
 python-versions = ">=3.7"
 files = [
-    {file = "hiredis-2.2.1-cp310-cp310-macosx_10_12_universal2.whl", hash = "sha256:998ab35070dc81806a23be5de837466a51b25e739fb1a0d5313474d5bb29c829"},
-    {file = "hiredis-2.2.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:70db8f514ebcb6f884497c4eee21d0350bbc4102e63502411f8e100cf3b7921e"},
-    {file = "hiredis-2.2.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a57a4a33a78e94618d026fc68e853d3f71fa4a1d4da7a6e828e927819b001f2d"},
-    {file = "hiredis-2.2.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:209b94fa473b39e174b665186cad73206ca849cf6e822900b761e83080f67b06"},
-    {file = "hiredis-2.2.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:58e51d83b42fdcc29780897641b1dcb30c0e4d3c4f6d9d71d79b2cfec99b8eb7"},
-    {file = "hiredis-2.2.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:706995fb1173fab7f12110fbad00bb95dd0453336f7f0b341b4ca7b1b9ff0bc7"},
-    {file = "hiredis-2.2.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:812e27a9b20db967f942306267bcd8b1369d7c171831b6f45d22d75576cd01cd"},
-    {file = "hiredis-2.2.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:69c32d54ac1f6708145c77d79af12f7448ca1025a0bf912700ad1f0be511026a"},
-    {file = "hiredis-2.2.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:96745c4cdca261a50bd70c01f14c6c352a48c4d6a78e2d422040fba7919eadef"},
-    {file = "hiredis-2.2.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:943631a49d7746cd413acaf0b712d030a15f02671af94c54759ba3144351f97a"},
-    {file = "hiredis-2.2.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:796b616478a5c1cac83e9e10fcd803e746e5a02461bfa7767aebae8b304e2124"},
-    {file = "hiredis-2.2.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:341952a311654c39433c1e0d8d31c2a0c5864b2675ed159ed264ecaa5cfb225b"},
-    {file = "hiredis-2.2.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6fbb1a56d455602bd6c276d5c316ae245111b2dc8158355112f2d905e7471c85"},
-    {file = "hiredis-2.2.1-cp310-cp310-win32.whl", hash = "sha256:14f67987e1d55b197e46729d1497019228ad8c94427bb63500e6f217aa586ca5"},
-    {file = "hiredis-2.2.1-cp310-cp310-win_amd64.whl", hash = "sha256:ea011b3bfa37f2746737860c1e5ba198b63c9b4764e40b042aac7bd2c258938f"},
-    {file = "hiredis-2.2.1-cp311-cp311-macosx_10_12_universal2.whl", hash = "sha256:103bde304d558061c4ba1d7ff94351e761da753c28883fd68964f25080152dac"},
-    {file = "hiredis-2.2.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:6ba9f425739a55e1409fda5dafad7fdda91c6dcd2b111ba93bb7b53d90737506"},
-    {file = "hiredis-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:cb59a7535e0b8373f694ce87576c573f533438c5fbee450193333a22118f4a98"},
-    {file = "hiredis-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6afbddc82bbb2c4c405d9a49a056ffe6541f8ad3160df49a80573b399f94ba3a"},
-    {file = "hiredis-2.2.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a386f00800b1b043b091b93850e02814a8b398952438a9d4895bd70f5c80a821"},
-    {file = "hiredis-2.2.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fec7465caac7b0a36551abb37066221cabf59f776d78fdd58ff17669942b4b41"},
-    {file = "hiredis-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5cd590dd7858d0107c37b438aa27bbcaa0ba77c5b8eda6ebab7acff0aa89f7d7"},
-    {file = "hiredis-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1523ec56d711bee863aaaf4325cef4430da3143ec388e60465f47e28818016cd"},
-    {file = "hiredis-2.2.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d4f6bbe599d255a504ef789c19e23118c654d256343c1ecdf7042fb4b4d0f7fa"},
-    {file = "hiredis-2.2.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:d77dbc13d55c1d45d6a203da910002fffd13fa310af5e9c5994959587a192789"},
-    {file = "hiredis-2.2.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:b2b847ea3f9af99e02c4c58b7cc6714e105c8d73705e5ff1132e9a249391f688"},
-    {file = "hiredis-2.2.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:18135ecf28fc6577e71c0f8d8eb2f31e4783020a7d455571e7e5d2793374ce20"},
-    {file = "hiredis-2.2.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:724aed63871bc386d6f28b5f4d15490d84934709f093e021c4abb785e72db5db"},
-    {file = "hiredis-2.2.1-cp311-cp311-win32.whl", hash = "sha256:497a8837984ddfbf6f5a4c034c0107f2c5aaaebeebf34e2c6ab591acffce5f12"},
-    {file = "hiredis-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:1776db8af168b22588ec10c3df674897b20cc6d25f093cd2724b8b26d7dac057"},
-    {file = "hiredis-2.2.1-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:49a518b456403602775218062a4dd06bed42b26854ff1ff6784cfee2ef6fa347"},
-    {file = "hiredis-2.2.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:02118dc8545e2371448b9983a0041f12124eea907eb61858f2be8e7c1dfa1e43"},
-    {file = "hiredis-2.2.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:78f2a53149b116e0088f6eda720574f723fbc75189195aab8a7a2a591ca89cab"},
-    {file = "hiredis-2.2.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4e3b8f0eba6d88c2aec63e6d1e38960f8a25c01f9796d32993ffa1cfcf48744c"},
-    {file = "hiredis-2.2.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:38270042f40ed9e576966c603d06c984c80364b0d9ec86962a31551dae27b0cd"},
-    {file = "hiredis-2.2.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8a11250dd0521e9f729325b19ce9121df4cbb80ad3468cc21e56803e8380bc4b"},
-    {file = "hiredis-2.2.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:595474e6c25f1c3c8ec67d587188e7dd47c492829b2c7c5ba1b17ee9e7e9a9ea"},
-    {file = "hiredis-2.2.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:8ad00a7621de8ef9ae1616cf24a53d48ad1a699b96668637559a8982d109a800"},
-    {file = "hiredis-2.2.1-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:a5e5e51faa7cd02444d4ee1eb59e316c08e974bcfa3a959cb790bc4e9bb616c5"},
-    {file = "hiredis-2.2.1-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:0a9493bbc477436a3725e99cfcba768f416ab70ab92956e373d1a3b480b1e204"},
-    {file = "hiredis-2.2.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:231e5836579fc75b25c6f9bb6213950ea3d39aadcfeb7f880211ca55df968342"},
-    {file = "hiredis-2.2.1-cp37-cp37m-win32.whl", hash = "sha256:2ed6c948648798b440a9da74db65cdd2ad22f38cf4687f5212df369031394591"},
-    {file = "hiredis-2.2.1-cp37-cp37m-win_amd64.whl", hash = "sha256:c65f38418e35970d44f9b5a59533f0f60f14b9f91b712dba51092d2c74d4dcd1"},
```

(The page extraction cuts off here, partway through the hiredis file list.)
|
||||
{file = "hiredis-2.2.1-cp38-cp38-macosx_10_12_universal2.whl", hash = "sha256:2f6e80fb7cd4cc61af95ab2875801e4c36941a956c183297c3273cbfbbefa9d3"},
|
||||
{file = "hiredis-2.2.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:a54d2b3328a2305e0dfb257a4545053fdc64df0c64e0635982e191c846cc0456"},
|
||||
{file = "hiredis-2.2.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:33624903dfb629d6f7c17ed353b4b415211c29fd447f31e6bf03361865b97e68"},
|
||||
{file = "hiredis-2.2.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0f4b92df1e69dc48411045d2117d1d27ec6b5f0dd2b6501759cea2f6c68d5618"},
|
||||
{file = "hiredis-2.2.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:03c6a1f6bf2f64f40d076c997cdfcb8b3d1c9557dda6cb7bbad2c5c839921726"},
|
||||
{file = "hiredis-2.2.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3af3071d33432960cba88ce4e4932b508ab3e13ce41431c2a1b2dc9a988f7627"},
|
||||
{file = "hiredis-2.2.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cbb3f56d371b560bf39fe45d29c24e3d819ae2399733e2c86394a34e76adab38"},
|
||||
{file = "hiredis-2.2.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5da26970c41084a2ac337a4f075301a78cffb0e0f3df5e98c3049fc95e10725c"},
|
||||
{file = "hiredis-2.2.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:d87f90064106dfd7d2cc7baeb007a8ca289ee985f4bf64bb627c50cdc34208ed"},
|
||||
{file = "hiredis-2.2.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:c233199b9f4dd43e2297577e32ba5fcd0378871a47207bc424d5e5344d030a3e"},
|
||||
{file = "hiredis-2.2.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:99b5bcadd5e029234f89d244b86bc8d21093be7ac26111068bebd92a4a95dc73"},
|
||||
{file = "hiredis-2.2.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:ed79f65098c4643cb6ec4530b337535f00b58ea02e25180e3df15e9cc9da58dc"},
|
||||
{file = "hiredis-2.2.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:c7fd6394779c9a3b324b65394deadb949311662f3770bd34f904b8c04328082c"},
|
||||
{file = "hiredis-2.2.1-cp38-cp38-win32.whl", hash = "sha256:bde0178e7e6c49e408b8d3a8c0ec8e69a23e8dc2ae29f87af2d74b21025385dc"},
|
||||
{file = "hiredis-2.2.1-cp38-cp38-win_amd64.whl", hash = "sha256:6f5f469ba5ae613e4c652cdedfc723aa802329fcc2d65df1e9ab0ac0de34ad9e"},
|
||||
{file = "hiredis-2.2.1-cp39-cp39-macosx_10_12_universal2.whl", hash = "sha256:e5945ef29a76ab792973bef1ffa2970d81dd22edb94dfa5d6cba48beb9f51962"},
|
||||
{file = "hiredis-2.2.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:bad6e9a0e31678ee15ac3ef72e77c08177c86df05c37d2423ff3cded95131e51"},
|
||||
{file = "hiredis-2.2.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e57dfcd72f036cce9eab77bc533a932444459f7e54d96a555d25acf2501048be"},
|
||||
{file = "hiredis-2.2.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3afc76a012b907895e679d1e6bcc6394845d0cc91b75264711f8caf53d7b0f37"},
|
||||
{file = "hiredis-2.2.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a99c0d50d1a31be285c83301eff4b911dca16aac1c3fe1875c7d6f517a1e9fc4"},
|
||||
{file = "hiredis-2.2.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d8849bc74473778c10377f82cf9a534e240734e2f9a92c181ef6d51b4e3d3eb2"},
|
||||
{file = "hiredis-2.2.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e199868fe78c2d175bbb7b88f5daf2eae4a643a62f03f8d6736f9832f04f88b"},
|
||||
{file = "hiredis-2.2.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a0e98106a28fabb672bb014f6c4506cc67491e4cf9ac56d189cbb1e81a9a3e68"},
|
||||
{file = "hiredis-2.2.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0f2607e08dcb1c5d1e925c451facbfc357927acaa336a004552c32a6dd68e050"},
|
||||
{file = "hiredis-2.2.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:954abb363ed1d18dfb7510dbd89402cb7c21106307e04e2ee7bccf35a134f4dd"},
|
||||
{file = "hiredis-2.2.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:0474ab858f5dd15be6b467d89ec14b4c287f53b55ca5455369c3a1a787ef3a24"},
|
||||
{file = "hiredis-2.2.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:b90dd0adb1d659f8c94b32556198af1e61e38edd27fc7434d4b3b68ad4e51d37"},
|
||||
{file = "hiredis-2.2.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7a5dac3ae05bc64b233f950edf37dce9c904aedbc7e18cfc2adfb98edb85da46"},
|
||||
{file = "hiredis-2.2.1-cp39-cp39-win32.whl", hash = "sha256:19666eb154b7155d043bf941e50d1640125f92d3294e2746df87639cc44a10e6"},
|
||||
{file = "hiredis-2.2.1-cp39-cp39-win_amd64.whl", hash = "sha256:c702dd28d52656bb86f7a2a76ea9341ac434810871b51fcd6cd28c6d7490fbdf"},
|
||||
{file = "hiredis-2.2.1-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c604919bba041e4c4708ecb0fe6c7c8a92a7f1e886b0ae8d2c13c3e4abfc5eda"},
|
||||
{file = "hiredis-2.2.1-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:04c972593f26f4769e2be7058b7928179337593bcfc6a8b6bda87eea807b7cbf"},
|
||||
{file = "hiredis-2.2.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42504e4058246536a9f477f450ab21275126fc5f094be5d5e5290c6de9d855f9"},
|
||||
{file = "hiredis-2.2.1-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:220b6ac9d3fce60d14ccc34f9790e20a50dc56b6fb747fc357600963c0cf6aca"},
|
||||
{file = "hiredis-2.2.1-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:a16d81115128e6a9fc6904de051475be195f6c460c9515583dccfd407b16ff78"},
|
||||
{file = "hiredis-2.2.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:df6325aade17b1f86c8b87f6a1d9549a4184fda00e27e2fca0e5d2a987130365"},
|
||||
{file = "hiredis-2.2.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dcad9c9239845b29f149a895e7e99b8307889cecbfc37b69924c2dad1f4ae4e8"},
|
||||
{file = "hiredis-2.2.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f0ccf6fc116795d76bca72aa301a33874c507f9e77402e857d298c73419b5ea3"},
|
||||
{file = "hiredis-2.2.1-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:63f941e77c024be2a1451089e2fdbd5ff450ff0965f49948bbeb383aef1799ea"},
|
||||
{file = "hiredis-2.2.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:2bb682785a37145b209f44f5d5290b0f9f4b56205542fc592d0f1b3d5ffdfcf0"},
|
||||
{file = "hiredis-2.2.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:8fe289556264cb1a2efbcd3d6b3c55e059394ad01b6afa88151264137f85c352"},
|
||||
{file = "hiredis-2.2.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:96b079c53b6acd355edb6fe615270613f3f7ddc4159d69837ce15ec518925c40"},
|
||||
{file = "hiredis-2.2.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82ad46d1140c5779cd9dfdafc35f47dd09dadff7654d8001c50bb283da82e7c9"},
|
||||
{file = "hiredis-2.2.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:17e9f363db56a8edb4eff936354cfa273197465bcd970922f3d292032eca87b0"},
|
||||
{file = "hiredis-2.2.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:ae6b356ed166a0ec663a46b547c988815d2b0e5f2d0af31ef34a16cf3ce705d0"},
|
||||
{file = "hiredis-2.2.1.tar.gz", hash = "sha256:d9fbef7f9070055a7cc012ac965e3dbabbf2400b395649ea8d6016dc82a7d13a"},
|
||||
{file = "hiredis-2.2.2-cp310-cp310-macosx_10_12_universal2.whl", hash = "sha256:ba6123ff137275e2f4c31fc74b93813fcbb79160d43f5357163e09638c7743de"},
|
||||
{file = "hiredis-2.2.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:d995846acc8e3339fb7833cd19bf6f3946ff5157c8488a4df9c51cd119a36870"},
|
||||
{file = "hiredis-2.2.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:82f869ca44bcafa37cd71cfa1429648fa354d6021dcd72f03a2f66bcb339c546"},
|
||||
{file = "hiredis-2.2.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa90a5ee7a7f30c3d72d3513914b8f51f953a71b8cbd52a241b6db6685e55645"},
|
||||
{file = "hiredis-2.2.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:01e2e588392b5fdcc3a6aa0eb62a2eb2a142f829082fa4c3354228029d3aa1ce"},
|
||||
{file = "hiredis-2.2.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5dac177a6ab8b4eb4d5e74978c29eef7cc9eef14086f814cb3893f7465578044"},
|
||||
{file = "hiredis-2.2.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4cb992e3f9753c5a0c637f333c2010d1ad702aebf2d730ee4d484f32b19bae97"},
|
||||
{file = "hiredis-2.2.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e61c22fda5fc25d31bbced24a8322d33c5cb8cad9ba698634c16edb5b3e79a91"},
|
||||
{file = "hiredis-2.2.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9873898e26e50cd41415e9d1ea128bfdb60eb26abb4f5be28a4500fd7834dc0c"},
|
||||
{file = "hiredis-2.2.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:2c18b00a382546e19bcda8b83dcca5b6e0dbc238d235723434405f48a18e8f77"},
|
||||
{file = "hiredis-2.2.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:8c3a6998f6f88d7ca4d082fd26525074df13162b274d7c64034784b6fdc56666"},
|
||||
{file = "hiredis-2.2.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:0fc1f9a9791d028b2b8afa318ccff734c7fc8861d37a04ca9b3d27c9b05f9718"},
|
||||
{file = "hiredis-2.2.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5f2cfd323f83985f2bed6ed013107873275025af270485b7d04c338bfb47bd14"},
|
||||
{file = "hiredis-2.2.2-cp310-cp310-win32.whl", hash = "sha256:55c7e9a9e05f8c0555bfba5c16d98492f8b6db650e56d0c35cc28aeabfc86020"},
|
||||
{file = "hiredis-2.2.2-cp310-cp310-win_amd64.whl", hash = "sha256:eaff526c2fed31c971b0fa338a25237ae5513550ef75d0b85b9420ec778cca45"},
|
||||
{file = "hiredis-2.2.2-cp311-cp311-macosx_10_12_universal2.whl", hash = "sha256:688b9b7458b4f3f452fea6ed062c04fa1fd9a69d9223d95c6cb052581aba553b"},
|
||||
{file = "hiredis-2.2.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:544d52fde3a8dac7854673eac20deca05214758193c01926ffbb0d57c6bf4ffe"},
|
||||
{file = "hiredis-2.2.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:990916e8b0b4eedddef787e73549b562f8c9e73a7fea82f9b8ff517806774ad0"},
|
||||
{file = "hiredis-2.2.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:10dc34854e9acfb3e7cc4157606e2efcb497b1c6fca07bd6c3be34ae5e413f13"},
|
||||
{file = "hiredis-2.2.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c446a2007985ae49c2ecd946dd819dea72b931beb5f647ba08655a1a1e133fa8"},
|
||||
{file = "hiredis-2.2.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:02b9f928dc6cd43ed0f0ffc1c75fb209fb180f004b7e2e19994805f998d247aa"},
|
||||
{file = "hiredis-2.2.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2a355aff8dfa02ebfe67f0946dd706e490bddda9ea260afac9cdc43942310c53"},
|
||||
{file = "hiredis-2.2.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:831461abe5b63e73719621a5f31d8fc175528a05dc09d5a8aa8ef565d6deefa4"},
|
||||
{file = "hiredis-2.2.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:75349f7c8f77eb0fd33ede4575d1e5b0a902a8176a436bf03293d7fec4bd3894"},
|
||||
{file = "hiredis-2.2.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:1eb39b34d15220095dc49ad1e1082580d35cd3b6d9741def52988b5075e4ff03"},
|
||||
{file = "hiredis-2.2.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:a9b306f4e870747eea8b008dcba2e9f1e4acd12b333a684bc1cc120e633a280e"},
|
||||
{file = "hiredis-2.2.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:03dfb4ab7a2136ce1be305592553f102e1bd91a96068ab2778e3252aed20d9bc"},
|
||||
{file = "hiredis-2.2.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d8bc89c7e33fecb083a199ade0131a34d20365a8c32239e218da57290987ca9a"},
|
||||
{file = "hiredis-2.2.2-cp311-cp311-win32.whl", hash = "sha256:ed44b3c711cecde920f238ac35f70ac08744f2079b6369655856e43944464a72"},
|
||||
{file = "hiredis-2.2.2-cp311-cp311-win_amd64.whl", hash = "sha256:2e2f0ce3e8ab1314a52f562386220f6714fd24d7968a95528135ad04e88cc741"},
|
||||
{file = "hiredis-2.2.2-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:e7e61ab75b851aac2d6bc634d03738a242a6ef255a44178437b427c5ebac0a87"},
|
||||
{file = "hiredis-2.2.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9eb14339e399554bb436cc4628e8aaa3943adf7afcf34aba4cbd1e3e6b9ec7ec"},
|
||||
{file = "hiredis-2.2.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e4ec57886f20f4298537cb1ab9dbda98594fb8d7c724c5fbf9a4b55329fd4a63"},
|
||||
{file = "hiredis-2.2.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a89f5afb9827eab07b9c8c585cd4dc95e5232c727508ae2c935d09531abe9e33"},
|
||||
{file = "hiredis-2.2.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3645590b9234cafd21c8ecfbf252ad9aa1d67629f4bdc98ba3627f48f8f7b5aa"},
|
||||
{file = "hiredis-2.2.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:99350e89f52186146938bdba0b9c6cd68802c20346707d6ca8366f2d69d89b2f"},
|
||||
{file = "hiredis-2.2.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b5d290f3d8f7a05c4adbe6c355055b87c7081bfa1eccd1ae5491216307ee5f53"},
|
||||
{file = "hiredis-2.2.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c95be6f20377d5995ef41a98314542e194d2dc9c2579d8f130a1aea78d48fd42"},
|
||||
{file = "hiredis-2.2.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:e4e2da61a04251121cb551f569c3250e6e27e95f2a80f8351c36822eda1f5d2b"},
|
||||
{file = "hiredis-2.2.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:ac7f8d68826f95a3652e44b0c12bfa74d3aa6531d47d5dbe6a2fbfc7979bc20f"},
|
||||
{file = "hiredis-2.2.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:359e662324318baadb768d3c4ade8c4bdcfbb313570eb01e15d75dc5db781815"},
|
||||
{file = "hiredis-2.2.2-cp37-cp37m-win32.whl", hash = "sha256:fd0ca35e2cf44866137cbb5ae7e439fab18a0b0e0e1cf51d45137622d59ec012"},
|
||||
{file = "hiredis-2.2.2-cp37-cp37m-win_amd64.whl", hash = "sha256:c9488ffb10acc6b121c498875278b0a6715d193742dc92d21a281712169ac06d"},
|
||||
{file = "hiredis-2.2.2-cp38-cp38-macosx_10_12_universal2.whl", hash = "sha256:1570fe4f93bc1ea487fb566f2b863fd0ed146f643a4ea31e4e07036db9e0c7f8"},
|
||||
{file = "hiredis-2.2.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:8753c561b37cccbda7264c9b4486e206a6318c18377cd647beb3aa41a15a6beb"},
|
||||
{file = "hiredis-2.2.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a06d0dd84f10be6b15a92edbca2490b64917280f66d8267c63de99b6550308ad"},
|
||||
{file = "hiredis-2.2.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40ff3f1ec3a4046732e9e41df08dcb1a559847196755d295d43e32528aae39e6"},
|
||||
{file = "hiredis-2.2.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c24d856e13c02bd9d28a189e47be70cbba6f2c2a4bd85a8cc98819db9e7e3e06"},
|
||||
{file = "hiredis-2.2.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ee9fe7cef505e8d925c70bebcc16bfab12aa7af922f948346baffd4730f7b00"},
|
||||
{file = "hiredis-2.2.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03ab1d545794bb0e09f3b1e2c8b3adcfacd84f6f2d402bfdcd441a98c0e9643c"},
|
||||
{file = "hiredis-2.2.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:14dfccf4696d75395c587a5dafafb4f7aa0a5d55309341d10bc2e7f1eaa20771"},
|
||||
{file = "hiredis-2.2.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2ddc573809ca4374da1b24b48604f34f3d5f0911fcccfb1c403ff8d8ca31c232"},
|
||||
{file = "hiredis-2.2.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:24301ca2bf9b2f843b4c3015c90f161798fa3bbc5b95fd494785751b137dbbe2"},
|
||||
{file = "hiredis-2.2.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:b083a69e158138ffa95740ff6984d328259387b5596908021b3ccb946469ff66"},
|
||||
{file = "hiredis-2.2.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:8e16dc949cc2e9c5fbcd08de05b5fb61b89ff65738d772863c5c96248628830e"},
|
||||
{file = "hiredis-2.2.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:674f296c3c89cb53f97aa9ba2508d3f360ad481b9e0c0e3a59b342a15192adaf"},
|
||||
{file = "hiredis-2.2.2-cp38-cp38-win32.whl", hash = "sha256:20ecbf87aac4f0f33f9c55ae15cb73b485d256c57518c590b7d0c9c152150632"},
|
||||
{file = "hiredis-2.2.2-cp38-cp38-win_amd64.whl", hash = "sha256:b11960237a3025bf248135e5b497dc4923e83d137eb798fbfe78b40d57c4b156"},
|
||||
{file = "hiredis-2.2.2-cp39-cp39-macosx_10_12_universal2.whl", hash = "sha256:18103090b8eda9c529830e26594e88b0b1472055785f3ed29b8adc694d03862a"},
|
||||
{file = "hiredis-2.2.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:d1acb7c957e5343303b3862947df3232dc7395da320b3b9ae076dfaa56ad59dc"},
|
||||
{file = "hiredis-2.2.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4997f55e1208af95a8fbd0fa187b04c672fcec8f66e49b9ab7fcc45cc1657dc4"},
|
||||
{file = "hiredis-2.2.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:449e18506d22af40977abd0f5a8979f57f88d4562fe591478a3438d76a15133d"},
|
||||
{file = "hiredis-2.2.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a32a4474f7a4abdea954f3365608edee3f90f1de9fa05b81d214d4cad04c718a"},
|
||||
{file = "hiredis-2.2.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e86c800c6941698777fc58419216a66a7f76504f1cea72381d2ee206888e964d"},
|
||||
{file = "hiredis-2.2.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c73aa295c5369135247ff63aa1fbb116067485d0506cd787cc0c868e72bbee55"},
|
||||
{file = "hiredis-2.2.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2e10a66680023bd5c5a3d605dae0844e3dde60eac5b79e39f51395a2aceaf634"},
|
||||
{file = "hiredis-2.2.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:03ab760fc96e0c5d36226eb727f30645bf6a53c97f14bfc0a4d0401bfc9b8af7"},
|
||||
{file = "hiredis-2.2.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:855d258e7f1aee3d7fbd5b1dc87790b1b5016e23d369a97b934a25ae7bc0171f"},
|
||||
{file = "hiredis-2.2.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:ccc33d87866d213f84f857a98f69c13f94fbf99a3304e328869890c9e49c8d65"},
|
||||
{file = "hiredis-2.2.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:339af17bb9817f8acb127247c79a99cad63db6738c0fb2aec9fa3d4f35d2a250"},
|
||||
{file = "hiredis-2.2.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:57f73aa04d0b70ff436fb35fa7ea2b796aa7addbd7ebb8d1aa1f3d1b3e4439f1"},
|
||||
{file = "hiredis-2.2.2-cp39-cp39-win32.whl", hash = "sha256:e97d4e650b8d933a1229f341db92b610fc52b8d752490235977b63b81fbbc2cb"},
|
||||
{file = "hiredis-2.2.2-cp39-cp39-win_amd64.whl", hash = "sha256:8d43a7bba66a800279e33229a206861be09c279e261eaa8db4824e59465f4848"},
|
||||
{file = "hiredis-2.2.2-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:632d79fd02b03e8d9fbaebbe40bfe34b920c5d0a9c0ef6270752e0db85208175"},
|
||||
{file = "hiredis-2.2.2-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3a5fefac31c84143782ec1ebc323c04e733a6e4bfebcef9907a34e47a465e648"},
|
||||
{file = "hiredis-2.2.2-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5155bc1710df8e21aa48c9b2f4d4e13e4987e1efff363a1ef9c84fae2cc6c145"},
|
||||
{file = "hiredis-2.2.2-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2f220b71235d2deab1b4b22681c8aee444720d973b80f1b86a4e2a85f6bcf1e1"},
|
||||
{file = "hiredis-2.2.2-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:f1f1efbe9cc29a3af39cf7eed27225f951aed3f48a1149c7fb74529fb5ab86d4"},
|
||||
{file = "hiredis-2.2.2-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:1f1c44242c18b1f02e6d1162f133d65d00e09cc10d9165dccc78662def72abc2"},
|
||||
{file = "hiredis-2.2.2-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9e0f444d9062f7e487ef42bab2fb2e290f1704afcbca48ad3ec23de63eef0fda"},
|
||||
{file = "hiredis-2.2.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac15e7e1efca51b4695e540c80c328accb352c9608da7c2df82d1fa1a3c539ef"},
|
||||
{file = "hiredis-2.2.2-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:20cfbc469400669a5999aa34ccba3872a1e34490ec3d5c84e8c0752c27977b7c"},
|
||||
{file = "hiredis-2.2.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:bae004a0b978bf62e38d0eef5ab9156f8101d01167b3ca7054bd0994b773e917"},
|
||||
{file = "hiredis-2.2.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a1ce725542133dbdda9e8704867ef52651886bd1ef568c6fd997a27404381985"},
|
||||
{file = "hiredis-2.2.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6e6ea7532221c97fa6d79f7d19d452cd9d1141d759c54279cc4774ce24728f13"},
|
||||
{file = "hiredis-2.2.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a7114961ed78d708142f6c6eb1d2ed65dc3da4b5ae8a4660ad889dd7fc891971"},
|
||||
{file = "hiredis-2.2.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1b084fbc3e69f99865242f8e1ccd4ea2a34bf6a3983d015d61133377526c0ce2"},
|
||||
{file = "hiredis-2.2.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2d1ba0799f3487294f72b2157944d5c3a4fb33c99e2d495d63eab98c7ec7234b"},
|
||||
{file = "hiredis-2.2.2.tar.gz", hash = "sha256:9c270bd0567a9c60673284e000132f603bb4ecbcd707567647a68f85ef45c4d4"},
|
||||
]
|
||||
|
||||
[[package]]
@@ -1098,64 +1098,75 @@ dev = ["black (==22.3.0)", "flake8 (==4.0.1)", "isort (==5.9.3)", "ldaptor", "ma

[[package]]
name = "msgpack"
-version = "1.0.4"
+version = "1.0.5"
description = "MessagePack serializer"
category = "main"
optional = false
python-versions = "*"
files = [
-    {file = "msgpack-1.0.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4ab251d229d10498e9a2f3b1e68ef64cb393394ec477e3370c457f9430ce9250"},
-    {file = "msgpack-1.0.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:112b0f93202d7c0fef0b7810d465fde23c746a2d482e1e2de2aafd2ce1492c88"},
-    {file = "msgpack-1.0.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:002b5c72b6cd9b4bafd790f364b8480e859b4712e91f43014fe01e4f957b8467"},
-    {file = "msgpack-1.0.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:35bc0faa494b0f1d851fd29129b2575b2e26d41d177caacd4206d81502d4c6a6"},
-    {file = "msgpack-1.0.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4733359808c56d5d7756628736061c432ded018e7a1dff2d35a02439043321aa"},
-    {file = "msgpack-1.0.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb514ad14edf07a1dbe63761fd30f89ae79b42625731e1ccf5e1f1092950eaa6"},
-    {file = "msgpack-1.0.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:c23080fdeec4716aede32b4e0ef7e213c7b1093eede9ee010949f2a418ced6ba"},
-    {file = "msgpack-1.0.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:49565b0e3d7896d9ea71d9095df15b7f75a035c49be733051c34762ca95bbf7e"},
-    {file = "msgpack-1.0.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:aca0f1644d6b5a73eb3e74d4d64d5d8c6c3d577e753a04c9e9c87d07692c58db"},
-    {file = "msgpack-1.0.4-cp310-cp310-win32.whl", hash = "sha256:0dfe3947db5fb9ce52aaea6ca28112a170db9eae75adf9339a1aec434dc954ef"},
-    {file = "msgpack-1.0.4-cp310-cp310-win_amd64.whl", hash = "sha256:4dea20515f660aa6b7e964433b1808d098dcfcabbebeaaad240d11f909298075"},
-    {file = "msgpack-1.0.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:e83f80a7fec1a62cf4e6c9a660e39c7f878f603737a0cdac8c13131d11d97f52"},
-    {file = "msgpack-1.0.4-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c11a48cf5e59026ad7cb0dc29e29a01b5a66a3e333dc11c04f7e991fc5510a9"},
-    {file = "msgpack-1.0.4-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1276e8f34e139aeff1c77a3cefb295598b504ac5314d32c8c3d54d24fadb94c9"},
-    {file = "msgpack-1.0.4-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6c9566f2c39ccced0a38d37c26cc3570983b97833c365a6044edef3574a00c08"},
-    {file = "msgpack-1.0.4-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:fcb8a47f43acc113e24e910399376f7277cf8508b27e5b88499f053de6b115a8"},
-    {file = "msgpack-1.0.4-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:76ee788122de3a68a02ed6f3a16bbcd97bc7c2e39bd4d94be2f1821e7c4a64e6"},
-    {file = "msgpack-1.0.4-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:0a68d3ac0104e2d3510de90a1091720157c319ceeb90d74f7b5295a6bee51bae"},
-    {file = "msgpack-1.0.4-cp36-cp36m-win32.whl", hash = "sha256:85f279d88d8e833ec015650fd15ae5eddce0791e1e8a59165318f371158efec6"},
-    {file = "msgpack-1.0.4-cp36-cp36m-win_amd64.whl", hash = "sha256:c1683841cd4fa45ac427c18854c3ec3cd9b681694caf5bff04edb9387602d661"},
-    {file = "msgpack-1.0.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a75dfb03f8b06f4ab093dafe3ddcc2d633259e6c3f74bb1b01996f5d8aa5868c"},
-    {file = "msgpack-1.0.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9667bdfdf523c40d2511f0e98a6c9d3603be6b371ae9a238b7ef2dc4e7a427b0"},
-    {file = "msgpack-1.0.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11184bc7e56fd74c00ead4f9cc9a3091d62ecb96e97653add7a879a14b003227"},
-    {file = "msgpack-1.0.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ac5bd7901487c4a1dd51a8c58f2632b15d838d07ceedaa5e4c080f7190925bff"},
-    {file = "msgpack-1.0.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1e91d641d2bfe91ba4c52039adc5bccf27c335356055825c7f88742c8bb900dd"},
-    {file = "msgpack-1.0.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:2a2df1b55a78eb5f5b7d2a4bb221cd8363913830145fad05374a80bf0877cb1e"},
-    {file = "msgpack-1.0.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:545e3cf0cf74f3e48b470f68ed19551ae6f9722814ea969305794645da091236"},
-    {file = "msgpack-1.0.4-cp37-cp37m-win32.whl", hash = "sha256:2cc5ca2712ac0003bcb625c96368fd08a0f86bbc1a5578802512d87bc592fe44"},
-    {file = "msgpack-1.0.4-cp37-cp37m-win_amd64.whl", hash = "sha256:eba96145051ccec0ec86611fe9cf693ce55f2a3ce89c06ed307de0e085730ec1"},
-    {file = "msgpack-1.0.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:7760f85956c415578c17edb39eed99f9181a48375b0d4a94076d84148cf67b2d"},
-    {file = "msgpack-1.0.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:449e57cc1ff18d3b444eb554e44613cffcccb32805d16726a5494038c3b93dab"},
-    {file = "msgpack-1.0.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d603de2b8d2ea3f3bcb2efe286849aa7a81531abc52d8454da12f46235092bcb"},
-    {file = "msgpack-1.0.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48f5d88c99f64c456413d74a975bd605a9b0526293218a3b77220a2c15458ba9"},
-    {file = "msgpack-1.0.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6916c78f33602ecf0509cc40379271ba0f9ab572b066bd4bdafd7434dee4bc6e"},
-    {file = "msgpack-1.0.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:81fc7ba725464651190b196f3cd848e8553d4d510114a954681fd0b9c479d7e1"},
-    {file = "msgpack-1.0.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:d5b5b962221fa2c5d3a7f8133f9abffc114fe218eb4365e40f17732ade576c8e"},
-    {file = "msgpack-1.0.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:77ccd2af37f3db0ea59fb280fa2165bf1b096510ba9fe0cc2bf8fa92a22fdb43"},
-    {file = "msgpack-1.0.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b17be2478b622939e39b816e0aa8242611cc8d3583d1cd8ec31b249f04623243"},
-    {file = "msgpack-1.0.4-cp38-cp38-win32.whl", hash = "sha256:2bb8cdf50dd623392fa75525cce44a65a12a00c98e1e37bf0fb08ddce2ff60d2"},
-    {file = "msgpack-1.0.4-cp38-cp38-win_amd64.whl", hash = "sha256:26b8feaca40a90cbe031b03d82b2898bf560027160d3eae1423f4a67654ec5d6"},
-    {file = "msgpack-1.0.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:462497af5fd4e0edbb1559c352ad84f6c577ffbbb708566a0abaaa84acd9f3ae"},
-    {file = "msgpack-1.0.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2999623886c5c02deefe156e8f869c3b0aaeba14bfc50aa2486a0415178fce55"},
-    {file = "msgpack-1.0.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f0029245c51fd9473dc1aede1160b0a29f4a912e6b1dd353fa6d317085b219da"},
-    {file = "msgpack-1.0.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed6f7b854a823ea44cf94919ba3f727e230da29feb4a99711433f25800cf747f"},
-    {file = "msgpack-1.0.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0df96d6eaf45ceca04b3f3b4b111b86b33785683d682c655063ef8057d61fd92"},
-    {file = "msgpack-1.0.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6a4192b1ab40f8dca3f2877b70e63799d95c62c068c84dc028b40a6cb03ccd0f"},
-    {file = "msgpack-1.0.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0e3590f9fb9f7fbc36df366267870e77269c03172d086fa76bb4eba8b2b46624"},
-    {file = "msgpack-1.0.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:1576bd97527a93c44fa856770197dec00d223b0b9f36ef03f65bac60197cedf8"},
-    {file = "msgpack-1.0.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:63e29d6e8c9ca22b21846234913c3466b7e4ee6e422f205a2988083de3b08cae"},
-    {file = "msgpack-1.0.4-cp39-cp39-win32.whl", hash = "sha256:fb62ea4b62bfcb0b380d5680f9a4b3f9a2d166d9394e9bbd9666c0ee09a3645c"},
-    {file = "msgpack-1.0.4-cp39-cp39-win_amd64.whl", hash = "sha256:4d5834a2a48965a349da1c5a79760d94a1a0172fbb5ab6b5b33cbf8447e109ce"},
-    {file = "msgpack-1.0.4.tar.gz", hash = "sha256:f5d869c18f030202eb412f08b28d2afeea553d6613aee89e200d7aca7ef01f5f"},
+    {file = "msgpack-1.0.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:525228efd79bb831cf6830a732e2e80bc1b05436b086d4264814b4b2955b2fa9"},
+    {file = "msgpack-1.0.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4f8d8b3bf1ff2672567d6b5c725a1b347fe838b912772aa8ae2bf70338d5a198"},
+    {file = "msgpack-1.0.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cdc793c50be3f01106245a61b739328f7dccc2c648b501e237f0699fe1395b81"},
+    {file = "msgpack-1.0.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5cb47c21a8a65b165ce29f2bec852790cbc04936f502966768e4aae9fa763cb7"},
+    {file = "msgpack-1.0.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e42b9594cc3bf4d838d67d6ed62b9e59e201862a25e9a157019e171fbe672dd3"},
+    {file = "msgpack-1.0.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:55b56a24893105dc52c1253649b60f475f36b3aa0fc66115bffafb624d7cb30b"},
+    {file = "msgpack-1.0.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:1967f6129fc50a43bfe0951c35acbb729be89a55d849fab7686004da85103f1c"},
+    {file = "msgpack-1.0.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:20a97bf595a232c3ee6d57ddaadd5453d174a52594bf9c21d10407e2a2d9b3bd"},
+    {file = "msgpack-1.0.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d25dd59bbbbb996eacf7be6b4ad082ed7eacc4e8f3d2df1ba43822da9bfa122a"},
+    {file = "msgpack-1.0.5-cp310-cp310-win32.whl", hash = "sha256:382b2c77589331f2cb80b67cc058c00f225e19827dbc818d700f61513ab47bea"},
+    {file = "msgpack-1.0.5-cp310-cp310-win_amd64.whl", hash = "sha256:4867aa2df9e2a5fa5f76d7d5565d25ec76e84c106b55509e78c1ede0f152659a"},
+    {file = "msgpack-1.0.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9f5ae84c5c8a857ec44dc180a8b0cc08238e021f57abdf51a8182e915e6299f0"},
+    {file = "msgpack-1.0.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:9e6ca5d5699bcd89ae605c150aee83b5321f2115695e741b99618f4856c50898"},
+    {file = "msgpack-1.0.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5494ea30d517a3576749cad32fa27f7585c65f5f38309c88c6d137877fa28a5a"},
+    {file = "msgpack-1.0.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1ab2f3331cb1b54165976a9d976cb251a83183631c88076613c6c780f0d6e45a"},
+    {file = "msgpack-1.0.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:28592e20bbb1620848256ebc105fc420436af59515793ed27d5c77a217477705"},
+    {file = "msgpack-1.0.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fe5c63197c55bce6385d9aee16c4d0641684628f63ace85f73571e65ad1c1e8d"},
+    {file = "msgpack-1.0.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ed40e926fa2f297e8a653c954b732f125ef97bdd4c889f243182299de27e2aa9"},
+    {file = "msgpack-1.0.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:b2de4c1c0538dcb7010902a2b97f4e00fc4ddf2c8cda9749af0e594d3b7fa3d7"},
+    {file = "msgpack-1.0.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:bf22a83f973b50f9d38e55c6aade04c41ddda19b00c4ebc558930d78eecc64ed"},
+    {file = "msgpack-1.0.5-cp311-cp311-win32.whl", hash = "sha256:c396e2cc213d12ce017b686e0f53497f94f8ba2b24799c25d913d46c08ec422c"},
+    {file = "msgpack-1.0.5-cp311-cp311-win_amd64.whl", hash = "sha256:6c4c68d87497f66f96d50142a2b73b97972130d93677ce930718f68828b382e2"},
+    {file = "msgpack-1.0.5-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:a2b031c2e9b9af485d5e3c4520f4220d74f4d222a5b8dc8c1a3ab9448ca79c57"},
+    {file = "msgpack-1.0.5-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4f837b93669ce4336e24d08286c38761132bc7ab29782727f8557e1eb21b2080"},
+    {file = "msgpack-1.0.5-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1d46dfe3832660f53b13b925d4e0fa1432b00f5f7210eb3ad3bb9a13c6204a6"},
+    {file = "msgpack-1.0.5-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:366c9a7b9057e1547f4ad51d8facad8b406bab69c7d72c0eb6f529cf76d4b85f"},
+    {file = "msgpack-1.0.5-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:4c075728a1095efd0634a7dccb06204919a2f67d1893b6aa8e00497258bf926c"},
+    {file = "msgpack-1.0.5-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:f933bbda5a3ee63b8834179096923b094b76f0c7a73c1cfe8f07ad608c58844b"},
+    {file = "msgpack-1.0.5-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:36961b0568c36027c76e2ae3ca1132e35123dcec0706c4b7992683cc26c1320c"},
+    {file = "msgpack-1.0.5-cp36-cp36m-win32.whl", hash = "sha256:b5ef2f015b95f912c2fcab19c36814963b5463f1fb9049846994b007962743e9"},
+    {file = "msgpack-1.0.5-cp36-cp36m-win_amd64.whl", hash = "sha256:288e32b47e67f7b171f86b030e527e302c91bd3f40fd9033483f2cacc37f327a"},
+    {file = "msgpack-1.0.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:137850656634abddfb88236008339fdaba3178f4751b28f270d2ebe77a563b6c"},
+    {file = "msgpack-1.0.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0c05a4a96585525916b109bb85f8cb6511db1c6f5b9d9cbcbc940dc6b4be944b"},
+    {file = "msgpack-1.0.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56a62ec00b636583e5cb6ad313bbed36bb7ead5fa3a3e38938503142c72cba4f"},
+    {file = "msgpack-1.0.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ef8108f8dedf204bb7b42994abf93882da1159728a2d4c5e82012edd92c9da9f"},
+    {file = "msgpack-1.0.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1835c84d65f46900920b3708f5ba829fb19b1096c1800ad60bae8418652a951d"},
+    {file = "msgpack-1.0.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:e57916ef1bd0fee4f21c4600e9d1da352d8816b52a599c46460e93a6e9f17086"},
+    {file = "msgpack-1.0.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:17358523b85973e5f242ad74aa4712b7ee560715562554aa2134d96e7aa4cbbf"},
+    {file = "msgpack-1.0.5-cp37-cp37m-win32.whl", hash = "sha256:cb5aaa8c17760909ec6cb15e744c3ebc2ca8918e727216e79607b7bbce9c8f77"},
+    {file = "msgpack-1.0.5-cp37-cp37m-win_amd64.whl", hash = "sha256:ab31e908d8424d55601ad7075e471b7d0140d4d3dd3272daf39c5c19d936bd82"},
+    {file = "msgpack-1.0.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:b72d0698f86e8d9ddf9442bdedec15b71df3598199ba33322d9711a19f08145c"},
+    {file = "msgpack-1.0.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:379026812e49258016dd84ad79ac8446922234d498058ae1d415f04b522d5b2d"},
+    {file = "msgpack-1.0.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:332360ff25469c346a1c5e47cbe2a725517919892eda5cfaffe6046656f0b7bb"},
+    {file = "msgpack-1.0.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:476a8fe8fae289fdf273d6d2a6cb6e35b5a58541693e8f9f019bfe990a51e4ba"},
+    {file = "msgpack-1.0.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9985b214f33311df47e274eb788a5893a761d025e2b92c723ba4c63936b69b1"},
+    {file = "msgpack-1.0.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:48296af57cdb1d885843afd73c4656be5c76c0c6328db3440c9601a98f303d87"},
+    {file = "msgpack-1.0.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:addab7e2e1fcc04bd08e4eb631c2a90960c340e40dfc4a5e24d2ff0d5a3b3edb"},
+    {file = "msgpack-1.0.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:916723458c25dfb77ff07f4c66aed34e47503b2eb3188b3adbec8d8aa6e00f48"},
+    {file = "msgpack-1.0.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:821c7e677cc6acf0fd3f7ac664c98803827ae6de594a9f99563e48c5a2f27eb0"},
+    {file = "msgpack-1.0.5-cp38-cp38-win32.whl", hash = "sha256:1c0f7c47f0087ffda62961d425e4407961a7ffd2aa004c81b9c07d9269512f6e"},
+    {file = "msgpack-1.0.5-cp38-cp38-win_amd64.whl", hash = "sha256:bae7de2026cbfe3782c8b78b0db9cbfc5455e079f1937cb0ab8d133496ac55e1"},
+    {file = "msgpack-1.0.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:20c784e66b613c7f16f632e7b5e8a1651aa5702463d61394671ba07b2fc9e025"},
+    {file = "msgpack-1.0.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:266fa4202c0eb94d26822d9bfd7af25d1e2c088927fe8de9033d929dd5ba24c5"},
+    {file = "msgpack-1.0.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:18334484eafc2b1aa47a6d42427da7fa8f2ab3d60b674120bce7a895a0a85bdd"},
+    {file = "msgpack-1.0.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:57e1f3528bd95cc44684beda696f74d3aaa8a5e58c816214b9046512240ef437"},
+    {file = "msgpack-1.0.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:586d0d636f9a628ddc6a17bfd45aa5b5efaf1606d2b60fa5d87b8986326e933f"},
+    {file = "msgpack-1.0.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a740fa0e4087a734455f0fc3abf5e746004c9da72fbd541e9b113013c8dc3282"},
+    {file = "msgpack-1.0.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:3055b0455e45810820db1f29d900bf39466df96ddca11dfa6d074fa47054376d"},
+    {file = "msgpack-1.0.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:a61215eac016f391129a013c9e46f3ab308db5f5ec9f25811e811f96962599a8"},
+    {file = "msgpack-1.0.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:362d9655cd369b08fda06b6657a303eb7172d5279997abe094512e919cf74b11"},
+    {file = "msgpack-1.0.5-cp39-cp39-win32.whl", hash = "sha256:ac9dd47af78cae935901a9a500104e2dea2e253207c924cc95de149606dc43cc"},
+    {file = "msgpack-1.0.5-cp39-cp39-win_amd64.whl", hash = "sha256:06f5174b5f8ed0ed919da0e62cbd4ffde676a374aba4020034da05fab67b9164"},
+    {file = "msgpack-1.0.5.tar.gz", hash = "sha256:c075544284eadc5cddc70f4757331d99dcbc16b2bbd4849d15f8aae4cf36d31c"},
]

[[package]]
@@ -1777,26 +1788,25 @@ files = [

[[package]]
name = "pysaml2"
-version = "7.2.1"
+version = "7.3.1"
description = "Python implementation of SAML Version 2 Standard"
category = "main"
optional = true
-python-versions = "<4,>=3.6"
+python-versions = ">=3.6.2,<4.0.0"
files = [
-    {file = "pysaml2-7.2.1-py2.py3-none-any.whl", hash = "sha256:2ca155f4eeb1471b247a7b0cc79ccfd5780046d33d0b201e1199a00698dce795"},
-    {file = "pysaml2-7.2.1.tar.gz", hash = "sha256:f40f9576dce9afef156469179277ffeeca36829248be333252af0517a26d0b1f"},
+    {file = "pysaml2-7.3.1-py3-none-any.whl", hash = "sha256:2cc66e7a371d3f5ff9601f0ed93b5276cca816fce82bb38447d5a0651f2f5193"},
+    {file = "pysaml2-7.3.1.tar.gz", hash = "sha256:eab22d187c6dd7707c58b5bb1688f9b8e816427667fc99d77f54399e15cd0a0a"},
]

[package.dependencies]
cryptography = ">=3.1"
defusedxml = "*"
importlib-metadata = {version = ">=1.7.0", markers = "python_version < \"3.8\""}
importlib-resources = {version = "*", markers = "python_version < \"3.9\""}
-pyOpenSSL = "*"
+pyopenssl = "*"
python-dateutil = "*"
pytz = "*"
-requests = ">=1.0.0"
setuptools = "*"
-six = "*"
+requests = ">=2,<3"
xmlschema = ">=1.2.1"

[package.extras]
@@ -2597,6 +2607,18 @@ files = [
    {file = "types_jsonschema-4.17.0.5-py3-none-any.whl", hash = "sha256:79ac8a7763fe728947af90a24168b91621edf7e8425bf3670abd4ea0d4758fba"},
]

+[[package]]
+name = "types-netaddr"
+version = "0.8.0.6"
+description = "Typing stubs for netaddr"
+category = "dev"
+optional = false
+python-versions = "*"
+files = [
+    {file = "types-netaddr-0.8.0.6.tar.gz", hash = "sha256:e5048640c2412e7ea2d3eb02c94ae1b50442b2c7a50a7c48e957676139cdf19b"},
+    {file = "types_netaddr-0.8.0.6-py3-none-any.whl", hash = "sha256:d4d40d1ba35430a4e4c929596542cd37e6831f5d08676b33dc84e06e01a840f6"},
+]
+
[[package]]
name = "types-opentracing"
version = "2.4.10.3"
@@ -2990,4 +3012,4 @@ user-search = ["pyicu"]

[metadata]
lock-version = "2.0"
python-versions = "^3.7.1"
-content-hash = "7bcffef7b6e6d4b1113222e2ca152b3798c997872789c8a1ea01238f199d56fe"
+content-hash = "de2c4c8de336593478ce02581a5336afe2544db93ea82f3955b34c3653c29a26"
@@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml"

[tool.poetry]
name = "matrix-synapse"
-version = "1.79.0rc1"
+version = "1.79.0"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "Apache-2.0"

@@ -321,6 +321,7 @@ mypy-zope = "*"
types-bleach = ">=4.1.0"
types-commonmark = ">=0.9.2"
types-jsonschema = ">=3.2.0"
+types-netaddr = ">=0.8.0.6"
types-opentracing = ">=2.4.2"
types-Pillow = ">=8.3.4"
types-psycopg2 = ">=2.9.9"
@@ -283,6 +283,13 @@ class MockHomeserver:
    def get_replication_notifier(self) -> ReplicationNotifier:
        return ReplicationNotifier()

+    def get_user_directory_handler(self) -> object:
+        class FakeUserDirectoryHandler:
+            def kick_off_remote_profile_refresh_process(self) -> None:
+                pass
+
+        return FakeUserDirectoryHandler()
+

class Porter:
    def __init__(
@@ -247,6 +247,11 @@ class ThirdPartyEventRules:
                on_add_user_third_party_identifier
            )

+        if on_remove_user_third_party_identifier is not None:
+            self._on_remove_user_third_party_identifier_callbacks.append(
+                on_remove_user_third_party_identifier
+            )
+
    async def check_event_allowed(
        self,
        event: EventBase,
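The hunk above fixes a bug where an `on_remove_user_third_party_identifier` callback was accepted but never stored, so registering it was a no-op. For context, a module would register and implement the hook roughly as below (an illustrative sketch, not part of this repository: the callback signature mirrors the documented `on_add_user_third_party_identifier` hook, and the module boilerplate is assumed):

    from synapse.module_api import ModuleApi

    class ExampleThirdPartyRulesModule:
        def __init__(self, config: dict, api: ModuleApi):
            # Before this fix, the callback below was silently discarded.
            api.register_third_party_rules_callbacks(
                on_remove_user_third_party_identifier=self.on_remove_3pid,
            )

        async def on_remove_3pid(self, user_id: str, medium: str, address: str) -> None:
            # Invoked when a user unbinds a third-party identifier
            # (e.g. an email address) from their account.
            print(f"{user_id} removed {medium} identifier {address}")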
@@ -497,8 +497,8 @@ class PerDestinationQueue:
        #
        # Note: `catchup_pdus` will have exactly one PDU per room.
        for pdu in catchup_pdus:
-            # The PDU from the DB will be the last PDU in the room from
-            # *this server* that wasn't sent to the remote. However, other
+            # The PDU from the DB will be the newest PDU in the room from
+            # *this server* that we tried---but were unable---to send to the remote.
            # servers may have sent lots of events since then, and we want
            # to try and tell the remote only about the *latest* events in
            # the room. This is so that it doesn't get inundated by events

@@ -516,6 +516,11 @@ class PerDestinationQueue:
                # If the event is in the extremities, then great! We can just
                # use that without having to do further checks.
                room_catchup_pdus = [pdu]
+            elif await self._store.is_partial_state_room(pdu.room_id):
+                # We can't be sure which events the destination should
+                # see using only partial state. Avoid doing so, and just retry
+                # sending out the newest PDU the remote is missing from us.
+                room_catchup_pdus = [pdu]
            else:
                # If not, fetch the extremities and figure out which we can
                # send.

@@ -547,6 +552,8 @@ class PerDestinationQueue:
                    self._server_name,
                    new_pdus,
                    redact=False,
+                    filter_out_erased_senders=True,
+                    filter_out_remote_partial_state_events=True,
                )

                # If we've filtered out all the extremities, fall back to
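The new `elif` branch above short-circuits catch-up for rooms that only have partial state after a faster join: instead of computing forward extremities (which requires full state to filter visibility correctly), the sender just retries the single missing PDU. Reduced to a sketch, the selection logic is (names taken from the hunk; `_get_filtered_extremities` is a hypothetical stand-in for the else-branch):

    async def _select_catchup_pdus(self, pdu, extremities):
        # Sketch of the branching introduced above, not the real method.
        if pdu.event_id in extremities:
            # The newest unsent PDU is still a forward extremity: send it as-is.
            return [pdu]
        if await self._store.is_partial_state_room(pdu.room_id):
            # With only partial state we cannot tell what the destination is
            # allowed to see, so fall back to retrying the one missing PDU.
            return [pdu]
        # Otherwise choose from the room's current extremities, visibility-filtered.
        return await self._get_filtered_extremities(pdu.room_id)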
@@ -15,9 +15,7 @@
import email.mime.multipart
import email.utils
import logging
-from typing import TYPE_CHECKING, Awaitable, Callable, List, Optional, Tuple
-
-from twisted.web.http import Request
+from typing import TYPE_CHECKING, List, Optional, Tuple

from synapse.api.errors import AuthError, StoreError, SynapseError
from synapse.metrics.background_process_metrics import wrap_as_background_process

@@ -30,25 +28,17 @@ if TYPE_CHECKING:

logger = logging.getLogger(__name__)

-# Types for callbacks to be registered via the module api
-IS_USER_EXPIRED_CALLBACK = Callable[[str], Awaitable[Optional[bool]]]
-ON_USER_REGISTRATION_CALLBACK = Callable[[str], Awaitable]
-# Temporary hooks to allow for a transition from `/_matrix/client` endpoints
-# to `/_synapse/client/account_validity`. See `register_account_validity_callbacks`.
-ON_LEGACY_SEND_MAIL_CALLBACK = Callable[[str], Awaitable]
-ON_LEGACY_RENEW_CALLBACK = Callable[[str], Awaitable[Tuple[bool, bool, int]]]
-ON_LEGACY_ADMIN_REQUEST = Callable[[Request], Awaitable]
-

class AccountValidityHandler:
    def __init__(self, hs: "HomeServer"):
        self.hs = hs
        self.config = hs.config
-        self.store = self.hs.get_datastores().main
-        self.send_email_handler = self.hs.get_send_email_handler()
-        self.clock = self.hs.get_clock()
+        self.store = hs.get_datastores().main
+        self.send_email_handler = hs.get_send_email_handler()
+        self.clock = hs.get_clock()

-        self._app_name = self.hs.config.email.email_app_name
+        self._app_name = hs.config.email.email_app_name
+        self._module_api_callbacks = hs.get_module_api_callbacks().account_validity

        self._account_validity_enabled = (
            hs.config.account_validity.account_validity_enabled

@@ -78,69 +68,6 @@ class AccountValidityHandler:
        if hs.config.worker.run_background_tasks:
            self.clock.looping_call(self._send_renewal_emails, 30 * 60 * 1000)

-        self._is_user_expired_callbacks: List[IS_USER_EXPIRED_CALLBACK] = []
-        self._on_user_registration_callbacks: List[ON_USER_REGISTRATION_CALLBACK] = []
-        self._on_legacy_send_mail_callback: Optional[
-            ON_LEGACY_SEND_MAIL_CALLBACK
-        ] = None
-        self._on_legacy_renew_callback: Optional[ON_LEGACY_RENEW_CALLBACK] = None
-
-        # The legacy admin requests callback isn't a protected attribute because we need
-        # to access it from the admin servlet, which is outside of this handler.
-        self.on_legacy_admin_request_callback: Optional[ON_LEGACY_ADMIN_REQUEST] = None
-
-    def register_account_validity_callbacks(
-        self,
-        is_user_expired: Optional[IS_USER_EXPIRED_CALLBACK] = None,
-        on_user_registration: Optional[ON_USER_REGISTRATION_CALLBACK] = None,
-        on_legacy_send_mail: Optional[ON_LEGACY_SEND_MAIL_CALLBACK] = None,
-        on_legacy_renew: Optional[ON_LEGACY_RENEW_CALLBACK] = None,
-        on_legacy_admin_request: Optional[ON_LEGACY_ADMIN_REQUEST] = None,
-    ) -> None:
-        """Register callbacks from module for each hook."""
-        if is_user_expired is not None:
-            self._is_user_expired_callbacks.append(is_user_expired)
-
-        if on_user_registration is not None:
-            self._on_user_registration_callbacks.append(on_user_registration)
-
-        # The builtin account validity feature exposes 3 endpoints (send_mail, renew, and
-        # an admin one). As part of moving the feature into a module, we need to change
-        # the path from /_matrix/client/unstable/account_validity/... to
-        # /_synapse/client/account_validity, because:
-        #
-        # * the feature isn't part of the Matrix spec thus shouldn't live under /_matrix
-        # * the way we register servlets means that modules can't register resources
-        #   under /_matrix/client
-        #
-        # We need to allow for a transition period between the old and new endpoints
-        # in order to allow for clients to update (and for emails to be processed).
-        #
-        # Once the email-account-validity module is loaded, it will take control of account
-        # validity by moving the rows from our `account_validity` table into its own table.
-        #
-        # Therefore, we need to allow modules (in practice just the one implementing the
-        # email-based account validity) to temporarily hook into the legacy endpoints so we
-        # can route the traffic coming into the old endpoints into the module, which is
-        # why we have the following three temporary hooks.
-        if on_legacy_send_mail is not None:
-            if self._on_legacy_send_mail_callback is not None:
-                raise RuntimeError("Tried to register on_legacy_send_mail twice")
-
-            self._on_legacy_send_mail_callback = on_legacy_send_mail
-
-        if on_legacy_renew is not None:
-            if self._on_legacy_renew_callback is not None:
-                raise RuntimeError("Tried to register on_legacy_renew twice")
-
-            self._on_legacy_renew_callback = on_legacy_renew
-
-        if on_legacy_admin_request is not None:
-            if self.on_legacy_admin_request_callback is not None:
-                raise RuntimeError("Tried to register on_legacy_admin_request twice")
-
-            self.on_legacy_admin_request_callback = on_legacy_admin_request
-
    async def is_user_expired(self, user_id: str) -> bool:
        """Checks if a user has expired against third-party modules.

@@ -150,7 +77,7 @@ class AccountValidityHandler:
        Returns:
            Whether the user has expired.
        """
-        for callback in self._is_user_expired_callbacks:
+        for callback in self._module_api_callbacks.is_user_expired_callbacks:
            expired = await delay_cancellation(callback(user_id))
            if expired is not None:
                return expired

@@ -168,7 +95,7 @@ class AccountValidityHandler:
        Args:
            user_id: The ID of the newly registered user.
        """
-        for callback in self._on_user_registration_callbacks:
+        for callback in self._module_api_callbacks.on_user_registration_callbacks:
            await callback(user_id)

    @wrap_as_background_process("send_renewals")

@@ -198,8 +125,8 @@ class AccountValidityHandler:
        """
        # If a module supports sending a renewal email from here, do that, otherwise do
        # the legacy dance.
-        if self._on_legacy_send_mail_callback is not None:
-            await self._on_legacy_send_mail_callback(user_id)
+        if self._module_api_callbacks.on_legacy_send_mail_callback is not None:
+            await self._module_api_callbacks.on_legacy_send_mail_callback(user_id)
            return

        if not self._account_validity_renew_by_email_enabled:

@@ -336,8 +263,10 @@ class AccountValidityHandler:
        """
        # If a module supports triggering a renew from here, do that, otherwise do the
        # legacy dance.
-        if self._on_legacy_renew_callback is not None:
-            return await self._on_legacy_renew_callback(renewal_token)
+        if self._module_api_callbacks.on_legacy_renew_callback is not None:
+            return await self._module_api_callbacks.on_legacy_renew_callback(
+                renewal_token
+            )

        try:
            (
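The registration machinery deleted above now lives on a shared module-API callbacks object, and the handler only reads it via `self._module_api_callbacks`. From a module's perspective the registration surface is unchanged; roughly (a hedged sketch: the registration call is the existing module API, while the expiry policy here is invented purely for illustration):

    from typing import Optional
    from synapse.module_api import ModuleApi

    class ExampleAccountValidityModule:
        def __init__(self, config: dict, api: ModuleApi):
            api.register_account_validity_callbacks(
                is_user_expired=self.is_user_expired,
                on_user_registration=self.on_user_registration,
            )

        async def is_user_expired(self, user_id: str) -> Optional[bool]:
            # Return True/False to decide, or None to let other modules decide.
            return None

        async def on_user_registration(self, user_id: str) -> None:
            # E.g. record a registration timestamp to drive later expiry checks.
            pass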
@@ -392,7 +392,7 @@ class FederationHandler:
                 get_prev_content=False,
             )

-            # We set `check_history_visibility_only` as we might otherwise get false
+            # We unset `filter_out_erased_senders` as we might otherwise get false
             # positives from users having been erased.
             filtered_extremities = await filter_events_for_server(
                 self._storage_controllers,
@@ -400,7 +400,8 @@ class FederationHandler:
                 self.server_name,
                 events_to_check,
                 redact=False,
-                check_history_visibility_only=True,
+                filter_out_erased_senders=False,
+                filter_out_remote_partial_state_events=False,
             )
             if filtered_extremities:
                 extremities_to_request.append(bp.event_id)
@@ -1331,7 +1332,13 @@ class FederationHandler:
         )

         events = await filter_events_for_server(
-            self._storage_controllers, origin, self.server_name, events
+            self._storage_controllers,
+            origin,
+            self.server_name,
+            events,
+            redact=True,
+            filter_out_erased_senders=True,
+            filter_out_remote_partial_state_events=True,
         )

         return events
@@ -1362,7 +1369,13 @@ class FederationHandler:
         await self._event_auth_handler.assert_host_in_room(event.room_id, origin)

         events = await filter_events_for_server(
-            self._storage_controllers, origin, self.server_name, [event]
+            self._storage_controllers,
+            origin,
+            self.server_name,
+            [event],
+            redact=True,
+            filter_out_erased_senders=True,
+            filter_out_remote_partial_state_events=True,
         )
         event = events[0]
         return event
@@ -1390,7 +1403,13 @@ class FederationHandler:
         )

         missing_events = await filter_events_for_server(
-            self._storage_controllers, origin, self.server_name, missing_events
+            self._storage_controllers,
+            origin,
+            self.server_name,
+            missing_events,
+            redact=True,
+            filter_out_erased_senders=True,
+            filter_out_remote_partial_state_events=True,
         )

         return missing_events
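
Taken together, these FederationHandler hunks replace the single overloaded `check_history_visibility_only` flag with explicit, per-behaviour keyword arguments. A sketch of the signature these call sites imply — the parameter names before `events` are illustrative, and the real function body is not part of this diff:

async def filter_events_for_server(
    storage_controllers,
    target_server_name: str,
    local_server_name: str,
    events,
    *,
    redact: bool,
    filter_out_erased_senders: bool,
    filter_out_remote_partial_state_events: bool,
):
    # Each caller now states explicitly whether erased senders and events from
    # rooms with partial state should be filtered out, instead of bundling both
    # behaviours behind one boolean.
    ...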
@@ -683,7 +683,7 @@ class PaginationHandler:

             await self._storage_controllers.purge_events.purge_room(room_id)

-            logger.info("complete")
+            logger.info("purge complete for room_id %s", room_id)
             self._delete_by_id[delete_id].status = DeleteStatus.STATUS_COMPLETE
         except Exception:
             f = Failure()
@@ -63,7 +63,7 @@ class ProfileHandler:

         self._third_party_rules = hs.get_third_party_event_rules()

-    async def get_profile(self, user_id: str) -> JsonDict:
+    async def get_profile(self, user_id: str, ignore_backoff: bool = True) -> JsonDict:
         target_user = UserID.from_string(user_id)

         if self.hs.is_mine(target_user):
@@ -81,7 +81,7 @@ class ProfileHandler:
                     destination=target_user.domain,
                     query_type="profile",
                     args={"user_id": user_id},
-                    ignore_backoff=True,
+                    ignore_backoff=ignore_backoff,
                 )
                 return result
             except RequestSendFailed as e:
@@ -596,14 +596,20 @@ class RegistrationHandler:
         Args:
             user_id: The user to join
         """
+        # If there are no rooms to auto-join, just bail.
+        if not self.hs.config.registration.auto_join_rooms:
+            return
+
         # auto-join the user to any rooms we're supposed to dump them into

         # try to create the room if we're the first real user on the server. Note
         # that an auto-generated support or bot user is not a real user and will never be
         # the user to create the room
         should_auto_create_rooms = False
-        is_real_user = await self.store.is_real_user(user_id)
-        if self.hs.config.registration.autocreate_auto_join_rooms and is_real_user:
+        if (
+            self.hs.config.registration.autocreate_auto_join_rooms
+            and await self.store.is_real_user(user_id)
+        ):
            count = await self.store.count_real_users()
            should_auto_create_rooms = count == 1
@@ -569,7 +569,7 @@ class RoomCreationHandler:
             new_room_id,
             # we expect to override all the presets with initial_state, so this is
             # somewhat arbitrary.
-            preset_config=RoomCreationPreset.PRIVATE_CHAT,
+            room_config={"preset": RoomCreationPreset.PRIVATE_CHAT},
             invite_list=[],
             initial_state=initial_state,
             creation_content=creation_content,
@@ -904,13 +904,6 @@ class RoomCreationHandler:
             check_membership=False,
         )

-        preset_config = config.get(
-            "preset",
-            RoomCreationPreset.PRIVATE_CHAT
-            if visibility == "private"
-            else RoomCreationPreset.PUBLIC_CHAT,
-        )
-
         raw_initial_state = config.get("initial_state", [])

         initial_state = OrderedDict()
@@ -929,7 +922,7 @@ class RoomCreationHandler:
         ) = await self._send_events_for_new_room(
             requester,
             room_id,
-            preset_config=preset_config,
+            room_config=config,
             invite_list=invite_list,
             initial_state=initial_state,
             creation_content=creation_content,
@@ -938,48 +931,6 @@ class RoomCreationHandler:
             creator_join_profile=creator_join_profile,
         )

-        if "name" in config:
-            name = config["name"]
-            (
-                name_event,
-                last_stream_id,
-            ) = await self.event_creation_handler.create_and_send_nonmember_event(
-                requester,
-                {
-                    "type": EventTypes.Name,
-                    "room_id": room_id,
-                    "sender": user_id,
-                    "state_key": "",
-                    "content": {"name": name},
-                },
-                ratelimit=False,
-                prev_event_ids=[last_sent_event_id],
-                depth=depth,
-            )
-            last_sent_event_id = name_event.event_id
-            depth += 1
-
-        if "topic" in config:
-            topic = config["topic"]
-            (
-                topic_event,
-                last_stream_id,
-            ) = await self.event_creation_handler.create_and_send_nonmember_event(
-                requester,
-                {
-                    "type": EventTypes.Topic,
-                    "room_id": room_id,
-                    "sender": user_id,
-                    "state_key": "",
-                    "content": {"topic": topic},
-                },
-                ratelimit=False,
-                prev_event_ids=[last_sent_event_id],
-                depth=depth,
-            )
-            last_sent_event_id = topic_event.event_id
-            depth += 1
-
         # we avoid dropping the lock between invites, as otherwise joins can
         # start coming in and making the createRoom slow.
         #
@@ -1047,7 +998,7 @@ class RoomCreationHandler:
         self,
         creator: Requester,
         room_id: str,
-        preset_config: str,
+        room_config: JsonDict,
         invite_list: List[str],
         initial_state: MutableStateMap,
         creation_content: JsonDict,
@@ -1064,11 +1015,33 @@ class RoomCreationHandler:

         Rate limiting should already have been applied by this point.

+        Args:
+            creator:
+                the user requesting the room creation
+            room_id:
+                room id for the room being created
+            room_config:
+                A dict of configuration options. This will be the body of
+                a /createRoom request; see
+                https://spec.matrix.org/latest/client-server-api/#post_matrixclientv3createroom
+            invite_list:
+                a list of user ids to invite to the room
+            initial_state:
+                A list of state events to set in the new room.
+            creation_content:
+                Extra keys, such as m.federate, to be added to the content of the m.room.create event.
+            room_alias:
+                alias for the room
+            power_level_content_override:
+                The power level content to override in the default power level event.
+            creator_join_profile:
+                Set to override the displayname and avatar for the creating
+                user in this room.
+
         Returns:
             A tuple containing the stream ID, event ID and depth of the last
             event sent to the room.
         """

         creator_id = creator.user.to_string()
         event_keys = {"room_id": room_id, "sender": creator_id, "state_key": ""}
         depth = 1
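
Since `room_config` is now simply the `/createRoom` request body described in the docstring above, the keys this method consumes arrive straight from the client. An illustrative body (the values are examples; the keys mirror the spec link above and the handling visible elsewhere in this diff):

room_config = {
    "visibility": "private",       # drives the default preset chosen below
    "preset": "private_chat",      # i.e. RoomCreationPreset.PRIVATE_CHAT; overrides the default
    "name": "Project X",           # emitted later as an m.room.name state event
    "topic": "Design discussion",  # emitted later as an m.room.topic state event
    "initial_state": [],           # extra state events to set in the new room
}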
@@ -1079,9 +1052,6 @@ class RoomCreationHandler:
         # created (but not persisted to the db) to determine state for future created events
         # (as this info can't be pulled from the db)
         state_map: MutableStateMap[str] = {}
-        # current_state_group of last event created. Used for computing event context of
-        # events to be batched
-        current_state_group: Optional[int] = None

         def create_event_dict(etype: str, content: JsonDict, **kwargs: Any) -> JsonDict:
             e = {"type": etype, "content": content}
@@ -1135,6 +1105,14 @@ class RoomCreationHandler:

             return new_event, new_unpersisted_context

+        visibility = room_config.get("visibility", "private")
+        preset_config = room_config.get(
+            "preset",
+            RoomCreationPreset.PRIVATE_CHAT
+            if visibility == "private"
+            else RoomCreationPreset.PUBLIC_CHAT,
+        )
+
         try:
             config = self._presets_dict[preset_config]
         except KeyError:
@@ -1286,6 +1264,24 @@ class RoomCreationHandler:
             )
             events_to_send.append((encryption_event, encryption_context))

+        if "name" in room_config:
+            name = room_config["name"]
+            name_event, name_context = await create_event(
+                EventTypes.Name,
+                {"name": name},
+                True,
+            )
+            events_to_send.append((name_event, name_context))
+
+        if "topic" in room_config:
+            topic = room_config["topic"]
+            topic_event, topic_context = await create_event(
+                EventTypes.Topic,
+                {"topic": topic},
+                True,
+            )
+            events_to_send.append((topic_event, topic_context))
+
         datastore = self.hs.get_datastores().state
         events_and_context = (
             await UnpersistedEventContext.batch_persist_unpersisted_contexts(
@@ -1226,6 +1226,10 @@ class SyncHandler:
                 continue

             event_with_membership_auth = events_with_membership_auth[member]
+            is_create = (
+                event_with_membership_auth.is_state()
+                and event_with_membership_auth.type == EventTypes.Create
+            )
             is_join = (
                 event_with_membership_auth.is_state()
                 and event_with_membership_auth.type == EventTypes.Member
@@ -1233,9 +1237,10 @@ class SyncHandler:
                 and event_with_membership_auth.content.get("membership")
                 == Membership.JOIN
             )
-            if not is_join:
+            if not is_create and not is_join:
                 # The event must include the desired membership as an auth event, unless
-                # it's the first join event for a given user.
+                # it's the `m.room.create` event for a room or the first join event for
+                # a given user.
                 missing_members.add(member)
                 auth_event_ids.update(event_with_membership_auth.auth_event_ids())
@@ -13,21 +13,52 @@
 # limitations under the License.

 import logging
+from http import HTTPStatus
 from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set, Tuple

+from twisted.internet.interfaces import IDelayedCall
+
 import synapse.metrics
 from synapse.api.constants import EventTypes, HistoryVisibility, JoinRules, Membership
+from synapse.api.errors import Codes, SynapseError
 from synapse.handlers.state_deltas import MatchChange, StateDeltasHandler
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage.databases.main.user_directory import SearchResult
 from synapse.storage.roommember import ProfileInfo
+from synapse.types import UserID
 from synapse.util.metrics import Measure
+from synapse.util.retryutils import NotRetryingDestination
+from synapse.util.stringutils import non_null_str_or_none

 if TYPE_CHECKING:
     from synapse.server import HomeServer

 logger = logging.getLogger(__name__)

+# Don't refresh a stale user directory entry, using a Federation /profile request,
+# for 60 seconds. This gives time for other state events to arrive (which will
+# then be coalesced such that only one /profile request is made).
+USER_DIRECTORY_STALE_REFRESH_TIME_MS = 60 * 1000
+
+# Maximum number of remote servers that we will attempt to refresh profiles for
+# in one go.
+MAX_SERVERS_TO_REFRESH_PROFILES_FOR_IN_ONE_GO = 5
+
+# As long as we have servers to refresh (without backoff), keep adding more
+# every 15 seconds.
+INTERVAL_TO_ADD_MORE_SERVERS_TO_REFRESH_PROFILES = 15
+
+
+def calculate_time_of_next_retry(now_ts: int, retry_count: int) -> int:
+    """
+    Calculates the time of the next retry, given `now_ts` in ms and the number
+    of failures encountered thus far.
+
+    Currently the sequence goes:
+    1 min, 5 min, 25 min, 2 hour, 10 hour, 52 hour, 10 day, 7.75 week
+    """
+    return now_ts + 60_000 * (5 ** min(retry_count, 7))
+

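
The documented sequence follows directly from the formula; a standalone sanity check (this copies the function above rather than importing it):

def calculate_time_of_next_retry(now_ts: int, retry_count: int) -> int:
    return now_ts + 60_000 * (5 ** min(retry_count, 7))

for retries in range(9):
    print(retries, calculate_time_of_next_retry(0, retries) / 60_000, "minutes")
# 0 -> 1 min, 1 -> 5 min, 2 -> 25 min, 3 -> 125 min (~2 h), 4 -> 625 min (~10.4 h),
# 5 -> 3125 min (~52 h), 6 -> 15625 min (~10.8 days), 7 -> 78125 min (~7.75 weeks);
# min(retry_count, 7) caps every subsequent retry at the 7.75-week interval.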
 class UserDirectoryHandler(StateDeltasHandler):
     """Handles queries and updates for the user_directory.
@@ -64,12 +95,24 @@ class UserDirectoryHandler(StateDeltasHandler):
         self.update_user_directory = hs.config.worker.should_update_user_directory
         self.search_all_users = hs.config.userdirectory.user_directory_search_all_users
         self.spam_checker = hs.get_spam_checker()
+        self._hs = hs

         # The current position in the current_state_delta stream
         self.pos: Optional[int] = None

         # Guard to ensure we only process deltas one at a time
         self._is_processing = False

+        # Guard to ensure we only have one process for refreshing remote profiles
+        self._is_refreshing_remote_profiles = False
+        # Handle to cancel the `call_later` of `kick_off_remote_profile_refresh_process`
+        self._refresh_remote_profiles_call_later: Optional[IDelayedCall] = None
+
+        # Guard to ensure we only have one process for refreshing remote profiles
+        # for the given servers.
+        # Set of server names.
+        self._is_refreshing_remote_profiles_for_servers: Set[str] = set()
+
         if self.update_user_directory:
             self.notifier.add_replication_callback(self.notify_new_event)

@@ -77,6 +120,11 @@ class UserDirectoryHandler(StateDeltasHandler):
             # we start populating the user directory
             self.clock.call_later(0, self.notify_new_event)

+            # Kick off the profile refresh process on startup
+            self._refresh_remote_profiles_call_later = self.clock.call_later(
+                10, self.kick_off_remote_profile_refresh_process
+            )
+
     async def search_users(
         self, user_id: str, search_term: str, limit: int
     ) -> SearchResult:
@@ -200,8 +248,8 @@ class UserDirectoryHandler(StateDeltasHandler):
                 typ = delta["type"]
                 state_key = delta["state_key"]
                 room_id = delta["room_id"]
-                event_id = delta["event_id"]
-                prev_event_id = delta["prev_event_id"]
+                event_id: Optional[str] = delta["event_id"]
+                prev_event_id: Optional[str] = delta["prev_event_id"]

                 logger.debug("Handling: %r %r, %s", typ, state_key, event_id)

@@ -297,8 +345,8 @@ class UserDirectoryHandler(StateDeltasHandler):
     async def _handle_room_membership_event(
         self,
         room_id: str,
-        prev_event_id: str,
-        event_id: str,
+        prev_event_id: Optional[str],
+        event_id: Optional[str],
         state_key: str,
     ) -> None:
         """Process a single room membership event.
@@ -348,7 +396,8 @@ class UserDirectoryHandler(StateDeltasHandler):
             # Handle any profile changes for remote users.
             # (For local users the rest of the application calls
             # `handle_local_profile_change`.)
-            if is_remote:
+            # Only process if there is an event_id.
+            if is_remote and event_id is not None:
                 await self._handle_possible_remote_profile_change(
                     state_key, room_id, prev_event_id, event_id
                 )
@@ -356,29 +405,13 @@ class UserDirectoryHandler(StateDeltasHandler):
             # This may be the first time we've seen a remote user. If
             # so, ensure we have a directory entry for them. (For local users,
             # the rest of the application calls `handle_local_profile_change`.)
-            if is_remote:
-                await self._upsert_directory_entry_for_remote_user(state_key, event_id)
+            # Only process if there is an event_id.
+            if is_remote and event_id is not None:
+                await self._handle_possible_remote_profile_change(
+                    state_key, room_id, None, event_id
+                )
             await self._track_user_joined_room(room_id, state_key)

-    async def _upsert_directory_entry_for_remote_user(
-        self, user_id: str, event_id: str
-    ) -> None:
-        """A remote user has just joined a room. Ensure they have an entry in
-        the user directory. The caller is responsible for making sure they're
-        remote.
-        """
-        event = await self.store.get_event(event_id, allow_none=True)
-        # It isn't expected for this event to not exist, but we
-        # don't want the entire background process to break.
-        if event is None:
-            return
-
-        logger.debug("Adding new user to dir, %r", user_id)
-
-        await self.store.update_profile_in_user_dir(
-            user_id, event.content.get("displayname"), event.content.get("avatar_url")
-        )
-
     async def _track_user_joined_room(self, room_id: str, joining_user_id: str) -> None:
         """Someone's just joined a room. Update `users_in_public_rooms` or
         `users_who_share_private_rooms` as appropriate.
@@ -460,14 +493,17 @@ class UserDirectoryHandler(StateDeltasHandler):
         user_id: str,
         room_id: str,
         prev_event_id: Optional[str],
-        event_id: Optional[str],
+        event_id: str,
     ) -> None:
         """Check member event changes for any profile changes and update the
         database if there are. This is intended for remote users only. The caller
         is responsible for checking that the given user is remote.
         """
-        if not prev_event_id or not event_id:
-            return
-
+        if not prev_event_id:
+            # If we don't have an older event to fall back on, just fetch the same
+            # event itself.
+            prev_event_id = event_id
+
         prev_event = await self.store.get_event(prev_event_id, allow_none=True)
         event = await self.store.get_event(event_id, allow_none=True)
@@ -478,17 +514,236 @@ class UserDirectoryHandler(StateDeltasHandler):
         if event.membership != Membership.JOIN:
             return

+        is_public = await self.store.is_room_world_readable_or_publicly_joinable(
+            room_id
+        )
+        if not is_public:
+            # Don't collect user profiles from private rooms as they are not guaranteed
+            # to be the same as the user's global profile.
+            now_ts = self.clock.time_msec()
+            await self.store.set_remote_user_profile_in_user_dir_stale(
+                user_id,
+                next_try_at_ms=now_ts + USER_DIRECTORY_STALE_REFRESH_TIME_MS,
+                retry_counter=0,
+            )
+            # Schedule a wake-up to refresh the user directory for this server.
+            # We intentionally wake up this server directly because we don't want
+            # other servers ahead of it in the queue to get in the way of updating
+            # the profile if the server only just sent us an event.
+            self.clock.call_later(
+                USER_DIRECTORY_STALE_REFRESH_TIME_MS // 1000 + 1,
+                self.kick_off_remote_profile_refresh_process_for_remote_server,
+                UserID.from_string(user_id).domain,
+            )
+            # Schedule a wake-up to handle any backoffs that may occur in the future.
+            self.clock.call_later(
+                2 * USER_DIRECTORY_STALE_REFRESH_TIME_MS // 1000 + 1,
+                self.kick_off_remote_profile_refresh_process,
+            )
+            return
+
         prev_name = prev_event.content.get("displayname")
         new_name = event.content.get("displayname")
-        # If the new name is an unexpected form, do not update the directory.
+        # If the new name is an unexpected form, replace with None.
         if not isinstance(new_name, str):
-            new_name = prev_name
+            new_name = None

         prev_avatar = prev_event.content.get("avatar_url")
         new_avatar = event.content.get("avatar_url")
-        # If the new avatar is an unexpected form, do not update the directory.
+        # If the new avatar is an unexpected form, replace with None.
         if not isinstance(new_avatar, str):
-            new_avatar = prev_avatar
+            new_avatar = None

-        if prev_name != new_name or prev_avatar != new_avatar:
+        if (
+            prev_name != new_name
+            or prev_avatar != new_avatar
+            or prev_event_id == event_id
+        ):
+            # Only update if something has changed, or we didn't have a previous event
+            # in the first place.
             await self.store.update_profile_in_user_dir(user_id, new_name, new_avatar)
+    def kick_off_remote_profile_refresh_process(self) -> None:
+        """Called when there may be remote users with stale profiles to be refreshed"""
+        if not self.update_user_directory:
+            return
+
+        if self._is_refreshing_remote_profiles:
+            return
+
+        if self._refresh_remote_profiles_call_later:
+            if self._refresh_remote_profiles_call_later.active():
+                self._refresh_remote_profiles_call_later.cancel()
+            self._refresh_remote_profiles_call_later = None
+
+        async def process() -> None:
+            try:
+                await self._unsafe_refresh_remote_profiles()
+            finally:
+                self._is_refreshing_remote_profiles = False
+
+        self._is_refreshing_remote_profiles = True
+        run_as_background_process("user_directory.refresh_remote_profiles", process)
+
+    async def _unsafe_refresh_remote_profiles(self) -> None:
+        limit = MAX_SERVERS_TO_REFRESH_PROFILES_FOR_IN_ONE_GO - len(
+            self._is_refreshing_remote_profiles_for_servers
+        )
+        if limit <= 0:
+            # nothing to do: already refreshing the maximum number of servers
+            # at once.
+            # Come back later.
+            self._refresh_remote_profiles_call_later = self.clock.call_later(
+                INTERVAL_TO_ADD_MORE_SERVERS_TO_REFRESH_PROFILES,
+                self.kick_off_remote_profile_refresh_process,
+            )
+            return
+
+        servers_to_refresh = (
+            await self.store.get_remote_servers_with_profiles_to_refresh(
+                now_ts=self.clock.time_msec(), limit=limit
+            )
+        )
+
+        if not servers_to_refresh:
+            # Do we have any backing-off servers that we should try again
+            # for eventually?
+            # By setting `now` to a point in the far future, we can ask for
+            # which server/user is next to be refreshed, even though it is
+            # not actually refreshable *now*.
+            end_of_time = 1 << 62
+            backing_off_servers = (
+                await self.store.get_remote_servers_with_profiles_to_refresh(
+                    now_ts=end_of_time, limit=1
+                )
+            )
+            if backing_off_servers:
+                # Find out when the next user is refreshable and schedule a
+                # refresh then.
+                backing_off_server_name = backing_off_servers[0]
+                users = await self.store.get_remote_users_to_refresh_on_server(
+                    backing_off_server_name, now_ts=end_of_time, limit=1
+                )
+                if not users:
+                    return
+                _, _, next_try_at_ts = users[0]
+                self._refresh_remote_profiles_call_later = self.clock.call_later(
+                    ((next_try_at_ts - self.clock.time_msec()) // 1000) + 2,
+                    self.kick_off_remote_profile_refresh_process,
+                )
+
+            return
+
+        for server_to_refresh in servers_to_refresh:
+            self.kick_off_remote_profile_refresh_process_for_remote_server(
+                server_to_refresh
+            )
+
+        self._refresh_remote_profiles_call_later = self.clock.call_later(
+            INTERVAL_TO_ADD_MORE_SERVERS_TO_REFRESH_PROFILES,
+            self.kick_off_remote_profile_refresh_process,
+        )
+    def kick_off_remote_profile_refresh_process_for_remote_server(
+        self, server_name: str
+    ) -> None:
+        """Called when there may be remote users with stale profiles to be refreshed
+        on the given server."""
+        if not self.update_user_directory:
+            return
+
+        if server_name in self._is_refreshing_remote_profiles_for_servers:
+            return
+
+        async def process() -> None:
+            try:
+                await self._unsafe_refresh_remote_profiles_for_remote_server(
+                    server_name
+                )
+            finally:
+                self._is_refreshing_remote_profiles_for_servers.remove(server_name)
+
+        self._is_refreshing_remote_profiles_for_servers.add(server_name)
+        run_as_background_process(
+            "user_directory.refresh_remote_profiles_for_remote_server", process
+        )
+
+    async def _unsafe_refresh_remote_profiles_for_remote_server(
+        self, server_name: str
+    ) -> None:
+        logger.info("Refreshing profiles in user directory for %s", server_name)
+
+        while True:
+            # Get a handful of users to process.
+            next_batch = await self.store.get_remote_users_to_refresh_on_server(
+                server_name, now_ts=self.clock.time_msec(), limit=10
+            )
+            if not next_batch:
+                # Finished for now
+                return
+
+            for user_id, retry_counter, _ in next_batch:
+                # Request the profile of the user.
+                try:
+                    profile = await self._hs.get_profile_handler().get_profile(
+                        user_id, ignore_backoff=False
+                    )
+                except NotRetryingDestination as e:
+                    logger.info(
+                        "Failed to refresh profile for %r because the destination is undergoing backoff",
+                        user_id,
+                    )
+                    # As a special-case, we back off until the destination is no longer
+                    # backed off from.
+                    await self.store.set_remote_user_profile_in_user_dir_stale(
+                        user_id,
+                        e.retry_last_ts + e.retry_interval,
+                        retry_counter=retry_counter + 1,
+                    )
+                    continue
+                except SynapseError as e:
+                    if e.code == HTTPStatus.NOT_FOUND and e.errcode == Codes.NOT_FOUND:
+                        # The profile doesn't exist.
+                        # TODO Does this mean we should clear it from our user
+                        #      directory?
+                        await self.store.clear_remote_user_profile_in_user_dir_stale(
+                            user_id
+                        )
+                        logger.warning(
+                            "Refresh of remote profile %r: not found (%r)",
+                            user_id,
+                            e.msg,
+                        )
+                        continue
+
+                    logger.warning(
+                        "Failed to refresh profile for %r because %r", user_id, e
+                    )
+                    await self.store.set_remote_user_profile_in_user_dir_stale(
+                        user_id,
+                        calculate_time_of_next_retry(
+                            self.clock.time_msec(), retry_counter + 1
+                        ),
+                        retry_counter=retry_counter + 1,
+                    )
+                    continue
+                except Exception:
+                    logger.error(
+                        "Failed to refresh profile for %r due to unhandled exception",
+                        user_id,
+                        exc_info=True,
+                    )
+                    await self.store.set_remote_user_profile_in_user_dir_stale(
+                        user_id,
+                        calculate_time_of_next_retry(
+                            self.clock.time_msec(), retry_counter + 1
+                        ),
+                        retry_counter=retry_counter + 1,
+                    )
+                    continue
+
+                await self.store.update_profile_in_user_dir(
+                    user_id,
+                    display_name=non_null_str_or_none(profile.get("displayname")),
+                    avatar_url=non_null_str_or_none(profile.get("avatar_url")),
+                )
@@ -268,8 +268,8 @@ class BlacklistingAgentWrapper(Agent):
     def __init__(
         self,
         agent: IAgent,
-        ip_blacklist: IPSet,
         ip_whitelist: Optional[IPSet] = None,
+        ip_blacklist: Optional[IPSet] = None,
     ):
         """
         Args:
@@ -291,7 +291,9 @@ class BlacklistingAgentWrapper(Agent):
        h = urllib.parse.urlparse(uri.decode("ascii"))

         try:
-            ip_address = IPAddress(h.hostname)
+            # h.hostname is Optional[str], None raises an AddrFormatError, so
+            # this is safe even though IPAddress requires a str.
+            ip_address = IPAddress(h.hostname)  # type: ignore[arg-type]
         except AddrFormatError:
             # Not an IP
             pass
@@ -388,8 +390,8 @@ class SimpleHttpClient:
             # by the DNS resolution.
             self.agent = BlacklistingAgentWrapper(
                 self.agent,
-                ip_whitelist=self._ip_whitelist,
                 ip_blacklist=self._ip_blacklist,
+                ip_whitelist=self._ip_whitelist,
             )

     async def request(

@@ -87,7 +87,7 @@ class MatrixFederationAgent:
         reactor: ISynapseReactor,
         tls_client_options_factory: Optional[FederationPolicyForHTTPS],
         user_agent: bytes,
-        ip_whitelist: IPSet,
+        ip_whitelist: Optional[IPSet],
         ip_blacklist: IPSet,
         _srv_resolver: Optional[SrvResolver] = None,
         _well_known_resolver: Optional[WellKnownResolver] = None,
@@ -73,13 +73,6 @@ from synapse.events.third_party_rules import (
     ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK,
 )
 from synapse.handlers.account_data import ON_ACCOUNT_DATA_UPDATED_CALLBACK
-from synapse.handlers.account_validity import (
-    IS_USER_EXPIRED_CALLBACK,
-    ON_LEGACY_ADMIN_REQUEST,
-    ON_LEGACY_RENEW_CALLBACK,
-    ON_LEGACY_SEND_MAIL_CALLBACK,
-    ON_USER_REGISTRATION_CALLBACK,
-)
 from synapse.handlers.auth import (
     CHECK_3PID_AUTH_CALLBACK,
     CHECK_AUTH_CALLBACK,
@@ -105,6 +98,13 @@ from synapse.logging.context import (
     run_in_background,
 )
 from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.module_api.callbacks.account_validity_callbacks import (
+    IS_USER_EXPIRED_CALLBACK,
+    ON_LEGACY_ADMIN_REQUEST,
+    ON_LEGACY_RENEW_CALLBACK,
+    ON_LEGACY_SEND_MAIL_CALLBACK,
+    ON_USER_REGISTRATION_CALLBACK,
+)
 from synapse.rest.client.login import LoginResponse
 from synapse.storage import DataStore
 from synapse.storage.background_updates import (
@@ -250,6 +250,7 @@ class ModuleApi:
         self._push_rules_handler = hs.get_push_rules_handler()
         self._device_handler = hs.get_device_handler()
         self.custom_template_dir = hs.config.server.custom_template_directory
+        self._callbacks = hs.get_module_api_callbacks()

         try:
             app_name = self._hs.config.email.email_app_name
@@ -271,7 +272,6 @@ class ModuleApi:
         self._account_data_manager = AccountDataManager(hs)

         self._spam_checker = hs.get_spam_checker()
-        self._account_validity_handler = hs.get_account_validity_handler()
         self._third_party_event_rules = hs.get_third_party_event_rules()
         self._password_auth_provider = hs.get_password_auth_provider()
         self._presence_router = hs.get_presence_router()
@@ -332,7 +332,7 @@ class ModuleApi:

         Added in Synapse v1.39.0.
         """
-        return self._account_validity_handler.register_account_validity_callbacks(
+        return self._callbacks.account_validity.register_callbacks(
             is_user_expired=is_user_expired,
             on_user_registration=on_user_registration,
             on_legacy_send_mail=on_legacy_send_mail,
synapse/module_api/callbacks/__init__.py (new file, 22 lines)
@@ -0,0 +1,22 @@
+# Copyright 2023 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from synapse.module_api.callbacks.account_validity_callbacks import (
+    AccountValidityModuleApiCallbacks,
+)
+
+
+class ModuleApiCallbacks:
+    def __init__(self) -> None:
+        self.account_validity = AccountValidityModuleApiCallbacks()
synapse/module_api/callbacks/account_validity_callbacks.py (new file, 93 lines)
@@ -0,0 +1,93 @@
+# Copyright 2023 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from typing import Awaitable, Callable, List, Optional, Tuple
+
+from twisted.web.http import Request
+
+logger = logging.getLogger(__name__)
+
+# Types for callbacks to be registered via the module api
+IS_USER_EXPIRED_CALLBACK = Callable[[str], Awaitable[Optional[bool]]]
+ON_USER_REGISTRATION_CALLBACK = Callable[[str], Awaitable]
+# Temporary hooks to allow for a transition from `/_matrix/client` endpoints
+# to `/_synapse/client/account_validity`. See `register_callbacks` below.
+ON_LEGACY_SEND_MAIL_CALLBACK = Callable[[str], Awaitable]
+ON_LEGACY_RENEW_CALLBACK = Callable[[str], Awaitable[Tuple[bool, bool, int]]]
+ON_LEGACY_ADMIN_REQUEST = Callable[[Request], Awaitable]
+
+
+class AccountValidityModuleApiCallbacks:
+    def __init__(self) -> None:
+        self.is_user_expired_callbacks: List[IS_USER_EXPIRED_CALLBACK] = []
+        self.on_user_registration_callbacks: List[ON_USER_REGISTRATION_CALLBACK] = []
+        self.on_legacy_send_mail_callback: Optional[ON_LEGACY_SEND_MAIL_CALLBACK] = None
+        self.on_legacy_renew_callback: Optional[ON_LEGACY_RENEW_CALLBACK] = None
+
+        # The legacy admin requests callback isn't a protected attribute because we need
+        # to access it from the admin servlet, which is outside of this handler.
+        self.on_legacy_admin_request_callback: Optional[ON_LEGACY_ADMIN_REQUEST] = None
+
+    def register_callbacks(
+        self,
+        is_user_expired: Optional[IS_USER_EXPIRED_CALLBACK] = None,
+        on_user_registration: Optional[ON_USER_REGISTRATION_CALLBACK] = None,
+        on_legacy_send_mail: Optional[ON_LEGACY_SEND_MAIL_CALLBACK] = None,
+        on_legacy_renew: Optional[ON_LEGACY_RENEW_CALLBACK] = None,
+        on_legacy_admin_request: Optional[ON_LEGACY_ADMIN_REQUEST] = None,
+    ) -> None:
+        """Register callbacks from module for each hook."""
+        if is_user_expired is not None:
+            self.is_user_expired_callbacks.append(is_user_expired)
+
+        if on_user_registration is not None:
+            self.on_user_registration_callbacks.append(on_user_registration)
+
+        # The builtin account validity feature exposes 3 endpoints (send_mail, renew, and
+        # an admin one). As part of moving the feature into a module, we need to change
+        # the path from /_matrix/client/unstable/account_validity/... to
+        # /_synapse/client/account_validity, because:
+        #
+        # * the feature isn't part of the Matrix spec thus shouldn't live under /_matrix
+        # * the way we register servlets means that modules can't register resources
+        #   under /_matrix/client
+        #
+        # We need to allow for a transition period between the old and new endpoints
+        # in order to allow for clients to update (and for emails to be processed).
+        #
+        # Once the email-account-validity module is loaded, it will take control of account
+        # validity by moving the rows from our `account_validity` table into its own table.
+        #
+        # Therefore, we need to allow modules (in practice just the one implementing the
+        # email-based account validity) to temporarily hook into the legacy endpoints so we
+        # can route the traffic coming into the old endpoints into the module, which is
+        # why we have the following three temporary hooks.
+        if on_legacy_send_mail is not None:
+            if self.on_legacy_send_mail_callback is not None:
+                raise RuntimeError("Tried to register on_legacy_send_mail twice")
+
+            self.on_legacy_send_mail_callback = on_legacy_send_mail
+
+        if on_legacy_renew is not None:
+            if self.on_legacy_renew_callback is not None:
+                raise RuntimeError("Tried to register on_legacy_renew twice")
+
+            self.on_legacy_renew_callback = on_legacy_renew
+
+        if on_legacy_admin_request is not None:
+            if self.on_legacy_admin_request_callback is not None:
+                raise RuntimeError("Tried to register on_legacy_admin_request twice")
+
+            self.on_legacy_admin_request_callback = on_legacy_admin_request
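
For context, modules reach these hooks through `ModuleApi.register_account_validity_callbacks`, which (per the hunk above) now forwards to `register_callbacks` here. A minimal sketch of a module using it — the module class itself and its `(config, api)` constructor convention are illustrative, not part of this changeset:

from typing import Optional

class ExampleAccountValidityModule:
    def __init__(self, config: dict, api) -> None:  # `api` is a ModuleApi instance
        api.register_account_validity_callbacks(
            is_user_expired=self.is_user_expired,
        )

    async def is_user_expired(self, user_id: str) -> Optional[bool]:
        # Matches IS_USER_EXPIRED_CALLBACK above; returning None defers to
        # other callbacks or the default behaviour.
        return None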
@@ -18,16 +18,12 @@ from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Set, Tuple

 from twisted.internet import defer
 from twisted.internet.defer import Deferred
-from twisted.internet.interfaces import IAddress, IConnector
-from twisted.internet.protocol import ReconnectingClientFactory
-from twisted.python.failure import Failure

 from synapse.api.constants import EventTypes, Membership, ReceiptTypes
 from synapse.federation import send_queue
 from synapse.federation.sender import FederationSender
 from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable
 from synapse.metrics.background_process_metrics import run_as_background_process
-from synapse.replication.tcp.protocol import ClientReplicationStreamProtocol
 from synapse.replication.tcp.streams import (
     AccountDataStream,
     DeviceListsStream,
@@ -53,7 +49,6 @@ from synapse.util.async_helpers import Linearizer, timeout_deferred
 from synapse.util.metrics import Measure

 if TYPE_CHECKING:
-    from synapse.replication.tcp.handler import ReplicationCommandHandler
     from synapse.server import HomeServer

 logger = logging.getLogger(__name__)
@@ -62,52 +57,6 @@
 _WAIT_FOR_REPLICATION_TIMEOUT_SECONDS = 5


-class DirectTcpReplicationClientFactory(ReconnectingClientFactory):
-    """Factory for building connections to the master. Will reconnect if the
-    connection is lost.
-
-    Accepts a handler that is passed to `ClientReplicationStreamProtocol`.
-    """
-
-    initialDelay = 0.1
-    maxDelay = 1  # Try at least once every N seconds
-
-    def __init__(
-        self,
-        hs: "HomeServer",
-        client_name: str,
-        command_handler: "ReplicationCommandHandler",
-    ):
-        self.client_name = client_name
-        self.command_handler = command_handler
-        self.server_name = hs.config.server.server_name
-        self.hs = hs
-        self._clock = hs.get_clock()  # As self.clock is defined in super class
-
-        hs.get_reactor().addSystemEventTrigger("before", "shutdown", self.stopTrying)
-
-    def startedConnecting(self, connector: IConnector) -> None:
-        logger.info("Connecting to replication: %r", connector.getDestination())
-
-    def buildProtocol(self, addr: IAddress) -> ClientReplicationStreamProtocol:
-        logger.info("Connected to replication: %r", addr)
-        return ClientReplicationStreamProtocol(
-            self.hs,
-            self.client_name,
-            self.server_name,
-            self._clock,
-            self.command_handler,
-        )
-
-    def clientConnectionLost(self, connector: IConnector, reason: Failure) -> None:
-        logger.error("Lost replication conn: %r", reason)
-        ReconnectingClientFactory.clientConnectionLost(self, connector, reason)
-
-    def clientConnectionFailed(self, connector: IConnector, reason: Failure) -> None:
-        logger.error("Failed to connect to replication: %r", reason)
-        ReconnectingClientFactory.clientConnectionFailed(self, connector, reason)
-
-
 class ReplicationDataHandler:
     """Handles incoming stream updates from replication.
@@ -422,6 +422,21 @@ class RemoteServerUpCommand(_SimpleCommand):
     NAME = "REMOTE_SERVER_UP"


+class ReadyToRefreshStaleUserDirectoryProfilesCommand(_SimpleCommand):
+    """
+    Sent when a worker needs to tell the user directory worker that there are
+    stale remote user profiles that require refreshing.
+
+    Triggered when the user directory background update has been completed.
+
+    Format::
+
+        USER_DIRECTORY_READY_TO_REFRESH_STALE_REMOTE_PROFILES ''
+    """
+
+    NAME = "USER_DIRECTORY_READY_TO_REFRESH_STALE_REMOTE_PROFILES"
+
+
 _COMMANDS: Tuple[Type[Command], ...] = (
     ServerCommand,
     RdataCommand,
@@ -435,6 +450,7 @@ _COMMANDS: Tuple[Type[Command], ...] = (
     UserIpCommand,
     RemoteServerUpCommand,
     ClearUserSyncsCommand,
+    ReadyToRefreshStaleUserDirectoryProfilesCommand,
 )

 # Map of command name to command type.
@@ -448,6 +464,7 @@ VALID_SERVER_COMMANDS = (
     ErrorCommand.NAME,
     PingCommand.NAME,
     RemoteServerUpCommand.NAME,
+    ReadyToRefreshStaleUserDirectoryProfilesCommand.NAME,
 )

 # The commands the client is allowed to send
@@ -461,6 +478,7 @@ VALID_CLIENT_COMMANDS = (
     UserIpCommand.NAME,
     ErrorCommand.NAME,
     RemoteServerUpCommand.NAME,
+    ReadyToRefreshStaleUserDirectoryProfilesCommand.NAME,
 )
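
As a `_SimpleCommand`, the new command serialises as its NAME followed by a free-form string payload, which is empty here (the `Format::` line in the docstring above). A hedged sketch of emitting the wake-up from a worker — `get_replication_command_handler()` and `send_command()` are existing Synapse entry points, but this helper function itself is illustrative, not from this changeset:

from synapse.replication.tcp.commands import (
    ReadyToRefreshStaleUserDirectoryProfilesCommand,
)

def notify_user_directory_worker(hs) -> None:
    # Wire format: "USER_DIRECTORY_READY_TO_REFRESH_STALE_REMOTE_PROFILES ''"
    hs.get_replication_command_handler().send_command(
        ReadyToRefreshStaleUserDirectoryProfilesCommand("")
    )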
@@ -138,10 +138,15 @@ class BackgroundUpdateStartJobRestServlet(RestServlet):
                     "populate_user_directory_process_rooms",
                 ),
                 (
-                    "populate_user_directory_cleanup",
+                    "populate_user_directory_process_remote_users",
                     "{}",
                     "populate_user_directory_process_users",
                 ),
+                (
+                    "populate_user_directory_cleanup",
+                    "{}",
+                    "populate_user_directory_process_remote_users",
+                ),
             ]
         else:
             raise SynapseError(HTTPStatus.BAD_REQUEST, "Invalid job_name")
@@ -683,19 +683,18 @@ class AccountValidityRenewServlet(RestServlet):
     PATTERNS = admin_patterns("/account_validity/validity$")

     def __init__(self, hs: "HomeServer"):
-        self.account_activity_handler = hs.get_account_validity_handler()
+        self.account_validity_handler = hs.get_account_validity_handler()
+        self.account_validity_module_callbacks = (
+            hs.get_module_api_callbacks().account_validity
+        )
         self.auth = hs.get_auth()

     async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
         await assert_requester_is_admin(self.auth, request)

-        if self.account_activity_handler.on_legacy_admin_request_callback:
-            expiration_ts = (
-                await (
-                    self.account_activity_handler.on_legacy_admin_request_callback(
-                        request
-                    )
-                )
-            )
+        if self.account_validity_module_callbacks.on_legacy_admin_request_callback:
+            expiration_ts = await self.account_validity_module_callbacks.on_legacy_admin_request_callback(
+                request
+            )
         else:
             body = parse_json_object_from_request(request)
@@ -706,7 +705,7 @@ class AccountValidityRenewServlet(RestServlet):
                     "Missing property 'user_id' in the request body",
                 )

-            expiration_ts = await self.account_activity_handler.renew_account_for_user(
+            expiration_ts = await self.account_validity_handler.renew_account_for_user(
                 body["user_id"],
                 body.get("expiration_ts"),
                 not body.get("enable_renewal_emails", True),
@@ -23,6 +23,8 @@
 import functools
 import logging
 from typing import TYPE_CHECKING, Callable, Dict, List, Optional, TypeVar, cast

+from typing_extensions import TypeAlias
+
 from twisted.internet.interfaces import IOpenSSLContextFactory
 from twisted.internet.tcp import Port
 from twisted.web.iweb import IPolicyForHTTPS
@@ -108,6 +110,7 @@
 from synapse.http.matrixfederationclient import MatrixFederationHttpClient
 from synapse.media.media_repository import MediaRepository
 from synapse.metrics.common_usage_metrics import CommonUsageMetricsManager
 from synapse.module_api import ModuleApi
+from synapse.module_api.callbacks import ModuleApiCallbacks
 from synapse.notifier import Notifier, ReplicationNotifier
 from synapse.push.bulk_push_rule_evaluator import BulkPushRuleEvaluator
 from synapse.push.pusherpool import PusherPool
@@ -142,10 +145,31 @@
     from synapse.handlers.saml import SamlHandler


-T = TypeVar("T")
+# The annotation for `cache_in_self` used to be
+#     def (builder: Callable[["HomeServer"],T]) -> Callable[["HomeServer"],T]
+# which mypy was happy with.
+#
+# But PyCharm was confused by this. If `foo` was decorated by `@cache_in_self`, then
+# an expression like `hs.foo()`
+#
+# - would erroneously warn that we hadn't provided a `hs` argument to foo (PyCharm
+#   confused about boundmethods and unbound methods?), and
+# - would be considered to have type `Any`, making for a poor autocomplete and
+#   cross-referencing experience.
+#
+# Instead, use a typevar `F` to express that `@cache_in_self` returns exactly the
+# same type it receives. This isn't strictly true [*], but it's more than good
+# enough to keep PyCharm and mypy happy.
+#
+# [*]: (e.g. `builder` could be an object with a __call__ attribute rather than a
+#      types.FunctionType instance, whereas the return value is always a
+#      types.FunctionType instance.)
+
+T: TypeAlias = object
+F = TypeVar("F", bound=Callable[["HomeServer"], T])


-def cache_in_self(builder: Callable[["HomeServer"], T]) -> Callable[["HomeServer"], T]:
+def cache_in_self(builder: F) -> F:
     """Wraps a function called e.g. `get_foo`, checking if `self.foo` exists and
     returning if so. If not, calls the given function and sets `self.foo` to it.
@@ -183,7 +207,7 @@
         return dep

-    return _get
+    return cast(F, _get)


 class HomeServer(metaclass=abc.ABCMeta):
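
A standalone sketch of the trick described in that comment block — greatly simplified from the real decorator (which also guards against cyclic dependencies), but showing why annotating the decorator as `F -> F` preserves each getter's precise return type:

from typing import Callable, TypeVar, cast
from typing_extensions import TypeAlias

T: TypeAlias = object
F = TypeVar("F", bound=Callable[["Server"], T])

def cache_in_self(builder: F) -> F:
    attr = "_" + builder.__name__.replace("get_", "")

    def _get(self: "Server") -> T:
        if getattr(self, attr, None) is None:
            setattr(self, attr, builder(self))  # build once, cache on self
        return getattr(self, attr)

    # The cast tells type checkers the wrapper has the builder's exact type.
    return cast(F, _get)

class Clock:
    pass

class Server:
    @cache_in_self
    def get_clock(self) -> Clock:  # server.get_clock() keeps the type Clock, not Any
        return Clock()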
@@ -777,6 +801,10 @@ class HomeServer(metaclass=abc.ABCMeta):
     def get_module_api(self) -> ModuleApi:
         return ModuleApi(self, self.get_auth_handler())

+    @cache_in_self
+    def get_module_api_callbacks(self) -> ModuleApiCallbacks:
+        return ModuleApiCallbacks()
+
     @cache_in_self
     def get_account_data_handler(self) -> AccountDataHandler:
         return AccountDataHandler(self)
@@ -16,6 +16,7 @@
 import itertools
 import logging
 from typing import TYPE_CHECKING, Set

+from synapse.logging.context import nested_logging_context
 from synapse.storage.databases import Databases

 if TYPE_CHECKING:
@@ -33,8 +34,9 @@ class PurgeEventsStorageController:
     async def purge_room(self, room_id: str) -> None:
         """Deletes all record of a room"""

-        state_groups_to_delete = await self.stores.main.purge_room(room_id)
-        await self.stores.state.purge_room_state(room_id, state_groups_to_delete)
+        with nested_logging_context(room_id):
+            state_groups_to_delete = await self.stores.main.purge_room(room_id)
+            await self.stores.state.purge_room_state(room_id, state_groups_to_delete)

     async def purge_history(
         self, room_id: str, token: str, delete_local_events: bool
@@ -51,15 +53,17 @@ class PurgeEventsStorageController:
             (instead of just marking them as outliers and deleting their
             state groups).
         """
-        state_groups = await self.stores.main.purge_history(
-            room_id, token, delete_local_events
-        )
-
-        logger.info("[purge] finding state groups that can be deleted")
-
-        sg_to_delete = await self._find_unreferenced_groups(state_groups)
-
-        await self.stores.state.purge_unreferenced_state_groups(room_id, sg_to_delete)
+        with nested_logging_context(room_id):
+            state_groups = await self.stores.main.purge_history(
+                room_id, token, delete_local_events
+            )
+
+            logger.info("[purge] finding state groups that can be deleted")
+            sg_to_delete = await self._find_unreferenced_groups(state_groups)
+
+            await self.stores.state.purge_unreferenced_state_groups(
+                room_id, sg_to_delete
+            )

     async def _find_unreferenced_groups(self, state_groups: Set[int]) -> Set[int]:
         """Used when purging history to figure out which state groups can be
@@ -34,6 +34,7 @@ from typing import (
     Tuple,
     Type,
     TypeVar,
+    Union,
     cast,
     overload,
 )
@@ -100,6 +101,15 @@
 }


+class _PoolConnection(Connection):
+    """
+    A Connection from twisted.enterprise.adbapi.Connection.
+    """
+
+    def reconnect(self) -> None:
+        ...
+
+
 def make_pool(
     reactor: IReactorCore,
     db_config: DatabaseConnectionConfig,
@@ -856,7 +866,8 @@ class DatabasePool:
         try:
             with opentracing.start_active_span(f"db.{desc}"):
                 result = await self.runWithConnection(
-                    self.new_transaction,
+                    # mypy seems to have an issue with this, maybe a bug?
+                    self.new_transaction,  # type: ignore[arg-type]
                     desc,
                     after_callbacks,
                     async_after_callbacks,
@@ -892,7 +903,7 @@ class DatabasePool:

     async def runWithConnection(
         self,
-        func: Callable[..., R],
+        func: Callable[Concatenate[LoggingDatabaseConnection, P], R],
         *args: Any,
         db_autocommit: bool = False,
         isolation_level: Optional[int] = None,
@@ -926,7 +937,7 @@ class DatabasePool:

         start_time = monotonic_time()

-        def inner_func(conn, *args, **kwargs):
+        def inner_func(conn: _PoolConnection, *args: P.args, **kwargs: P.kwargs) -> R:
             # We shouldn't be in a transaction. If we are then something
             # somewhere hasn't committed after doing work. (This is likely only
             # possible during startup, as `run*` will ensure changes are
@@ -1019,7 +1030,7 @@ class DatabasePool:
         decoder: Optional[Callable[[Cursor], R]],
         query: str,
         *args: Any,
-    ) -> R:
+    ) -> Union[List[Tuple[Any, ...]], R]:
         """Runs a single query for a result set.

         Args:
@@ -1032,7 +1043,7 @@ class DatabasePool:
             The result of decoder(results)
         """

-        def interaction(txn):
+        def interaction(txn: LoggingTransaction) -> Union[List[Tuple[Any, ...]], R]:
             txn.execute(query, args)
             if decoder:
                 return decoder(txn)
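
The `Concatenate[LoggingDatabaseConnection, P]` annotation is the standard ParamSpec pattern for "first argument fixed, remaining arguments forwarded". A self-contained illustration in plain Python (the names here are generic, not Synapse's):

from typing import Callable, TypeVar
from typing_extensions import Concatenate, ParamSpec

P = ParamSpec("P")
R = TypeVar("R")

class Conn:
    ...

def run_with_conn(
    func: Callable[Concatenate[Conn, P], R], *args: P.args, **kwargs: P.kwargs
) -> R:
    conn = Conn()  # acquired from a pool in real code
    return func(conn, *args, **kwargs)  # type checker matches *args/**kwargs to func

def count_rows(conn: Conn, table: str, limit: int = 10) -> int:
    return 0  # placeholder query

n = run_with_conn(count_rows, "events", limit=5)  # checked end to end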
@@ -325,6 +325,7 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
         # We then run the same purge a second time without this isolation level to
         # purge any of those rows which were added during the first.

+        logger.info("[purge] Starting initial main purge of [1/2]")
         state_groups_to_delete = await self.db_pool.runInteraction(
             "purge_room",
             self._purge_room_txn,
@@ -332,6 +333,7 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
             isolation_level=IsolationLevel.READ_COMMITTED,
         )

+        logger.info("[purge] Starting secondary main purge of [2/2]")
         state_groups_to_delete.extend(
             await self.db_pool.runInteraction(
                 "purge_room",
@@ -339,6 +341,7 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
                 room_id=room_id,
             ),
         )
+        logger.info("[purge] Done with main purge")

         return state_groups_to_delete

@@ -376,7 +379,7 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
         )
         referenced_chain_id_tuples = list(txn)

-        logger.info("[purge] removing events from event_auth_chain_links")
+        logger.info("[purge] removing from event_auth_chain_links")
         txn.executemany(
             """
             DELETE FROM event_auth_chain_links WHERE
@@ -399,7 +402,7 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
             "rejections",
             "state_events",
         ):
-            logger.info("[purge] removing %s from %s", room_id, table)
+            logger.info("[purge] removing from %s", table)

             txn.execute(
                 """
@@ -454,7 +457,7 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
             # happy
             "rooms",
         ):
-            logger.info("[purge] removing %s from %s", room_id, table)
+            logger.info("[purge] removing from %s", table)
             txn.execute("DELETE FROM %s WHERE room_id=?" % (table,), (room_id,))

         # Other tables we do NOT need to clear out:
@@ -486,6 +489,4 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
         # that already exist.
         self._invalidate_cache_and_stream(txn, self.have_seen_event, (room_id,))

-        logger.info("[purge] done")
-
         return state_groups
@@ -224,7 +224,7 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore):

         await self.db_pool.runInteraction(
             "set_destination_retry_timings",
-            self._set_destination_retry_timings_native,
+            self._set_destination_retry_timings_txn,
             destination,
             failure_ts,
             retry_last_ts,
@@ -232,7 +232,7 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore):
             db_autocommit=True,  # Safe as it's a single upsert
         )

-    def _set_destination_retry_timings_native(
+    def _set_destination_retry_timings_txn(
         self,
         txn: LoggingTransaction,
         destination: str,
@@ -266,58 +266,6 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore):
             txn, self.get_destination_retry_timings, (destination,)
         )

-    def _set_destination_retry_timings_emulated(
-        self,
-        txn: LoggingTransaction,
-        destination: str,
-        failure_ts: Optional[int],
-        retry_last_ts: int,
-        retry_interval: int,
-    ) -> None:
-        self.database_engine.lock_table(txn, "destinations")
-
-        # We need to be careful here as the data may have changed from under us
-        # due to a worker setting the timings.
-
-        prev_row = self.db_pool.simple_select_one_txn(
-            txn,
-            table="destinations",
-            keyvalues={"destination": destination},
-            retcols=("failure_ts", "retry_last_ts", "retry_interval"),
-            allow_none=True,
-        )
-
-        if not prev_row:
-            self.db_pool.simple_insert_txn(
-                txn,
-                table="destinations",
-                values={
-                    "destination": destination,
-                    "failure_ts": failure_ts,
-                    "retry_last_ts": retry_last_ts,
-                    "retry_interval": retry_interval,
-                },
-            )
-        elif (
-            retry_interval == 0
-            or prev_row["retry_interval"] is None
-            or prev_row["retry_interval"] < retry_interval
-        ):
-            self.db_pool.simple_update_one_txn(
-                txn,
-                "destinations",
-                keyvalues={"destination": destination},
-                updatevalues={
-                    "failure_ts": failure_ts,
-                    "retry_last_ts": retry_last_ts,
-                    "retry_interval": retry_interval,
-                },
-            )
-
-        self._invalidate_cache_and_stream(
-            txn, self.get_destination_retry_timings, (destination,)
-        )
-
     async def store_destination_rooms_entries(
         self,
         destinations: Iterable[str],
@@ -27,6 +27,10 @@ from typing import (
     cast,
 )

+from synapse.replication.tcp.commands import (
+    ReadyToRefreshStaleUserDirectoryProfilesCommand,
+)
+
 try:
     # Figure out if ICU support is available for searching users.
     import icu
@@ -54,6 +58,7 @@ from synapse.storage.databases.main.state_deltas import StateDeltasStore
 from synapse.storage.engines import PostgresEngine, Sqlite3Engine
 from synapse.types import (
     JsonDict,
+    UserID,
     UserProfile,
     get_domain_from_id,
     get_localpart_from_id,
@@ -90,17 +95,32 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
         )
         self.db_pool.updates.register_background_update_handler(
             "populate_user_directory_process_users",
-            self._populate_user_directory_process_users,
+            self._populate_user_directory_process_local_users,
         )
+        self.db_pool.updates.register_background_update_handler(
+            "populate_user_directory_process_remote_users",
+            self._populate_user_directory_process_remote_users,
+        )
         self.db_pool.updates.register_background_update_handler(
             "populate_user_directory_cleanup", self._populate_user_directory_cleanup
         )

+    @staticmethod
+    def _delete_staging_area(txn: LoggingTransaction) -> None:
+        txn.execute("DROP TABLE IF EXISTS " + TEMP_TABLE + "_rooms")
+        txn.execute("DROP TABLE IF EXISTS " + TEMP_TABLE + "_users")
+        txn.execute(
+            "DROP TABLE IF EXISTS " + TEMP_TABLE + "_remote_users_needing_lookup"
+        )
+        txn.execute("DROP TABLE IF EXISTS " + TEMP_TABLE + "_position")
+
     async def _populate_user_directory_createtables(
         self, progress: JsonDict, batch_size: int
     ) -> int:
         # Get all the rooms that we want to process.
         def _make_staging_area(txn: LoggingTransaction) -> None:
+            # Clear out any tables if they already exist beforehand.
+            UserDirectoryBackgroundUpdateStore._delete_staging_area(txn)
+
             sql = (
                 "CREATE TABLE IF NOT EXISTS "
                 + TEMP_TABLE
@@ -141,6 +161,18 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
                 txn, TEMP_TABLE + "_users", keys=("user_id",), values=users
             )

+            # A table for storing a list of remote users that *may* need a remote
+            # lookup in order to obtain a public profile.
+            # The list should be compared against the user directory's cache
+            # to see whether any queries can be skipped because the remote user
+            # also appeared in a public room.
+            sql = (
+                "CREATE TABLE IF NOT EXISTS "
+                + TEMP_TABLE
+                + "_remote_users_needing_lookup(user_id TEXT PRIMARY KEY NOT NULL)"
+            )
+            txn.execute(sql)
+
         new_pos = await self.get_max_stream_id_in_current_state_deltas()
         await self.db_pool.runInteraction(
             "populate_user_directory_temp_build", _make_staging_area
@@ -167,13 +199,9 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
         )
         await self.update_user_directory_stream_pos(position)

-        def _delete_staging_area(txn: LoggingTransaction) -> None:
-            txn.execute("DROP TABLE IF EXISTS " + TEMP_TABLE + "_rooms")
-            txn.execute("DROP TABLE IF EXISTS " + TEMP_TABLE + "_users")
-            txn.execute("DROP TABLE IF EXISTS " + TEMP_TABLE + "_position")
-
         await self.db_pool.runInteraction(
-            "populate_user_directory_cleanup", _delete_staging_area
+            "populate_user_directory_cleanup",
+            UserDirectoryBackgroundUpdateStore._delete_staging_area,
         )

         await self.db_pool.updates._end_background_update(
@@ -261,10 +289,17 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
                 or await self.should_include_local_user_in_dir(user_id)
             }

+            # Determine whether the room is public
+            is_public = await self.is_room_world_readable_or_publicly_joinable(
+                room_id
+            )
+
+            remote_users_to_query_later = set()
+
             # Upsert a user_directory record for each remote user we see.
             for user_id, profile in users_with_profile.items():
                 # Local users are processed separately in
-                # `_populate_user_directory_users`; there we can read from
+                # `_populate_user_directory_local_users`; there we can read from
                 # the `profiles` table to ensure we don't leak their per-room
                 # profiles. It also means we write local users to this table
                 # exactly once, rather than once for every room they're in.
@@ -273,14 +308,29 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
                 # TODO `users_with_profile` above reads from the `user_directory`
                 # table, meaning that `profile` is bespoke to this room.
                 # and this leaks remote users' per-room profiles to the user directory.
-                await self.update_profile_in_user_dir(
-                    user_id, profile.display_name, profile.avatar_url
-                )
+                if is_public:
+                    # If this is a public room, it's acceptable to add the profile
+                    # into the user directory.
+                    await self.update_profile_in_user_dir(
+                        user_id, profile.display_name, profile.avatar_url
+                    )
+                else:
+                    # Otherwise query the user at a later time
+                    remote_users_to_query_later.add(user_id)

+            # (insert the remote users needing a query in batch;
+            # use upsert with no values for 'INSERT OR IGNORE' semantics)
+            await self.db_pool.simple_upsert_many(
+                f"{TEMP_TABLE}_remote_users_needing_lookup",
+                ("user_id",),
+                [(u,) for u in remote_users_to_query_later],
+                (),
+                (),
+                desc="populate_user_directory_queue_remote_needing_lookup",
+            )
+            del remote_users_to_query_later
+
             # Now update the room sharing tables to include this room.
-            is_public = await self.is_room_world_readable_or_publicly_joinable(
-                room_id
-            )
             if is_public:
                 if users_with_profile:
                     await self.add_users_in_public_rooms(
@@ -335,7 +385,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):

         return processed_event_count

-    async def _populate_user_directory_process_users(
+    async def _populate_user_directory_process_local_users(
         self, progress: JsonDict, batch_size: int
     ) -> int:
         """
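The `simple_upsert_many` call above passes empty tuples for the value columns, which (assuming the database layer compiles upserts in the usual way) degenerates into an insert that silently skips rows that already exist. A hedged sketch of the equivalent direct SQL, with an `execute_batch`-style helper assumed:

```python
# Sketch: what a "keys only, no values" upsert amounts to. Conflicting rows
# are left untouched, i.e. 'INSERT OR IGNORE' semantics on both engines.
def queue_remote_users_sketch(txn, table: str, user_ids: set) -> None:
    txn.execute_batch(
        f"INSERT INTO {table} (user_id) VALUES (?)"
        " ON CONFLICT (user_id) DO NOTHING",
        [(u,) for u in user_ids],
    )
```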
@@ -403,6 +453,114 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):

         return len(users_to_work_on)

+    async def _populate_user_directory_process_remote_users(
+        self, progress: JsonDict, batch_size: int
+    ) -> int:
+        """
+        Sorts through the `_remote_users_needing_lookup` table and adds the
+        users within to the list of stale remote profiles,
+        unless we already populated a user directory entry for them (i.e. they were
+        also in a public room).
+        """
+
+        def _get_next_batch_txn(
+            txn: LoggingTransaction, done_up_to_user_id: str
+        ) -> Optional[str]:
+            """
+            Given the last user ID we've processed, returns:
+            - a user ID to process up to and including; or
+            - `None` if there is no limit left (i.e. we should just process all
+              remaining rows).
+            """
+            # Should be a B-Tree index only scan: so reasonably efficient despite the
+            # OFFSET.
+            # If we're lucky, will also warm up the disk cache for the subsequent query
+            # that actually does some work.
+            txn.execute(
+                f"""
+                SELECT user_id
+                FROM {TEMP_TABLE}_remote_users_needing_lookup
+                WHERE user_id > ?
+                ORDER BY user_id
+                LIMIT 1 OFFSET ?
+                """,
+                (done_up_to_user_id, batch_size),
+            )
+            row = txn.fetchone()
+            if row:
+                return row[0]
+            else:
+                return None
+
+        def _add_private_only_users_to_stale_profile_refresh_queue_txn(
+            txn: LoggingTransaction, from_exc: str, until_inc: Optional[str]
+        ) -> None:
+            end_condition = "AND user_id <= ?" if until_inc is not None else ""
+            end_args = (until_inc,) if until_inc is not None else ()
+
+            user_id_serverpart: str
+            if isinstance(self.database_engine, PostgresEngine):
+                user_id_serverpart = (
+                    "SUBSTRING(user_id FROM POSITION(':' IN user_id) + 1)"
+                )
+            elif isinstance(self.database_engine, Sqlite3Engine):
+                user_id_serverpart = "SUBSTR(user_id, INSTR(user_id, ':') + 1)"
+            else:
+                raise RuntimeError("Unknown database engine!")
+
+            txn.execute(
+                f"""
+                INSERT INTO user_directory_stale_remote_users
+                    (user_id, next_try_at_ts, retry_counter, user_server_name)
+                SELECT
+                    user_id, 0, 0, {user_id_serverpart}
+                FROM {TEMP_TABLE}_remote_users_needing_lookup AS runl
+                LEFT JOIN user_directory AS ud USING (user_id)
+                WHERE ud.user_id IS NULL
+                    AND ? < user_id {end_condition}
+                """,
+                (from_exc,) + end_args,
+            )

+        def _do_txn(txn: LoggingTransaction) -> None:
+            """
+            Does a step of background update.
+            """
+            last_user_id = progress.get("last_user_id", "@")
+            next_end_limit_inc = _get_next_batch_txn(txn, last_user_id)
+            _add_private_only_users_to_stale_profile_refresh_queue_txn(
+                txn, last_user_id, next_end_limit_inc
+            )
+
+            # Update the progress
+            progress["last_user_id"] = next_end_limit_inc
+            self.db_pool.updates._background_update_progress_txn(
+                txn, "populate_user_directory_process_remote_users", progress
+            )
+
+        if progress.get("last_user_id", "@") is None:
+            await self.db_pool.updates._end_background_update(
+                "populate_user_directory_process_remote_users"
+            )
+
+            # Now kick off querying remote homeservers for profile information.
+            if self.hs.config.worker.should_update_user_directory:
+                self.hs.get_user_directory_handler().kick_off_remote_profile_refresh_process()
+            else:
+                command_handler = self.hs.get_replication_command_handler()
+                command_handler.send_command(
+                    ReadyToRefreshStaleUserDirectoryProfilesCommand("")
+                )
+
+            return 1
+
+        await self.db_pool.runInteraction(
+            "populate_user_directory_process_remote_users",
+            _do_txn,
+        )
+        return batch_size
+
     async def should_include_local_user_in_dir(self, user: str) -> bool:
         """Certain classes of local user are omitted from the user directory.
         Is this user one of them?
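The two-query batching pattern above (a cheap probe to find an upper-bound key, then the expensive statement constrained to that window) keeps each transaction's write set bounded without holding a cursor open across transactions. A generic, hedged sketch of the same pattern, with an invented `work_queue` table standing in for the staging table:

```python
from typing import Optional

# Illustrative only: batch-bounded processing keyed on a primary-key column.
# The probe is an index-only LIMIT/OFFSET scan; the heavy statement then works
# on the half-open window (last_key, upper].
def process_one_batch(txn, last_key: str, batch_size: int) -> Optional[str]:
    txn.execute(
        """
        SELECT user_id FROM work_queue
        WHERE user_id > ?
        ORDER BY user_id
        LIMIT 1 OFFSET ?
        """,
        (last_key, batch_size),
    )
    row = txn.fetchone()
    upper = row[0] if row else None  # None: fewer than batch_size rows remain

    end_condition = "AND user_id <= ?" if upper is not None else ""
    args = (last_key,) + ((upper,) if upper is not None else ())
    # Stand-in for the real per-batch work:
    txn.execute(f"DELETE FROM work_queue WHERE user_id > ? {end_condition}", args)
    return upper  # feed back in as `last_key` on the next iteration
```

When the probe returns `None` the final batch runs unbounded, which is why the background update above treats a `None` progress marker as the completion signal.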
@@ -473,11 +631,116 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):

         return False

+    async def set_remote_user_profile_in_user_dir_stale(
+        self, user_id: str, next_try_at_ms: int, retry_counter: int
+    ) -> None:
+        """
+        Marks a remote user as having a possibly-stale user directory profile.
+
+        Args:
+            user_id: the remote user who may have a stale profile on this server.
+            next_try_at_ms: timestamp in ms after which the user directory profile can be
+                refreshed.
+            retry_counter: number of failures in refreshing the profile so far. Used for
+                exponential backoff calculations.
+        """
+        assert not self.hs.is_mine_id(
+            user_id
+        ), "Can't mark a local user as a stale remote user."
+
+        server_name = UserID.from_string(user_id).domain
+
+        await self.db_pool.simple_upsert(
+            table="user_directory_stale_remote_users",
+            keyvalues={"user_id": user_id},
+            values={
+                "next_try_at_ts": next_try_at_ms,
+                "retry_counter": retry_counter,
+                "user_server_name": server_name,
+            },
+            desc="set_remote_user_profile_in_user_dir_stale",
+        )
+
+    async def clear_remote_user_profile_in_user_dir_stale(self, user_id: str) -> None:
+        """
+        Marks a remote user as no longer having a possibly-stale user directory profile.
+
+        Args:
+            user_id: the remote user who no longer has a stale profile on this server.
+        """
+        await self.db_pool.simple_delete(
+            table="user_directory_stale_remote_users",
+            keyvalues={"user_id": user_id},
+            desc="clear_remote_user_profile_in_user_dir_stale",
+        )
+
+    async def get_remote_servers_with_profiles_to_refresh(
+        self, now_ts: int, limit: int
+    ) -> List[str]:
+        """
+        Get a list of up to `limit` server names which have users whose
+        locally-cached profiles we believe to be stale
+        and are refreshable given the current time `now_ts` in milliseconds.
+        """
+
+        def _get_remote_servers_with_refreshable_profiles_txn(
+            txn: LoggingTransaction,
+        ) -> List[str]:
+            sql = """
+                SELECT user_server_name
+                FROM user_directory_stale_remote_users
+                WHERE next_try_at_ts < ?
+                GROUP BY user_server_name
+                ORDER BY MIN(next_try_at_ts), user_server_name
+                LIMIT ?
+            """
+            txn.execute(sql, (now_ts, limit))
+            return [row[0] for row in txn]
+
+        return await self.db_pool.runInteraction(
+            "get_remote_servers_with_profiles_to_refresh",
+            _get_remote_servers_with_refreshable_profiles_txn,
+        )
+
+    async def get_remote_users_to_refresh_on_server(
+        self, server_name: str, now_ts: int, limit: int
+    ) -> List[Tuple[str, int, int]]:
+        """
+        Get a list of up to `limit` user IDs from the server `server_name`
+        whose locally-cached profiles we believe to be stale
+        and are refreshable given the current time `now_ts` in milliseconds.
+
+        Returns:
+            tuple of:
+            - User ID
+            - Retry counter (number of failures so far)
+            - Time the retry is scheduled for, in milliseconds
+        """
+
+        def _get_remote_users_to_refresh_on_server_txn(
+            txn: LoggingTransaction,
+        ) -> List[Tuple[str, int, int]]:
+            sql = """
+                SELECT user_id, retry_counter, next_try_at_ts
+                FROM user_directory_stale_remote_users
+                WHERE user_server_name = ? AND next_try_at_ts < ?
+                ORDER BY next_try_at_ts
+                LIMIT ?
+            """
+            txn.execute(sql, (server_name, now_ts, limit))
+            return cast(List[Tuple[str, int, int]], txn.fetchall())
+
+        return await self.db_pool.runInteraction(
+            "get_remote_users_to_refresh_on_server",
+            _get_remote_users_to_refresh_on_server_txn,
+        )
+
     async def update_profile_in_user_dir(
         self, user_id: str, display_name: Optional[str], avatar_url: Optional[str]
     ) -> None:
         """
         Update or add a user's profile in the user directory.
+        If the user is remote, the profile will be marked as not stale.
         """
         # If the display name or avatar URL are unexpected types, replace with None.
         display_name = non_null_str_or_none(display_name)
@@ -491,6 +754,14 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
                 values={"display_name": display_name, "avatar_url": avatar_url},
             )

+            if not self.hs.is_mine_id(user_id):
+                # Remote users: Make sure the profile is not marked as stale anymore.
+                self.db_pool.simple_delete_txn(
+                    txn,
+                    table="user_directory_stale_remote_users",
+                    keyvalues={"user_id": user_id},
+                )
+
             # The display name that goes into the database index.
             index_display_name = display_name
             if index_display_name is not None:

@@ -805,12 +805,14 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
             state_groups_to_delete: State groups to delete
         """

+        logger.info("[purge] Starting state purge")
         await self.db_pool.runInteraction(
             "purge_room_state",
             self._purge_room_state_txn,
             room_id,
             state_groups_to_delete,
         )
+        logger.info("[purge] Done with state purge")

     def _purge_room_state_txn(
         self,
@@ -0,0 +1,39 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Table containing a list of remote users whose profiles may have changed
+-- since their last update in the user directory.
+CREATE TABLE user_directory_stale_remote_users (
+    -- The User ID of the remote user whose profile may be stale.
+    user_id TEXT NOT NULL PRIMARY KEY,
+
+    -- The server name of the user.
+    user_server_name TEXT NOT NULL,
+
+    -- The timestamp (in ms) after which we should next try to request the user's
+    -- latest profile.
+    next_try_at_ts BIGINT NOT NULL,
+
+    -- The number of retries so far.
+    -- 0 means we have not yet attempted to refresh the profile.
+    -- Used for calculating exponential backoff.
+    retry_counter INTEGER NOT NULL
+);
+
+-- Create an index so we can easily query upcoming servers to try.
+CREATE INDEX user_directory_stale_remote_users_next_try_idx ON user_directory_stale_remote_users(next_try_at_ts, user_server_name);
+
+-- Create an index so we can easily query upcoming users to try for a particular server.
+CREATE INDEX user_directory_stale_remote_users_next_try_by_server_idx ON user_directory_stale_remote_users(user_server_name, next_try_at_ts);
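The table only stores the failure count; the actual backoff schedule lives with the caller that computes `next_try_at_ts`. As a hedged illustration of how `retry_counter` feeds an exponential backoff (the base delay, multiplier and cap here are invented, not Synapse's real constants):

```python
# Illustrative only: derive the next retry time from retry_counter with a
# capped exponential backoff. All constants are made up for the sketch.
BASE_DELAY_MS = 5 * 60 * 1000       # first retry after five minutes
MAX_DELAY_MS = 24 * 60 * 60 * 1000  # never wait longer than a day

def next_try_at(now_ms: int, retry_counter: int) -> int:
    delay_ms = min(BASE_DELAY_MS * (2 ** retry_counter), MAX_DELAY_MS)
    return now_ms + delay_ms
```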
@@ -0,0 +1,40 @@
+/* Copyright 2023 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Rebuild the user directory in light of the fix for leaking the per-room
+-- profiles of remote users to the user directory.
+
+-- First cancel any existing rebuilds if already pending; we'll run from fresh.
+DELETE FROM background_updates WHERE update_name IN (
+    'populate_user_directory_createtables',
+    'populate_user_directory_process_rooms',
+    'populate_user_directory_process_users',
+    'populate_user_directory_process_remote_users',
+    'populate_user_directory_cleanup'
+);
+
+-- Then schedule the steps.
+INSERT INTO background_updates (ordering, update_name, progress_json, depends_on) VALUES
+    -- Set up user directory staging tables.
+    (7402, 'populate_user_directory_createtables', '{}', NULL),
+    -- Run through each room and update the user directory according to who is in it.
+    (7402, 'populate_user_directory_process_rooms', '{}', 'populate_user_directory_createtables'),
+    -- Insert all users into the user directory, if search_all_users is on.
+    (7402, 'populate_user_directory_process_users', '{}', 'populate_user_directory_process_rooms'),
+    -- Insert remote users into the queue for fetching.
+    (7402, 'populate_user_directory_process_remote_users', '{}', 'populate_user_directory_process_users'),
+    -- Clean up user directory staging tables.
+    (7402, 'populate_user_directory_cleanup', '{}', 'populate_user_directory_process_remote_users')
+ON CONFLICT (update_name) DO NOTHING;
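Each step's `depends_on` points at the previous step, so the background updater runs the rebuild strictly in the order listed even though the `ordering` values are equal. For operators who want to watch the rebuild drain, a hypothetical helper (not part of Synapse) might look like:

```python
# Hypothetical helper: list the user-directory rebuild steps still pending,
# in the order the background updater will take them.
def pending_user_dir_rebuild_steps(txn) -> list:
    txn.execute(
        """
        SELECT ordering, update_name, depends_on
        FROM background_updates
        WHERE update_name LIKE 'populate_user_directory%'
        ORDER BY ordering, update_name
        """
    )
    return txn.fetchall()
```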
@@ -0,0 +1,52 @@
+/* Copyright 2023 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+--- destinations
+COMMENT ON TABLE destinations IS
+    'Information about remote homeservers and the health of our connection to them.';
+
+COMMENT ON COLUMN destinations.destination IS 'server name of remote homeserver in question';
+
+COMMENT ON COLUMN destinations.last_successful_stream_ordering IS
+    $$Stream ordering of the most recently successfully sent PDU to this server, sent through normal send (not e.g. backfill).
+In Catch-Up Mode, the original PDU persisted by us is represented here, even if we sent a later forward extremity in its stead.
+See `destination_rooms` for more information about catch-up.$$;
+
+COMMENT ON COLUMN destinations.retry_last_ts IS
+    $$The last time we tried and failed to reach the remote server, in ms.
+This field is reset to `0` when we succeed in connecting again.$$;
+
+COMMENT ON COLUMN destinations.retry_interval IS
+    $$How long, in milliseconds, to wait since the last time we tried to reach the remote server before trying again.
+This field is reset to `0` when we succeed in connecting again.$$;
+
+COMMENT ON COLUMN destinations.failure_ts IS
+    $$The first time we tried and failed to reach the remote server, in ms.
+This field is reset to `NULL` when we succeed in connecting again.$$;
+
+
+
+--- destination_rooms
+COMMENT ON TABLE destination_rooms IS
+    'Information about transmission of PDUs in a given room to a given remote homeserver.';
+
+COMMENT ON COLUMN destination_rooms.destination IS 'server name of remote homeserver in question';
+
+COMMENT ON COLUMN destination_rooms.room_id IS 'room ID in question';
+
+COMMENT ON COLUMN destination_rooms.stream_ordering IS
+    $$`stream_ordering` of the most recent PDU in this room that needs to be sent (by us) to this homeserver.
+This can only be pointing to our own PDU because we are only responsible for sending our own PDUs.$$;
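These comments live in the PostgreSQL catalog, so they show up in psql's `\d+ destinations` output and can be read back programmatically. A hedged Synapse-style sketch (assumes PostgreSQL, and the convention that `?` placeholders are rewritten for the engine):

```python
from typing import Optional

# Sketch: read back a table comment via the pg catalog helper function.
# 'pg_class' scopes the lookup to relations (tables, indexes, ...).
def get_table_comment(txn, table_name: str) -> Optional[str]:
    txn.execute(
        "SELECT obj_description(?::regclass, 'pg_class')",
        (table_name,),
    )
    row = txn.fetchone()
    return row[0] if row else None
```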
@@ -14,7 +14,17 @@
 # limitations under the License.
 import logging
 from enum import Enum, auto
-from typing import Collection, Dict, FrozenSet, List, Optional, Tuple
+from typing import (
+    Collection,
+    Dict,
+    FrozenSet,
+    List,
+    Mapping,
+    Optional,
+    Sequence,
+    Set,
+    Tuple,
+)

 import attr
 from typing_extensions import Final
@@ -565,29 +575,43 @@ async def filter_events_for_server(
     storage: StorageControllers,
     target_server_name: str,
     local_server_name: str,
-    events: List[EventBase],
-    redact: bool = True,
-    check_history_visibility_only: bool = False,
+    events: Sequence[EventBase],
+    *,
+    redact: bool,
+    filter_out_erased_senders: bool,
+    filter_out_remote_partial_state_events: bool,
 ) -> List[EventBase]:
-    """Filter a list of events based on whether given server is allowed to
+    """Filter a list of events based on whether the target server is allowed to
     see them.

+    For a fully stated room, the target server is allowed to see an event E if:
+      - the state at E has world readable or shared history vis, OR
+      - the state at E says that the target server is in the room.
+
+    For a partially stated room, the target server is allowed to see E if:
+      - E was created by this homeserver, AND:
+          - the partial state at E has world readable or shared history vis, OR
+          - the partial state at E says that the target server is in the room.
+
+    TODO: state before or state after?
+
     Args:
         storage
-        server_name
+        target_server_name
+        local_server_name
         events
-        redact: Whether to return a redacted version of the event, or
-            to filter them out entirely.
-        check_history_visibility_only: Whether to only check the
-            history visibility, rather than things like if the sender has been
+        redact: Controls what to do with events which have been filtered out.
+            If True, include their redacted forms; if False, omit them entirely.
+        filter_out_erased_senders: If true, also filter out events whose sender has been
             erased. This is used e.g. during pagination to decide whether to
             backfill or not.
+
+        filter_out_remote_partial_state_events: If True, also filter out events in
+            partial state rooms created by other homeservers.
     Returns
        The filtered events.
    """

-    def is_sender_erased(event: EventBase, erased_senders: Dict[str, bool]) -> bool:
+    def is_sender_erased(event: EventBase, erased_senders: Mapping[str, bool]) -> bool:
         if erased_senders and erased_senders[event.sender]:
             logger.info("Sender of %s has been erased, redacting", event.event_id)
             return True
@@ -616,7 +640,7 @@ async def filter_events_for_server(
         # server has no users in the room: redact
         return False

-    if not check_history_visibility_only:
+    if filter_out_erased_senders:
         erased_senders = await storage.main.are_users_erased(e.sender for e in events)
     else:
         # We don't want to check whether users are erased, which is equivalent
@@ -631,15 +655,15 @@ async def filter_events_for_server(
     # otherwise a room could be fully joined after we retrieve those, which would then bypass
     # this check but would base the filtering on an outdated view of the membership events.

-    partial_state_invisible_events = set()
-    if not check_history_visibility_only:
+    partial_state_invisible_event_ids: Set[str] = set()
+    if filter_out_remote_partial_state_events:
         for e in events:
             sender_domain = get_domain_from_id(e.sender)
             if (
                 sender_domain != local_server_name
                 and await storage.main.is_partial_state_room(e.room_id)
             ):
-                partial_state_invisible_events.add(e)
+                partial_state_invisible_event_ids.add(e.event_id)

     # Let's check to see if all the events have a history visibility
     # of "shared" or "world_readable". If that's the case then we don't
@@ -658,17 +682,20 @@ async def filter_events_for_server(
         target_server_name,
     )

-    to_return = []
-    for e in events:
+    def include_event_in_output(e: EventBase) -> bool:
         erased = is_sender_erased(e, erased_senders)
         visible = check_event_is_visible(
             event_to_history_vis[e.event_id], event_to_memberships.get(e.event_id, {})
         )

-        if e in partial_state_invisible_events:
+        if e.event_id in partial_state_invisible_event_ids:
             visible = False

-        if visible and not erased:
+        return visible and not erased
+
+    to_return = []
+    for e in events:
+        if include_event_in_output(e):
             to_return.append(e)
         elif redact:
             to_return.append(prune_event(e))
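For callers, the practical effect is that the old `check_history_visibility_only` boolean (which toggled several behaviours at once) is replaced by keyword-only flags that must each be spelled out. A sketch of the migration at a call site, with placeholder server names:

```python
# Before (sketch): the defaults hid which checks were being applied.
filtered = await filter_events_for_server(
    storage, "remote.example.org", "local.example.org", events
)

# After: every behaviour is an explicit, keyword-only decision.
filtered = await filter_events_for_server(
    storage,
    "remote.example.org",
    "local.example.org",
    events,
    redact=True,
    filter_out_erased_senders=True,
    filter_out_remote_partial_state_events=True,
)
```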
@@ -1,4 +1,5 @@
-from typing import Callable, List, Optional, Tuple
+from typing import Callable, Collection, List, Optional, Tuple
+from unittest import mock
 from unittest.mock import Mock

 from twisted.test.proto_helpers import MemoryReactor
@@ -500,3 +501,87 @@ class FederationCatchUpTestCases(FederatingHomeserverTestCase):
         self.assertEqual(len(sent_pdus), 1)
         self.assertEqual(sent_pdus[0].event_id, event_2.event_id)
         self.assertFalse(per_dest_queue._catching_up)
+
+    def test_catch_up_is_not_blocked_by_remote_event_in_partial_state_room(
+        self,
+    ) -> None:
+        """Detects (part of?) https://github.com/matrix-org/synapse/issues/15220."""
+        # ARRANGE:
+        # - a local user (u1)
+        # - a room which contains u1 and two remote users, @u2:host2 and @u3:other
+        # - events in that room such that
+        #   - history visibility is restricted
+        #   - u1 sent message events e1 and e2
+        #   - afterwards, u3 sent a remote event e3
+        # - catchup to begin for host2; last successfully sent event was e1
+        per_dest_queue, sent_pdus = self.make_fake_destination_queue()
+
+        self.register_user("u1", "you the one")
+        u1_token = self.login("u1", "you the one")
+        room = self.helper.create_room_as("u1", tok=u1_token)
+        self.helper.send_state(
+            room_id=room,
+            event_type="m.room.history_visibility",
+            body={"history_visibility": "joined"},
+            tok=u1_token,
+        )
+        self.get_success(
+            event_injection.inject_member_event(self.hs, room, "@u2:host2", "join")
+        )
+        self.get_success(
+            event_injection.inject_member_event(self.hs, room, "@u3:other", "join")
+        )
+
+        # create some events
+        event_id_1 = self.helper.send(room, "hello", tok=u1_token)["event_id"]
+        event_id_2 = self.helper.send(room, "world", tok=u1_token)["event_id"]
+        # pretend that u3 changes their displayname
+        event_id_3 = self.get_success(
+            event_injection.inject_member_event(self.hs, room, "@u3:other", "join")
+        ).event_id
+
+        # destination_rooms should already be populated, but let us pretend that we already
+        # sent (successfully) up to and including event id 1
+        event_1 = self.get_success(self.hs.get_datastores().main.get_event(event_id_1))
+        assert event_1.internal_metadata.stream_ordering is not None
+        self.get_success(
+            self.hs.get_datastores().main.set_destination_last_successful_stream_ordering(
+                "host2", event_1.internal_metadata.stream_ordering
+            )
+        )
+
+        # also fetch event 2 so we can compare its stream ordering to the sender's
+        # last_successful_stream_ordering later
+        event_2 = self.get_success(self.hs.get_datastores().main.get_event(event_id_2))
+
+        # Mock event 3 as having partial state
+        self.get_success(
+            event_injection.mark_event_as_partial_state(self.hs, event_id_3, room)
+        )
+
+        # Fail the test if we block on full state for event 3.
+        async def mock_await_full_state(event_ids: Collection[str]) -> None:
+            if event_id_3 in event_ids:
+                raise AssertionError("Tried to await full state for event_id_3")
+
+        # ACT
+        with mock.patch.object(
+            self.hs.get_storage_controllers().state._partial_state_events_tracker,
+            "await_full_state",
+            mock_await_full_state,
+        ):
+            self.get_success(per_dest_queue._catch_up_transmission_loop())
+
+        # ASSERT
+        # We should have:
+        # - not sent event 3: it's not ours, and the room is partial stated
+        # - fallen back to sending event 2: it's the most recent event in the room
+        #   we tried to send to host2
+        # - completed catch-up
+        self.assertEqual(len(sent_pdus), 1)
+        self.assertEqual(sent_pdus[0].event_id, event_id_2)
+        self.assertFalse(per_dest_queue._catching_up)
+        self.assertEqual(
+            per_dest_queue._last_successful_stream_ordering,
+            event_2.internal_metadata.stream_ordering,
+        )
@@ -19,17 +19,18 @@ from twisted.test.proto_helpers import MemoryReactor

 import synapse.rest.admin
 from synapse.api.constants import UserTypes
+from synapse.api.errors import SynapseError
 from synapse.api.room_versions import RoomVersion, RoomVersions
 from synapse.appservice import ApplicationService
 from synapse.rest.client import login, register, room, user_directory
 from synapse.server import HomeServer
 from synapse.storage.roommember import ProfileInfo
-from synapse.types import UserProfile, create_requester
+from synapse.types import JsonDict, UserProfile, create_requester
 from synapse.util import Clock

 from tests import unittest
 from tests.storage.test_user_directory import GetUserDirectoryTables
-from tests.test_utils import make_awaitable
+from tests.test_utils import event_injection, make_awaitable
 from tests.test_utils.event_injection import inject_member_event
 from tests.unittest import override_config
@@ -1103,3 +1104,186 @@ class TestUserDirSearchDisabled(unittest.HomeserverTestCase):
         )
         self.assertEqual(200, channel.code, channel.result)
         self.assertTrue(len(channel.json_body["results"]) == 0)
+
+
+class UserDirectoryRemoteProfileTestCase(unittest.HomeserverTestCase):
+    servlets = [
+        login.register_servlets,
+        synapse.rest.admin.register_servlets,
+        register.register_servlets,
+        room.register_servlets,
+    ]
+
+    def default_config(self) -> JsonDict:
+        config = super().default_config()
+        # Re-enables updating the user directory, as that functionality is needed below.
+        config["update_user_directory_from_worker"] = None
+        return config
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.store = hs.get_datastores().main
+        self.alice = self.register_user("alice", "alice123")
+        self.alice_tok = self.login("alice", "alice123")
+        self.user_dir_helper = GetUserDirectoryTables(self.store)
+        self.user_dir_handler = hs.get_user_directory_handler()
+        self.profile_handler = hs.get_profile_handler()
+
+        if self.user_dir_handler._refresh_remote_profiles_call_later is not None:
+            # Cancel the startup call: in the steady-state case we can't rely on
+            # it anyway.
+            self.user_dir_handler._refresh_remote_profiles_call_later.cancel()
+
+    def test_public_rooms_have_profiles_collected(self) -> None:
+        """
+        In a public room, member state events are treated as reflecting the user's
+        real profile and they are accepted.
+        (The main motivation for accepting this is to prevent having to query
+        *every* single profile change over federation.)
+        """
+        room_id = self.helper.create_room_as(
+            self.alice, is_public=True, tok=self.alice_tok
+        )
+        self.get_success(
+            event_injection.inject_member_event(
+                self.hs,
+                room_id,
+                "@bruce:remote",
+                "join",
+                "@bruce:remote",
+                extra_content={
+                    "displayname": "Bruce!",
+                    "avatar_url": "mxc://remote/123",
+                },
+            )
+        )
+        # Sending this event makes the streams move forward after the injection...
+        self.helper.send(room_id, "Test", tok=self.alice_tok)
+        self.pump(0.1)
+
+        profiles = self.get_success(
+            self.user_dir_helper.get_profiles_in_user_directory()
+        )
+        self.assertEqual(
+            profiles.get("@bruce:remote"),
+            ProfileInfo(display_name="Bruce!", avatar_url="mxc://remote/123"),
+        )
+
+    def test_private_rooms_do_not_have_profiles_collected(self) -> None:
+        """
+        In a private room, member state events are not pulled out and used to populate
+        the user directory.
+        """
+        room_id = self.helper.create_room_as(
+            self.alice, is_public=False, tok=self.alice_tok
+        )
+        self.get_success(
+            event_injection.inject_member_event(
+                self.hs,
+                room_id,
+                "@bruce:remote",
+                "join",
+                "@bruce:remote",
+                extra_content={
+                    "displayname": "super-duper bruce",
+                    "avatar_url": "mxc://remote/456",
+                },
+            )
+        )
+        # Sending this event makes the streams move forward after the injection...
+        self.helper.send(room_id, "Test", tok=self.alice_tok)
+        self.pump(0.1)
+
+        profiles = self.get_success(
+            self.user_dir_helper.get_profiles_in_user_directory()
+        )
+        self.assertNotIn("@bruce:remote", profiles)
+
+    def test_private_rooms_have_profiles_requested(self) -> None:
+        """
+        When a name changes in a private room, the homeserver instead requests
+        the user's global profile over federation.
+        """
+
+        async def get_remote_profile(
+            user_id: str, ignore_backoff: bool = True
+        ) -> JsonDict:
+            if user_id == "@bruce:remote":
+                return {
+                    "displayname": "Sir Bruce Bruceson",
+                    "avatar_url": "mxc://remote/789",
+                }
+            else:
+                raise ValueError(f"unable to fetch {user_id}")
+
+        with patch.object(self.profile_handler, "get_profile", get_remote_profile):
+            # Continue from the earlier test...
+            self.test_private_rooms_do_not_have_profiles_collected()
+
+            # Advance by a minute
+            self.reactor.advance(61.0)
+
+            profiles = self.get_success(
+                self.user_dir_helper.get_profiles_in_user_directory()
+            )
+            self.assertEqual(
+                profiles.get("@bruce:remote"),
+                ProfileInfo(
+                    display_name="Sir Bruce Bruceson", avatar_url="mxc://remote/789"
+                ),
+            )
+
+    def test_profile_requests_are_retried(self) -> None:
+        """
+        When we fail to fetch the user's profile over federation,
+        we try again later.
+        """
+        has_failed_once = False
+
+        async def get_remote_profile(
+            user_id: str, ignore_backoff: bool = True
+        ) -> JsonDict:
+            nonlocal has_failed_once
+            if user_id == "@bruce:remote":
+                if not has_failed_once:
+                    has_failed_once = True
+                    raise SynapseError(502, "temporary network problem")
+
+                return {
+                    "displayname": "Sir Bruce Bruceson",
+                    "avatar_url": "mxc://remote/789",
+                }
+            else:
+                raise ValueError(f"unable to fetch {user_id}")
+
+        with patch.object(self.profile_handler, "get_profile", get_remote_profile):
+            # Continue from the earlier test...
+            self.test_private_rooms_do_not_have_profiles_collected()
+
+            # Advance by a minute
+            self.reactor.advance(61.0)
+
+            # The request has already failed once
+            self.assertTrue(has_failed_once)
+
+            # The profile has yet to be updated.
+            profiles = self.get_success(
+                self.user_dir_helper.get_profiles_in_user_directory()
+            )
+            self.assertNotIn(
+                "@bruce:remote",
+                profiles,
+            )
+
+            # Advance by five minutes, after the backoff has finished
+            self.reactor.advance(301.0)
+
+            # The profile should have been updated now
+            profiles = self.get_success(
+                self.user_dir_helper.get_profiles_in_user_directory()
+            )
+            self.assertEqual(
+                profiles.get("@bruce:remote"),
+                ProfileInfo(
+                    display_name="Sir Bruce Bruceson", avatar_url="mxc://remote/789"
+                ),
+            )
@@ -210,8 +210,8 @@ class BlacklistingAgentTest(TestCase):
         """Apply the blacklisting agent and ensure it properly blocks connections to particular IPs."""
         agent = BlacklistingAgentWrapper(
             Agent(self.reactor),
-            ip_whitelist=self.ip_whitelist,
             ip_blacklist=self.ip_blacklist,
+            ip_whitelist=self.ip_whitelist,
         )

         # The unsafe IPs should be rejected.
@@ -1249,9 +1249,8 @@ class AccountStatusTestCase(unittest.HomeserverTestCase):
             # account status will fail.
             return UserID.from_string(user_id).localpart == "someuser"

-        self.hs.get_account_validity_handler()._is_user_expired_callbacks.append(
-            is_expired
-        )
+        account_validity_callbacks = self.hs.get_module_api_callbacks().account_validity
+        account_validity_callbacks.is_user_expired_callbacks.append(is_expired)

         self._test_status(
             users=[user],
@@ -941,18 +941,16 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase):
         just before associating and removing a 3PID to/from an account.
         """
         # Pretend to be a Synapse module and register both callbacks as mocks.
-        third_party_rules = self.hs.get_third_party_event_rules()
         on_add_user_third_party_identifier_callback_mock = Mock(
             return_value=make_awaitable(None)
         )
         on_remove_user_third_party_identifier_callback_mock = Mock(
             return_value=make_awaitable(None)
         )
-        third_party_rules._on_threepid_bind_callbacks.append(
-            on_add_user_third_party_identifier_callback_mock
-        )
-        third_party_rules._on_threepid_bind_callbacks.append(
-            on_remove_user_third_party_identifier_callback_mock
+        third_party_rules = self.hs.get_third_party_event_rules()
+        third_party_rules.register_third_party_rules_callbacks(
+            on_add_user_third_party_identifier=on_add_user_third_party_identifier_callback_mock,
+            on_remove_user_third_party_identifier=on_remove_user_third_party_identifier_callback_mock,
         )

         # Register an admin user.
@@ -1008,12 +1006,12 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase):
         when a user is deactivated and their third-party ID associations are deleted.
         """
         # Pretend to be a Synapse module and register both callbacks as mocks.
-        third_party_rules = self.hs.get_third_party_event_rules()
         on_remove_user_third_party_identifier_callback_mock = Mock(
             return_value=make_awaitable(None)
         )
-        third_party_rules._on_threepid_bind_callbacks.append(
-            on_remove_user_third_party_identifier_callback_mock
+        third_party_rules = self.hs.get_third_party_event_rules()
+        third_party_rules.register_third_party_rules_callbacks(
+            on_remove_user_third_party_identifier=on_remove_user_third_party_identifier_callback_mock,
         )

         # Register an admin user.
@@ -1039,6 +1037,9 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase):
         )
         self.assertEqual(channel.code, 200, channel.json_body)

+        # Check that the mock was not called on the act of adding a third-party ID.
+        on_remove_user_third_party_identifier_callback_mock.assert_not_called()
+
         # Now deactivate the user.
         channel = self.make_request(
             "PUT",
@@ -102,3 +102,34 @@ async def create_event(
     context = await unpersisted_context.persist(event)

     return event, context
+
+
+async def mark_event_as_partial_state(
+    hs: synapse.server.HomeServer,
+    event_id: str,
+    room_id: str,
+) -> None:
+    """
+    (Falsely) mark an event as having partial state.
+
+    Naughty, but occasionally useful when checking that partial state doesn't
+    block something from happening.
+
+    If the event already has partial state, this insert will fail (event_id is unique
+    in this table).
+    """
+    store = hs.get_datastores().main
+    await store.db_pool.simple_upsert(
+        table="partial_state_rooms",
+        keyvalues={"room_id": room_id},
+        values={},
+        insertion_values={"room_id": room_id},
+    )
+
+    await store.db_pool.simple_insert(
+        table="partial_state_events",
+        values={
+            "room_id": room_id,
+            "event_id": event_id,
+        },
+    )
@@ -63,7 +63,13 @@ class FilterEventsForServerTestCase(unittest.HomeserverTestCase):

         filtered = self.get_success(
             filter_events_for_server(
-                self._storage_controllers, "test_server", "hs", events_to_filter
+                self._storage_controllers,
+                "test_server",
+                "hs",
+                events_to_filter,
+                redact=True,
+                filter_out_erased_senders=True,
+                filter_out_remote_partial_state_events=True,
             )
         )

@@ -85,7 +91,13 @@ class FilterEventsForServerTestCase(unittest.HomeserverTestCase):
         self.assertEqual(
             self.get_success(
                 filter_events_for_server(
-                    self._storage_controllers, "remote_hs", "hs", [outlier]
+                    self._storage_controllers,
+                    "remote_hs",
+                    "hs",
+                    [outlier],
+                    redact=True,
+                    filter_out_erased_senders=True,
+                    filter_out_remote_partial_state_events=True,
                 )
             ),
             [outlier],
@@ -96,7 +108,13 @@ class FilterEventsForServerTestCase(unittest.HomeserverTestCase):

         filtered = self.get_success(
             filter_events_for_server(
-                self._storage_controllers, "remote_hs", "local_hs", [outlier, evt]
+                self._storage_controllers,
+                "remote_hs",
+                "local_hs",
+                [outlier, evt],
+                redact=True,
+                filter_out_erased_senders=True,
+                filter_out_remote_partial_state_events=True,
             )
         )
         self.assertEqual(len(filtered), 2, f"expected 2 results, got: {filtered}")
@@ -108,7 +126,13 @@ class FilterEventsForServerTestCase(unittest.HomeserverTestCase):
         # be redacted)
         filtered = self.get_success(
             filter_events_for_server(
-                self._storage_controllers, "other_server", "local_hs", [outlier, evt]
+                self._storage_controllers,
+                "other_server",
+                "local_hs",
+                [outlier, evt],
+                redact=True,
+                filter_out_erased_senders=True,
+                filter_out_remote_partial_state_events=True,
             )
         )
         self.assertEqual(filtered[0], outlier)
@@ -143,7 +167,13 @@ class FilterEventsForServerTestCase(unittest.HomeserverTestCase):
         # ... and the filtering happens.
         filtered = self.get_success(
             filter_events_for_server(
-                self._storage_controllers, "test_server", "local_hs", events_to_filter
+                self._storage_controllers,
+                "test_server",
+                "local_hs",
+                events_to_filter,
+                redact=True,
+                filter_out_erased_senders=True,
+                filter_out_remote_partial_state_events=True,
             )
         )