Compare commits: v1.31.0 ... erikj/cach (35 commits)

| Author | SHA1 | Date |
|---|---|---|
| | e2b2decd11 | |
| | d088695f15 | |
| | 0fddb9aacd | |
| | b05667b610 | |
| | 8d308066db | |
| | 2bb036eda7 | |
| | 5deb349a7f | |
| | 2e78ef4d22 | |
| | 0f82904fdd | |
| | f2e61d3cc1 | |
| | 3d6c982c41 | |
| | bebb7f0f60 | |
| | d6b8e471cf | |
| | bb0fe02a52 | |
| | 35c5ef2d24 | |
| | e32294f54b | |
| | 5ff8eb97c6 | |
| | 670564446c | |
| | ac99774dac | |
| | 4dabcf026e | |
| | f02663c4dd | |
| | 963f4309fe | |
| | e73881a439 | |
| | 51a728ec24 | |
| | acd2778d61 | |
| | ffa6e96b5f | |
| | a1dfe34d86 | |
| | c232e16d23 | |
| | ee36be5eef | |
| | b3e99c25bf | |
| | f83ad8dd2d | |
| | 915163f72f | |
| | 9596641e1d | |
| | c4d468b69f | |
| | f38350fdda | |
changelog.d/9685.misc (new file, 1 line)
@@ -0,0 +1 @@
Update `scripts-dev/complement.sh` to use a local checkout of Complement, allow running a subset of tests and have it use Synapse's Complement test blacklist.

changelog.d/9691.feature (new file, 1 line)
@@ -0,0 +1 @@
Add `order_by` to the admin API `GET /_synapse/admin/v2/users`. Contributed by @dklimpel.

changelog.d/9700.feature (new file, 1 line)
@@ -0,0 +1 @@
Replace the `room_invite_state_types` configuration setting with `room_prejoin_state`.

changelog.d/9710.feature (new file, 1 line)
@@ -0,0 +1 @@
Experimental Spaces support: include `m.room.create` in the room state sent with room-invites.

changelog.d/9711.bugfix (new file, 1 line)
@@ -0,0 +1 @@
Fix recently added ratelimits to correctly honour the application service `rate_limited` flag.

changelog.d/9717.feature (new file, 1 line)
@@ -0,0 +1 @@
Add experimental support for [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083): restricting room access via group membership.

changelog.d/9718.removal (new file, 1 line)
@@ -0,0 +1 @@
Replace deprecated `imp` module with successor `importlib`. Contributed by Cristina Muñoz.

changelog.d/9719.doc (new file, 1 line)
@@ -0,0 +1 @@
Make the allowed_local_3pids regex example in the sample config stricter.
@@ -173,18 +173,10 @@ report_stats: False

## API Configuration ##

room_invite_state_types:
  - "m.room.join_rules"
  - "m.room.canonical_alias"
  - "m.room.avatar"
  - "m.room.name"

{% if SYNAPSE_APPSERVICES %}
app_service_config_files:
{% for appservice in SYNAPSE_APPSERVICES %} - "{{ appservice }}"
{% endfor %}
{% else %}
app_service_config_files: []
{% endif %}

macaroon_secret_key: "{{ SYNAPSE_MACAROON_SECRET_KEY }}"

@@ -111,35 +111,16 @@ List Accounts
=============

This API returns all local user accounts.
By default, the response is ordered by ascending user ID.

The api is::
The API is::

    GET /_synapse/admin/v2/users?from=0&limit=10&guests=false

To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.

The parameter ``from`` is optional but used for pagination, denoting the
offset in the returned results. This should be treated as an opaque value and
not explicitly set to anything other than the return value of ``next_token``
from a previous call.

The parameter ``limit`` is optional but is used for pagination, denoting the
maximum number of items to return in this call. Defaults to ``100``.

The parameter ``user_id`` is optional and filters to only return users with user IDs
that contain this value. This parameter is ignored when using the ``name`` parameter.

The parameter ``name`` is optional and filters to only return users with user ID localparts
**or** displaynames that contain this value.

The parameter ``guests`` is optional and if ``false`` will **exclude** guest users.
Defaults to ``true`` to include guest users.

The parameter ``deactivated`` is optional and if ``true`` will **include** deactivated users.
Defaults to ``false`` to exclude deactivated users.

A JSON body is returned with the following shape:
A response body like the following is returned:

.. code:: json

@@ -175,6 +156,66 @@ with ``from`` set to the value of ``next_token``. This will return a new page.
If the endpoint does not return a ``next_token`` then there are no more users
to paginate through.

**Parameters**

The following parameters should be set in the URL:

- ``user_id`` - Is optional and filters to only return users with user IDs
  that contain this value. This parameter is ignored when using the ``name`` parameter.
- ``name`` - Is optional and filters to only return users with user ID localparts
  **or** displaynames that contain this value.
- ``guests`` - string representing a bool - Is optional and if ``false`` will **exclude** guest users.
  Defaults to ``true`` to include guest users.
- ``deactivated`` - string representing a bool - Is optional and if ``true`` will **include** deactivated users.
  Defaults to ``false`` to exclude deactivated users.
- ``limit`` - string representing a positive integer - Is optional but is used for pagination,
  denoting the maximum number of items to return in this call. Defaults to ``100``.
- ``from`` - string representing a positive integer - Is optional but used for pagination,
  denoting the offset in the returned results. This should be treated as an opaque value and
  not explicitly set to anything other than the return value of ``next_token`` from a previous call.
  Defaults to ``0``.
- ``order_by`` - The method by which to sort the returned list of users.
  If the ordered field has duplicates, the second order is always by ascending ``name``,
  which guarantees a stable ordering. Valid values are:

  - ``name`` - Users are ordered alphabetically by ``name``. This is the default.
  - ``is_guest`` - Users are ordered by ``is_guest`` status.
  - ``admin`` - Users are ordered by ``admin`` status.
  - ``user_type`` - Users are ordered alphabetically by ``user_type``.
  - ``deactivated`` - Users are ordered by ``deactivated`` status.
  - ``shadow_banned`` - Users are ordered by ``shadow_banned`` status.
  - ``displayname`` - Users are ordered alphabetically by ``displayname``.
  - ``avatar_url`` - Users are ordered alphabetically by avatar URL.

- ``dir`` - Direction of sort order. Either ``f`` for forwards or ``b`` for backwards.
  Setting this value to ``b`` will reverse the above sort order. Defaults to ``f``.

Caution: the database only has indexes on the columns ``name`` and ``created_ts``.
This means that if a different sort order is used (``is_guest``, ``admin``,
``user_type``, ``deactivated``, ``shadow_banned``, ``avatar_url`` or ``displayname``),
this can cause a large load on the database, especially for large environments.

**Response**

The following fields are returned in the JSON response body:

- ``users`` - An array of objects, each containing information about a user.
  User objects contain the following fields:

  - ``name`` - string - Fully-qualified user ID (e.g. ``@user:server.com``).
  - ``is_guest`` - bool - Status if that user is a guest account.
  - ``admin`` - bool - Status if that user is a server administrator.
  - ``user_type`` - string - Type of the user. Normal users are type ``None``.
    This allows user-type-specific behaviour. There are also types ``support`` and ``bot``.
  - ``deactivated`` - bool - Status if that user has been marked as deactivated.
  - ``shadow_banned`` - bool - Status if that user has been marked as shadow-banned.
  - ``displayname`` - string - The user's display name if they have set one.
  - ``avatar_url`` - string - The user's avatar URL if they have set one.

- ``next_token`` - string representing a positive integer - Indication for pagination. See above.
- ``total`` - integer - Total number of users.
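
For illustration (example values only, not taken from this changeset), a request sorting by display name in descending order could look like::

    GET /_synapse/admin/v2/users?from=0&limit=10&order_by=displayname&dir=b

with a response of the shape described above:

.. code:: json

    {
        "users": [
            {
                "name": "@user:server.com",
                "is_guest": false,
                "admin": false,
                "user_type": null,
                "deactivated": false,
                "shadow_banned": false,
                "displayname": "User One",
                "avatar_url": null
            }
        ],
        "next_token": "10",
        "total": 200
    }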

Query current sessions for a user
=================================

@@ -128,6 +128,9 @@ Some guidelines follow:
  will be if no sub-options are enabled).
- Lines should be wrapped at 80 characters.
- Use two-space indents.
- `true` and `false` are spelt thus (as opposed to `True`, etc.)
- Use single quotes (`'`) rather than double-quotes (`"`) or backticks
  (`` ` ``) to refer to configuration options.

Example:

@@ -1246,9 +1246,9 @@ account_validity:
#
#allowed_local_3pids:
#  - medium: email
#    pattern: '.*@matrix\.org'
#    pattern: '^[^@]+@matrix\.org$'
#  - medium: email
#    pattern: '.*@vector\.im'
#    pattern: '^[^@]+@vector\.im$'
#  - medium: msisdn
#    pattern: '\+44'

@@ -1451,14 +1451,31 @@ metrics_flags:

## API Configuration ##

# A list of event types that will be included in the room_invite_state
# Controls for the state that is shared with users who receive an invite
# to a room
#
#room_invite_state_types:
#  - "m.room.join_rules"
#  - "m.room.canonical_alias"
#  - "m.room.avatar"
#  - "m.room.encryption"
#  - "m.room.name"
room_prejoin_state:
  # By default, the following state event types are shared with users who
  # receive invites to the room:
  #
  # - m.room.join_rules
  # - m.room.canonical_alias
  # - m.room.avatar
  # - m.room.encryption
  # - m.room.name
  #
  # Uncomment the following to disable these defaults (so that only the event
  # types listed in 'additional_event_types' are shared). Defaults to 'false'.
  #
  #disable_default_event_types: true

  # Additional state event types to share with users when they are invited
  # to a room.
  #
  # By default, this list is empty (so only the default event types are shared).
  #
  #additional_event_types:
  #  - org.example.custom.event.type

# A list of application service config files to use
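
For illustration, an uncommented homeserver.yaml snippet using the new setting might look like this (the additional event type is the made-up example from the sample above):

    room_prejoin_state:
      disable_default_event_types: false
      additional_event_types:
        - org.example.custom.event.type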
@@ -1,22 +1,49 @@
|
||||
#! /bin/bash -eu
|
||||
#!/usr/bin/env bash
|
||||
# This script is designed for developers who want to test their code
|
||||
# against Complement.
|
||||
#
|
||||
# It makes a Synapse image which represents the current checkout,
|
||||
# then downloads Complement and runs it with that image.
|
||||
# builds a synapse-complement image on top, then runs tests with it.
|
||||
#
|
||||
# By default the script will fetch the latest Complement master branch and
|
||||
# run tests with that. This can be overridden to use a custom Complement
|
||||
# checkout by setting the COMPLEMENT_DIR environment variable to the
|
||||
# filepath of a local Complement checkout.
|
||||
#
|
||||
# A regular expression of test method names can be supplied as the first
|
||||
# argument to the script. Complement will then only run those tests. If
|
||||
# no regex is supplied, all tests are run. For example;
|
||||
#
|
||||
# ./complement.sh "TestOutboundFederation(Profile|Send)"
|
||||
#
|
||||
|
||||
# Exit if a line returns a non-zero exit code
|
||||
set -e
|
||||
|
||||
# Change to the repository root
|
||||
cd "$(dirname $0)/.."
|
||||
|
||||
# Check for a user-specified Complement checkout
|
||||
if [[ -z "$COMPLEMENT_DIR" ]]; then
|
||||
echo "COMPLEMENT_DIR not set. Fetching the latest Complement checkout..."
|
||||
wget -Nq https://github.com/matrix-org/complement/archive/master.tar.gz
|
||||
tar -xzf master.tar.gz
|
||||
COMPLEMENT_DIR=complement-master
|
||||
echo "Checkout available at 'complement-master'"
|
||||
fi
|
||||
|
||||
# Build the base Synapse image from the local checkout
|
||||
docker build -t matrixdotorg/synapse:latest -f docker/Dockerfile .
|
||||
docker build -t matrixdotorg/synapse -f docker/Dockerfile .
|
||||
# Build the Synapse monolith image from Complement, based on the above image we just built
|
||||
docker build -t complement-synapse -f "$COMPLEMENT_DIR/dockerfiles/Synapse.Dockerfile" "$COMPLEMENT_DIR/dockerfiles"
|
||||
|
||||
# Download Complement
|
||||
wget -N https://github.com/matrix-org/complement/archive/master.tar.gz
|
||||
tar -xzf master.tar.gz
|
||||
cd complement-master
|
||||
cd "$COMPLEMENT_DIR"
|
||||
|
||||
# Build the Synapse image from Complement, based on the above image we just built
|
||||
docker build -t complement-synapse -f dockerfiles/Synapse.Dockerfile ./dockerfiles
|
||||
EXTRA_COMPLEMENT_ARGS=""
|
||||
if [[ -n "$1" ]]; then
|
||||
# A test name regex has been set, supply it to Complement
|
||||
EXTRA_COMPLEMENT_ARGS+="-run $1 "
|
||||
fi
|
||||
|
||||
# Run the tests on the resulting image!
|
||||
COMPLEMENT_BASE_IMAGE=complement-synapse go test -v -count=1 ./tests
|
||||
# Run the tests!
|
||||
COMPLEMENT_BASE_IMAGE=complement-synapse go test -v -tags synapse_blacklist -count=1 $EXTRA_COMPLEMENT_ARGS ./tests
|
||||
|
||||
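
For illustration, running the script against an existing local Complement checkout and a subset of tests might look like this (the `../complement` path is a made-up example):

    # Use a local checkout instead of downloading master, and only run matching tests
    COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh "TestOutboundFederation(Profile|Send)"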
@@ -59,6 +59,8 @@ class JoinRules:
    KNOCK = "knock"
    INVITE = "invite"
    PRIVATE = "private"
    # As defined for MSC3083.
    MSC3083_RESTRICTED = "restricted"


class LoginType:

@@ -17,6 +17,7 @@ from collections import OrderedDict
|
||||
from typing import Hashable, Optional, Tuple
|
||||
|
||||
from synapse.api.errors import LimitExceededError
|
||||
from synapse.storage.databases.main import DataStore
|
||||
from synapse.types import Requester
|
||||
from synapse.util import Clock
|
||||
|
||||
@@ -31,10 +32,13 @@ class Ratelimiter:
|
||||
burst_count: How many actions can be performed before being limited.
|
||||
"""
|
||||
|
||||
def __init__(self, clock: Clock, rate_hz: float, burst_count: int):
|
||||
def __init__(
|
||||
self, store: DataStore, clock: Clock, rate_hz: float, burst_count: int
|
||||
):
|
||||
self.clock = clock
|
||||
self.rate_hz = rate_hz
|
||||
self.burst_count = burst_count
|
||||
self.store = store
|
||||
|
||||
# An ordered dictionary keeping track of actions, when they were last
|
||||
# performed and how often. Each entry is a mapping from a key of arbitrary type
|
||||
@@ -46,45 +50,10 @@ class Ratelimiter:
|
||||
OrderedDict()
|
||||
) # type: OrderedDict[Hashable, Tuple[float, int, float]]
|
||||
|
||||
def can_requester_do_action(
|
||||
async def can_do_action(
|
||||
self,
|
||||
requester: Requester,
|
||||
rate_hz: Optional[float] = None,
|
||||
burst_count: Optional[int] = None,
|
||||
update: bool = True,
|
||||
_time_now_s: Optional[int] = None,
|
||||
) -> Tuple[bool, float]:
|
||||
"""Can the requester perform the action?
|
||||
|
||||
Args:
|
||||
requester: The requester to key off when rate limiting. The user property
|
||||
will be used.
|
||||
rate_hz: The long term number of actions that can be performed in a second.
|
||||
Overrides the value set during instantiation if set.
|
||||
burst_count: How many actions can be performed before being limited.
|
||||
Overrides the value set during instantiation if set.
|
||||
update: Whether to count this check as performing the action
|
||||
_time_now_s: The current time. Optional, defaults to the current time according
|
||||
to self.clock. Only used by tests.
|
||||
|
||||
Returns:
|
||||
A tuple containing:
|
||||
* A bool indicating if they can perform the action now
|
||||
* The reactor timestamp for when the action can be performed next.
|
||||
-1 if rate_hz is less than or equal to zero
|
||||
"""
|
||||
# Disable rate limiting of users belonging to any AS that is configured
|
||||
# not to be rate limited in its registration file (rate_limited: true|false).
|
||||
if requester.app_service and not requester.app_service.is_rate_limited():
|
||||
return True, -1.0
|
||||
|
||||
return self.can_do_action(
|
||||
requester.user.to_string(), rate_hz, burst_count, update, _time_now_s
|
||||
)
|
||||
|
||||
def can_do_action(
|
||||
self,
|
||||
key: Hashable,
|
||||
requester: Optional[Requester],
|
||||
key: Optional[Hashable] = None,
|
||||
rate_hz: Optional[float] = None,
|
||||
burst_count: Optional[int] = None,
|
||||
update: bool = True,
|
||||
@@ -92,9 +61,16 @@ class Ratelimiter:
|
||||
) -> Tuple[bool, float]:
|
||||
"""Can the entity (e.g. user or IP address) perform the action?
|
||||
|
||||
Checks if the user has ratelimiting disabled in the database by looking
|
||||
for null/zero values in the `ratelimit_override` table. (Non-zero
|
||||
values aren't honoured, as they're specific to the event sending
|
||||
ratelimiter, rather than all ratelimiters)
|
||||
|
||||
Args:
|
||||
key: The key we should use when rate limiting. Can be a user ID
|
||||
(when sending events), an IP address, etc.
|
||||
requester: The requester that is doing the action, if any. Used to check
|
||||
if the user has ratelimits disabled in the database.
|
||||
key: An arbitrary key used to classify an action. Defaults to the
|
||||
requester's user ID.
|
||||
rate_hz: The long term number of actions that can be performed in a second.
|
||||
Overrides the value set during instantiation if set.
|
||||
burst_count: How many actions can be performed before being limited.
|
||||
@@ -109,6 +85,30 @@ class Ratelimiter:
|
||||
* The reactor timestamp for when the action can be performed next.
|
||||
-1 if rate_hz is less than or equal to zero
|
||||
"""
|
||||
if key is None:
|
||||
if not requester:
|
||||
raise ValueError("Must supply at least one of `requester` or `key`")
|
||||
|
||||
key = requester.user.to_string()
|
||||
|
||||
if requester:
|
||||
# Disable rate limiting of users belonging to any AS that is configured
|
||||
# not to be rate limited in its registration file (rate_limited: true|false).
|
||||
if requester.app_service and not requester.app_service.is_rate_limited():
|
||||
return True, -1.0
|
||||
|
||||
# Check if ratelimiting has been disabled for the user.
|
||||
#
|
||||
# Note that we don't use the returned rate/burst count, as the table
|
||||
# is specifically for the event sending ratelimiter. Instead, we
|
||||
# only use it to (somewhat cheekily) infer whether the user should
|
||||
# be subject to any rate limiting or not.
|
||||
override = await self.store.get_ratelimit_for_user(
|
||||
requester.authenticated_entity
|
||||
)
|
||||
if override and not override.messages_per_second:
|
||||
return True, -1.0
|
||||
|
||||
# Override default values if set
|
||||
time_now_s = _time_now_s if _time_now_s is not None else self.clock.time()
|
||||
rate_hz = rate_hz if rate_hz is not None else self.rate_hz
|
||||
@@ -175,9 +175,10 @@ class Ratelimiter:
|
||||
else:
|
||||
del self.actions[key]
|
||||
|
||||
def ratelimit(
|
||||
async def ratelimit(
|
||||
self,
|
||||
key: Hashable,
|
||||
requester: Optional[Requester],
|
||||
key: Optional[Hashable] = None,
|
||||
rate_hz: Optional[float] = None,
|
||||
burst_count: Optional[int] = None,
|
||||
update: bool = True,
|
||||
@@ -185,8 +186,16 @@ class Ratelimiter:
|
||||
):
|
||||
"""Checks if an action can be performed. If not, raises a LimitExceededError
|
||||
|
||||
Checks if the user has ratelimiting disabled in the database by looking
|
||||
for null/zero values in the `ratelimit_override` table. (Non-zero
|
||||
values aren't honoured, as they're specific to the event sending
|
||||
ratelimiter, rather than all ratelimiters)
|
||||
|
||||
Args:
|
||||
key: An arbitrary key used to classify an action
|
||||
requester: The requester that is doing the action, if any. Used to check
if the user has ratelimits disabled.
|
||||
key: An arbitrary key used to classify an action. Defaults to the
|
||||
requester's user ID.
|
||||
rate_hz: The long term number of actions that can be performed in a second.
|
||||
Overrides the value set during instantiation if set.
|
||||
burst_count: How many actions can be performed before being limited.
|
||||
@@ -201,7 +210,8 @@ class Ratelimiter:
|
||||
"""
|
||||
time_now_s = _time_now_s if _time_now_s is not None else self.clock.time()
|
||||
|
||||
allowed, time_allowed = self.can_do_action(
|
||||
allowed, time_allowed = await self.can_do_action(
|
||||
requester,
|
||||
key,
|
||||
rate_hz=rate_hz,
|
||||
burst_count=burst_count,
|
||||
|
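
To illustrate the new calling convention introduced by this hunk (a minimal sketch only: `ratelimit_example`, the `hs` parameter and the numeric limits are placeholders, not code from this changeset), ratelimiters are now constructed with a datastore and their check methods must be awaited:

    from synapse.api.ratelimiting import Ratelimiter

    async def ratelimit_example(hs, requester):
        # The Ratelimiter now takes a datastore so it can look up per-user
        # overrides in the `ratelimit_override` table.
        limiter = Ratelimiter(
            store=hs.get_datastore(),
            clock=hs.get_clock(),
            rate_hz=0.1,    # arbitrary example values
            burst_count=5,
        )

        # Keyed off the requester's user ID (replaces can_requester_do_action).
        allowed, next_allowed_ts = await limiter.can_do_action(requester)

        # Keyed off an explicit value (e.g. an IP address) with no requester;
        # ratelimit() raises LimitExceededError once the limit is hit.
        await limiter.ratelimit(None, "203.0.113.1")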
||||
@@ -57,7 +57,7 @@ class RoomVersion:
|
||||
state_res = attr.ib(type=int) # one of the StateResolutionVersions
|
||||
enforce_key_validity = attr.ib(type=bool)
|
||||
|
||||
# bool: before MSC2261/MSC2432, m.room.aliases had special auth rules and redaction rules
|
||||
# Before MSC2261/MSC2432, m.room.aliases had special auth rules and redaction rules
|
||||
special_case_aliases_auth = attr.ib(type=bool)
|
||||
# Strictly enforce canonicaljson, do not allow:
|
||||
# * Integers outside the range of [-2 ^ 53 + 1, 2 ^ 53 - 1]
|
||||
@@ -69,6 +69,8 @@ class RoomVersion:
|
||||
limit_notifications_power_levels = attr.ib(type=bool)
|
||||
# MSC2174/MSC2176: Apply updated redaction rules algorithm.
|
||||
msc2176_redaction_rules = attr.ib(type=bool)
|
||||
# MSC3083: Support the 'restricted' join_rule.
|
||||
msc3083_join_rules = attr.ib(type=bool)
|
||||
|
||||
|
||||
class RoomVersions:
|
||||
@@ -82,6 +84,7 @@ class RoomVersions:
|
||||
strict_canonicaljson=False,
|
||||
limit_notifications_power_levels=False,
|
||||
msc2176_redaction_rules=False,
|
||||
msc3083_join_rules=False,
|
||||
)
|
||||
V2 = RoomVersion(
|
||||
"2",
|
||||
@@ -93,6 +96,7 @@ class RoomVersions:
|
||||
strict_canonicaljson=False,
|
||||
limit_notifications_power_levels=False,
|
||||
msc2176_redaction_rules=False,
|
||||
msc3083_join_rules=False,
|
||||
)
|
||||
V3 = RoomVersion(
|
||||
"3",
|
||||
@@ -104,6 +108,7 @@ class RoomVersions:
|
||||
strict_canonicaljson=False,
|
||||
limit_notifications_power_levels=False,
|
||||
msc2176_redaction_rules=False,
|
||||
msc3083_join_rules=False,
|
||||
)
|
||||
V4 = RoomVersion(
|
||||
"4",
|
||||
@@ -115,6 +120,7 @@ class RoomVersions:
|
||||
strict_canonicaljson=False,
|
||||
limit_notifications_power_levels=False,
|
||||
msc2176_redaction_rules=False,
|
||||
msc3083_join_rules=False,
|
||||
)
|
||||
V5 = RoomVersion(
|
||||
"5",
|
||||
@@ -126,6 +132,7 @@ class RoomVersions:
|
||||
strict_canonicaljson=False,
|
||||
limit_notifications_power_levels=False,
|
||||
msc2176_redaction_rules=False,
|
||||
msc3083_join_rules=False,
|
||||
)
|
||||
V6 = RoomVersion(
|
||||
"6",
|
||||
@@ -137,6 +144,7 @@ class RoomVersions:
|
||||
strict_canonicaljson=True,
|
||||
limit_notifications_power_levels=True,
|
||||
msc2176_redaction_rules=False,
|
||||
msc3083_join_rules=False,
|
||||
)
|
||||
MSC2176 = RoomVersion(
|
||||
"org.matrix.msc2176",
|
||||
@@ -148,6 +156,19 @@ class RoomVersions:
|
||||
strict_canonicaljson=True,
|
||||
limit_notifications_power_levels=True,
|
||||
msc2176_redaction_rules=True,
|
||||
msc3083_join_rules=False,
|
||||
)
|
||||
MSC3083 = RoomVersion(
|
||||
"org.matrix.msc3083",
|
||||
RoomDisposition.UNSTABLE,
|
||||
EventFormatVersions.V3,
|
||||
StateResolutionVersions.V2,
|
||||
enforce_key_validity=True,
|
||||
special_case_aliases_auth=False,
|
||||
strict_canonicaljson=True,
|
||||
limit_notifications_power_levels=True,
|
||||
msc2176_redaction_rules=False,
|
||||
msc3083_join_rules=True,
|
||||
)
|
||||
|
||||
|
||||
@@ -162,4 +183,5 @@ KNOWN_ROOM_VERSIONS = {
|
||||
RoomVersions.V6,
|
||||
RoomVersions.MSC2176,
|
||||
)
|
||||
# Note that we do not include MSC3083 here unless it is enabled in the config.
|
||||
} # type: Dict[str, RoomVersion]
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# Copyright 2015, 2016 OpenMarket Ltd
|
||||
# Copyright 2015-2021 The Matrix.org Foundation C.I.C.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -12,38 +12,131 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from synapse.api.constants import EventTypes
|
||||
import logging
|
||||
from typing import Iterable
|
||||
|
||||
from ._base import Config
|
||||
from synapse.api.constants import EventTypes
|
||||
from synapse.config._base import Config, ConfigError
|
||||
from synapse.config._util import validate_config
|
||||
from synapse.types import JsonDict
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ApiConfig(Config):
|
||||
section = "api"
|
||||
|
||||
def read_config(self, config, **kwargs):
|
||||
self.room_invite_state_types = config.get(
|
||||
"room_invite_state_types",
|
||||
[
|
||||
EventTypes.JoinRules,
|
||||
EventTypes.CanonicalAlias,
|
||||
EventTypes.RoomAvatar,
|
||||
EventTypes.RoomEncryption,
|
||||
EventTypes.Name,
|
||||
],
|
||||
def read_config(self, config: JsonDict, **kwargs):
|
||||
validate_config(_MAIN_SCHEMA, config, ())
|
||||
self.room_prejoin_state = list(self._get_prejoin_state_types(config))
|
||||
|
||||
def generate_config_section(cls, **kwargs) -> str:
|
||||
formatted_default_state_types = "\n".join(
|
||||
" # - %s" % (t,) for t in _DEFAULT_PREJOIN_STATE_TYPES
|
||||
)
|
||||
|
||||
def generate_config_section(cls, **kwargs):
|
||||
return """\
|
||||
## API Configuration ##
|
||||
|
||||
# A list of event types that will be included in the room_invite_state
|
||||
# Controls for the state that is shared with users who receive an invite
|
||||
# to a room
|
||||
#
|
||||
#room_invite_state_types:
|
||||
# - "{JoinRules}"
|
||||
# - "{CanonicalAlias}"
|
||||
# - "{RoomAvatar}"
|
||||
# - "{RoomEncryption}"
|
||||
# - "{Name}"
|
||||
""".format(
|
||||
**vars(EventTypes)
|
||||
)
|
||||
room_prejoin_state:
|
||||
# By default, the following state event types are shared with users who
|
||||
# receive invites to the room:
|
||||
#
|
||||
%(formatted_default_state_types)s
|
||||
#
|
||||
# Uncomment the following to disable these defaults (so that only the event
|
||||
# types listed in 'additional_event_types' are shared). Defaults to 'false'.
|
||||
#
|
||||
#disable_default_event_types: true
|
||||
|
||||
# Additional state event types to share with users when they are invited
|
||||
# to a room.
|
||||
#
|
||||
# By default, this list is empty (so only the default event types are shared).
|
||||
#
|
||||
#additional_event_types:
|
||||
# - org.example.custom.event.type
|
||||
""" % {
|
||||
"formatted_default_state_types": formatted_default_state_types
|
||||
}
|
||||
|
||||
def _get_prejoin_state_types(self, config: JsonDict) -> Iterable[str]:
|
||||
"""Get the event types to include in the prejoin state
|
||||
|
||||
Parses the config and returns an iterable of the event types to be included.
|
||||
"""
|
||||
room_prejoin_state_config = config.get("room_prejoin_state") or {}
|
||||
|
||||
# backwards-compatibility support for room_invite_state_types
|
||||
if "room_invite_state_types" in config:
|
||||
# if both "room_invite_state_types" and "room_prejoin_state" are set, then
|
||||
# we don't really know what to do.
|
||||
if room_prejoin_state_config:
|
||||
raise ConfigError(
|
||||
"Can't specify both 'room_invite_state_types' and 'room_prejoin_state' "
|
||||
"in config"
|
||||
)
|
||||
|
||||
logger.warning(_ROOM_INVITE_STATE_TYPES_WARNING)
|
||||
|
||||
yield from config["room_invite_state_types"]
|
||||
return
|
||||
|
||||
if not room_prejoin_state_config.get("disable_default_event_types"):
|
||||
yield from _DEFAULT_PREJOIN_STATE_TYPES
|
||||
|
||||
if self.spaces_enabled:
|
||||
# MSC1772 suggests adding m.room.create to the prejoin state
|
||||
yield EventTypes.Create
|
||||
|
||||
yield from room_prejoin_state_config.get("additional_event_types", [])
|
||||
|
||||
|
||||
_ROOM_INVITE_STATE_TYPES_WARNING = """\
|
||||
WARNING: The 'room_invite_state_types' configuration setting is now deprecated,
|
||||
and replaced with 'room_prejoin_state'. New features may not work correctly
|
||||
unless 'room_invite_state_types' is removed. See the sample configuration file for
|
||||
details of 'room_prejoin_state'.
|
||||
--------------------------------------------------------------------------------
|
||||
"""
|
||||
|
||||
_DEFAULT_PREJOIN_STATE_TYPES = [
|
||||
EventTypes.JoinRules,
|
||||
EventTypes.CanonicalAlias,
|
||||
EventTypes.RoomAvatar,
|
||||
EventTypes.RoomEncryption,
|
||||
EventTypes.Name,
|
||||
]
|
||||
|
||||
|
||||
# room_prejoin_state can either be None (as it is in the default config), or
|
||||
# an object containing other config settings
|
||||
_ROOM_PREJOIN_STATE_CONFIG_SCHEMA = {
|
||||
"oneOf": [
|
||||
{
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"disable_default_event_types": {"type": "boolean"},
|
||||
"additional_event_types": {
|
||||
"type": "array",
|
||||
"items": {"type": "string"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{"type": "null"},
|
||||
]
|
||||
}
|
||||
|
||||
# the legacy room_invite_state_types setting
|
||||
_ROOM_INVITE_STATE_TYPES_SCHEMA = {"type": "array", "items": {"type": "string"}}
|
||||
|
||||
_MAIN_SCHEMA = {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"room_prejoin_state": _ROOM_PREJOIN_STATE_CONFIG_SCHEMA,
|
||||
"room_invite_state_types": _ROOM_INVITE_STATE_TYPES_SCHEMA,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -13,6 +13,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions
|
||||
from synapse.config._base import Config
|
||||
from synapse.types import JsonDict
|
||||
|
||||
@@ -27,7 +28,11 @@ class ExperimentalConfig(Config):
|
||||
|
||||
# MSC2858 (multiple SSO identity providers)
|
||||
self.msc2858_enabled = experimental.get("msc2858_enabled", False) # type: bool
|
||||
# Spaces (MSC1772, MSC2946, etc)
|
||||
|
||||
# Spaces (MSC1772, MSC2946, MSC3083, etc)
|
||||
self.spaces_enabled = experimental.get("spaces_enabled", False) # type: bool
|
||||
if self.spaces_enabled:
|
||||
KNOWN_ROOM_VERSIONS[RoomVersions.MSC3083.identifier] = RoomVersions.MSC3083
|
||||
|
||||
# MSC3026 (busy presence state)
|
||||
self.msc3026_enabled = experimental.get("msc3026_enabled", False) # type: bool
|
||||
|
||||
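
For illustration, the flag read above would be switched on in homeserver.yaml roughly like this (a sketch: the `experimental_features` section name is assumed from Synapse's existing experimental config and is not shown in this hunk):

    experimental_features:
      spaces_enabled: true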
@@ -298,9 +298,9 @@ class RegistrationConfig(Config):
|
||||
#
|
||||
#allowed_local_3pids:
|
||||
# - medium: email
|
||||
# pattern: '.*@matrix\\.org'
|
||||
# pattern: '^[^@]+@matrix\\.org$'
|
||||
# - medium: email
|
||||
# pattern: '.*@vector\\.im'
|
||||
# pattern: '^[^@]+@vector\\.im$'
|
||||
# - medium: msisdn
|
||||
# pattern: '\\+44'
|
||||
|
||||
|
||||
@@ -162,7 +162,7 @@ def check(
|
||||
logger.debug("Auth events: %s", [a.event_id for a in auth_events.values()])
|
||||
|
||||
if event.type == EventTypes.Member:
|
||||
_is_membership_change_allowed(event, auth_events)
|
||||
_is_membership_change_allowed(room_version_obj, event, auth_events)
|
||||
logger.debug("Allowing! %s", event)
|
||||
return
|
||||
|
||||
@@ -220,8 +220,19 @@ def _can_federate(event: EventBase, auth_events: StateMap[EventBase]) -> bool:
|
||||
|
||||
|
||||
def _is_membership_change_allowed(
|
||||
event: EventBase, auth_events: StateMap[EventBase]
|
||||
room_version: RoomVersion, event: EventBase, auth_events: StateMap[EventBase]
|
||||
) -> None:
|
||||
"""
|
||||
Confirms that the event which changes membership is an allowed change.
|
||||
|
||||
Args:
|
||||
room_version: The version of the room.
|
||||
event: The event to check.
|
||||
auth_events: The current auth events of the room.
|
||||
|
||||
Raises:
|
||||
AuthError if the event is not allowed.
|
||||
"""
|
||||
membership = event.content["membership"]
|
||||
|
||||
# Check if this is the room creator joining:
|
||||
@@ -315,14 +326,19 @@ def _is_membership_change_allowed(
|
||||
if user_level < invite_level:
|
||||
raise AuthError(403, "You don't have permission to invite users")
|
||||
elif Membership.JOIN == membership:
|
||||
# Joins are valid iff caller == target and they were:
|
||||
# invited: They are accepting the invitation
|
||||
# joined: It's a NOOP
|
||||
# Joins are valid iff caller == target and:
|
||||
# * They are not banned.
|
||||
# * They are accepting a previously sent invitation.
|
||||
# * They are already joined (it's a NOOP).
|
||||
# * The room is public or restricted.
|
||||
if event.user_id != target_user_id:
|
||||
raise AuthError(403, "Cannot force another user to join.")
|
||||
elif target_banned:
|
||||
raise AuthError(403, "You are banned from this room")
|
||||
elif join_rule == JoinRules.PUBLIC:
|
||||
elif join_rule == JoinRules.PUBLIC or (
|
||||
room_version.msc3083_join_rules
|
||||
and join_rule == JoinRules.MSC3083_RESTRICTED
|
||||
):
|
||||
pass
|
||||
elif join_rule == JoinRules.INVITE:
|
||||
if not caller_in_room and not caller_invited:
|
||||
|
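
For context, a minimal `m.room.join_rules` state event selecting the new join rule could look like the following (illustrative only; MSC3083 also defines how the allowed memberships are expressed, which is outside this hunk):

    {
        "type": "m.room.join_rules",
        "state_key": "",
        "content": {
            "join_rule": "restricted"
        }
    }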
||||
@@ -870,6 +870,7 @@ class FederationHandlerRegistry:
|
||||
|
||||
# A rate limiter for incoming room key requests per origin.
|
||||
self._room_key_request_rate_limiter = Ratelimiter(
|
||||
store=hs.get_datastore(),
|
||||
clock=self.clock,
|
||||
rate_hz=self.config.rc_key_requests.per_second,
|
||||
burst_count=self.config.rc_key_requests.burst_count,
|
||||
@@ -930,7 +931,9 @@ class FederationHandlerRegistry:
|
||||
# the limit, drop them.
|
||||
if (
|
||||
edu_type == EduTypes.RoomKeyRequest
|
||||
and not self._room_key_request_rate_limiter.can_do_action(origin)
|
||||
and not await self._room_key_request_rate_limiter.can_do_action(
|
||||
None, origin
|
||||
)
|
||||
):
|
||||
return
|
||||
|
||||
|
||||
@@ -49,7 +49,7 @@ class BaseHandler:
|
||||
|
||||
# The rate_hz and burst_count are overridden on a per-user basis
|
||||
self.request_ratelimiter = Ratelimiter(
|
||||
clock=self.clock, rate_hz=0, burst_count=0
|
||||
store=self.store, clock=self.clock, rate_hz=0, burst_count=0
|
||||
)
|
||||
self._rc_message = self.hs.config.rc_message
|
||||
|
||||
@@ -57,6 +57,7 @@ class BaseHandler:
|
||||
# by the presence of rate limits in the config
|
||||
if self.hs.config.rc_admin_redaction:
|
||||
self.admin_redaction_ratelimiter = Ratelimiter(
|
||||
store=self.store,
|
||||
clock=self.clock,
|
||||
rate_hz=self.hs.config.rc_admin_redaction.per_second,
|
||||
burst_count=self.hs.config.rc_admin_redaction.burst_count,
|
||||
@@ -91,11 +92,6 @@ class BaseHandler:
|
||||
if app_service is not None:
|
||||
return # do not ratelimit app service senders
|
||||
|
||||
# Disable rate limiting of users belonging to any AS that is configured
|
||||
# not to be rate limited in its registration file (rate_limited: true|false).
|
||||
if requester.app_service and not requester.app_service.is_rate_limited():
|
||||
return
|
||||
|
||||
messages_per_second = self._rc_message.per_second
|
||||
burst_count = self._rc_message.burst_count
|
||||
|
||||
@@ -113,11 +109,11 @@ class BaseHandler:
|
||||
if is_admin_redaction and self.admin_redaction_ratelimiter:
|
||||
# If we have separate config for admin redactions, use a separate
|
||||
# ratelimiter as to not have user_ids clash
|
||||
self.admin_redaction_ratelimiter.ratelimit(user_id, update=update)
|
||||
await self.admin_redaction_ratelimiter.ratelimit(requester, update=update)
|
||||
else:
|
||||
# Override rate and burst count per-user
|
||||
self.request_ratelimiter.ratelimit(
|
||||
user_id,
|
||||
await self.request_ratelimiter.ratelimit(
|
||||
requester,
|
||||
rate_hz=messages_per_second,
|
||||
burst_count=burst_count,
|
||||
update=update,
|
||||
|
||||
@@ -238,6 +238,7 @@ class AuthHandler(BaseHandler):
|
||||
# Ratelimiter for failed auth during UIA. Uses same ratelimit config
|
||||
# as per `rc_login.failed_attempts`.
|
||||
self._failed_uia_attempts_ratelimiter = Ratelimiter(
|
||||
store=self.store,
|
||||
clock=self.clock,
|
||||
rate_hz=self.hs.config.rc_login_failed_attempts.per_second,
|
||||
burst_count=self.hs.config.rc_login_failed_attempts.burst_count,
|
||||
@@ -248,6 +249,7 @@ class AuthHandler(BaseHandler):
|
||||
|
||||
# Ratelimiter for failed /login attempts
|
||||
self._failed_login_attempts_ratelimiter = Ratelimiter(
|
||||
store=self.store,
|
||||
clock=hs.get_clock(),
|
||||
rate_hz=self.hs.config.rc_login_failed_attempts.per_second,
|
||||
burst_count=self.hs.config.rc_login_failed_attempts.burst_count,
|
||||
@@ -352,7 +354,7 @@ class AuthHandler(BaseHandler):
|
||||
requester_user_id = requester.user.to_string()
|
||||
|
||||
# Check if we should be ratelimited due to too many previous failed attempts
|
||||
self._failed_uia_attempts_ratelimiter.ratelimit(requester_user_id, update=False)
|
||||
await self._failed_uia_attempts_ratelimiter.ratelimit(requester, update=False)
|
||||
|
||||
# build a list of supported flows
|
||||
supported_ui_auth_types = await self._get_available_ui_auth_types(
|
||||
@@ -373,7 +375,9 @@ class AuthHandler(BaseHandler):
|
||||
)
|
||||
except LoginError:
|
||||
# Update the ratelimiter to say we failed (`can_do_action` doesn't raise).
|
||||
self._failed_uia_attempts_ratelimiter.can_do_action(requester_user_id)
|
||||
await self._failed_uia_attempts_ratelimiter.can_do_action(
|
||||
requester,
|
||||
)
|
||||
raise
|
||||
|
||||
# find the completed login type
|
||||
@@ -982,8 +986,8 @@ class AuthHandler(BaseHandler):
|
||||
# We also apply account rate limiting using the 3PID as a key, as
|
||||
# otherwise using 3PID bypasses the ratelimiting based on user ID.
|
||||
if ratelimit:
|
||||
self._failed_login_attempts_ratelimiter.ratelimit(
|
||||
(medium, address), update=False
|
||||
await self._failed_login_attempts_ratelimiter.ratelimit(
|
||||
None, (medium, address), update=False
|
||||
)
|
||||
|
||||
# Check for login providers that support 3pid login types
|
||||
@@ -1016,8 +1020,8 @@ class AuthHandler(BaseHandler):
|
||||
# this code path, which is fine as then the per-user ratelimit
|
||||
# will kick in below.
|
||||
if ratelimit:
|
||||
self._failed_login_attempts_ratelimiter.can_do_action(
|
||||
(medium, address)
|
||||
await self._failed_login_attempts_ratelimiter.can_do_action(
|
||||
None, (medium, address)
|
||||
)
|
||||
raise LoginError(403, "", errcode=Codes.FORBIDDEN)
|
||||
|
||||
@@ -1039,8 +1043,8 @@ class AuthHandler(BaseHandler):
|
||||
|
||||
# Check if we've hit the failed ratelimit (but don't update it)
|
||||
if ratelimit:
|
||||
self._failed_login_attempts_ratelimiter.ratelimit(
|
||||
qualified_user_id.lower(), update=False
|
||||
await self._failed_login_attempts_ratelimiter.ratelimit(
|
||||
None, qualified_user_id.lower(), update=False
|
||||
)
|
||||
|
||||
try:
|
||||
@@ -1051,8 +1055,8 @@ class AuthHandler(BaseHandler):
|
||||
# exception and masking the LoginError. The actual ratelimiting
|
||||
# should have happened above.
|
||||
if ratelimit:
|
||||
self._failed_login_attempts_ratelimiter.can_do_action(
|
||||
qualified_user_id.lower()
|
||||
await self._failed_login_attempts_ratelimiter.can_do_action(
|
||||
None, qualified_user_id.lower()
|
||||
)
|
||||
raise
|
||||
|
||||
|
||||
@@ -81,6 +81,7 @@ class DeviceMessageHandler:
|
||||
)
|
||||
|
||||
self._ratelimiter = Ratelimiter(
|
||||
store=self.store,
|
||||
clock=hs.get_clock(),
|
||||
rate_hz=hs.config.rc_key_requests.per_second,
|
||||
burst_count=hs.config.rc_key_requests.burst_count,
|
||||
@@ -191,8 +192,8 @@ class DeviceMessageHandler:
|
||||
if (
|
||||
message_type == EduTypes.RoomKeyRequest
|
||||
and user_id != sender_user_id
|
||||
and self._ratelimiter.can_do_action(
|
||||
(sender_user_id, requester.device_id)
|
||||
and await self._ratelimiter.can_do_action(
|
||||
requester, (sender_user_id, requester.device_id)
|
||||
)
|
||||
):
|
||||
continue
|
||||
|
||||
@@ -1711,7 +1711,7 @@ class FederationHandler(BaseHandler):
|
||||
member_handler = self.hs.get_room_member_handler()
|
||||
# We don't rate limit based on room ID, as that should be done by
|
||||
# sending server.
|
||||
member_handler.ratelimit_invite(None, event.state_key)
|
||||
await member_handler.ratelimit_invite(None, None, event.state_key)
|
||||
|
||||
# keep a record of the room version, if we don't yet know it.
|
||||
# (this may get overwritten if we later get a different room version in a
|
||||
|
||||
@@ -61,17 +61,19 @@ class IdentityHandler(BaseHandler):
|
||||
|
||||
# Ratelimiters for `/requestToken` endpoints.
|
||||
self._3pid_validation_ratelimiter_ip = Ratelimiter(
|
||||
store=self.store,
|
||||
clock=hs.get_clock(),
|
||||
rate_hz=hs.config.ratelimiting.rc_3pid_validation.per_second,
|
||||
burst_count=hs.config.ratelimiting.rc_3pid_validation.burst_count,
|
||||
)
|
||||
self._3pid_validation_ratelimiter_address = Ratelimiter(
|
||||
store=self.store,
|
||||
clock=hs.get_clock(),
|
||||
rate_hz=hs.config.ratelimiting.rc_3pid_validation.per_second,
|
||||
burst_count=hs.config.ratelimiting.rc_3pid_validation.burst_count,
|
||||
)
|
||||
|
||||
def ratelimit_request_token_requests(
|
||||
async def ratelimit_request_token_requests(
|
||||
self,
|
||||
request: SynapseRequest,
|
||||
medium: str,
|
||||
@@ -85,8 +87,12 @@ class IdentityHandler(BaseHandler):
|
||||
address: The actual threepid ID, e.g. the phone number or email address
|
||||
"""
|
||||
|
||||
self._3pid_validation_ratelimiter_ip.ratelimit((medium, request.getClientIP()))
|
||||
self._3pid_validation_ratelimiter_address.ratelimit((medium, address))
|
||||
await self._3pid_validation_ratelimiter_ip.ratelimit(
|
||||
None, (medium, request.getClientIP())
|
||||
)
|
||||
await self._3pid_validation_ratelimiter_address.ratelimit(
|
||||
None, (medium, address)
|
||||
)
|
||||
|
||||
async def threepid_from_creds(
|
||||
self, id_server: str, creds: Dict[str, str]
|
||||
|
||||
@@ -385,7 +385,7 @@ class EventCreationHandler:
|
||||
self._events_shard_config = self.config.worker.events_shard_config
|
||||
self._instance_name = hs.get_instance_name()
|
||||
|
||||
self.room_invite_state_types = self.hs.config.room_invite_state_types
|
||||
self.room_invite_state_types = self.hs.config.api.room_prejoin_state
|
||||
|
||||
self.membership_types_to_include_profile_data_in = (
|
||||
{Membership.JOIN, Membership.INVITE}
|
||||
|
||||
@@ -204,7 +204,7 @@ class RegistrationHandler(BaseHandler):
|
||||
Raises:
|
||||
SynapseError if there was a problem registering.
|
||||
"""
|
||||
self.check_registration_ratelimit(address)
|
||||
await self.check_registration_ratelimit(address)
|
||||
|
||||
result = await self.spam_checker.check_registration_for_spam(
|
||||
threepid,
|
||||
@@ -583,7 +583,7 @@ class RegistrationHandler(BaseHandler):
|
||||
errcode=Codes.EXCLUSIVE,
|
||||
)
|
||||
|
||||
def check_registration_ratelimit(self, address: Optional[str]) -> None:
|
||||
async def check_registration_ratelimit(self, address: Optional[str]) -> None:
|
||||
"""A simple helper method to check whether the registration rate limit has been hit
|
||||
for a given IP address
|
||||
|
||||
@@ -597,7 +597,7 @@ class RegistrationHandler(BaseHandler):
|
||||
if not address:
|
||||
return
|
||||
|
||||
self.ratelimiter.ratelimit(address)
|
||||
await self.ratelimiter.ratelimit(None, address)
|
||||
|
||||
async def register_with_store(
|
||||
self,
|
||||
|
||||
@@ -75,22 +75,26 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
|
||||
self.allow_per_room_profiles = self.config.allow_per_room_profiles
|
||||
|
||||
self._join_rate_limiter_local = Ratelimiter(
|
||||
store=self.store,
|
||||
clock=self.clock,
|
||||
rate_hz=hs.config.ratelimiting.rc_joins_local.per_second,
|
||||
burst_count=hs.config.ratelimiting.rc_joins_local.burst_count,
|
||||
)
|
||||
self._join_rate_limiter_remote = Ratelimiter(
|
||||
store=self.store,
|
||||
clock=self.clock,
|
||||
rate_hz=hs.config.ratelimiting.rc_joins_remote.per_second,
|
||||
burst_count=hs.config.ratelimiting.rc_joins_remote.burst_count,
|
||||
)
|
||||
|
||||
self._invites_per_room_limiter = Ratelimiter(
|
||||
store=self.store,
|
||||
clock=self.clock,
|
||||
rate_hz=hs.config.ratelimiting.rc_invites_per_room.per_second,
|
||||
burst_count=hs.config.ratelimiting.rc_invites_per_room.burst_count,
|
||||
)
|
||||
self._invites_per_user_limiter = Ratelimiter(
|
||||
store=self.store,
|
||||
clock=self.clock,
|
||||
rate_hz=hs.config.ratelimiting.rc_invites_per_user.per_second,
|
||||
burst_count=hs.config.ratelimiting.rc_invites_per_user.burst_count,
|
||||
@@ -159,15 +163,20 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
|
||||
async def forget(self, user: UserID, room_id: str) -> None:
|
||||
raise NotImplementedError()
|
||||
|
||||
def ratelimit_invite(self, room_id: Optional[str], invitee_user_id: str):
|
||||
async def ratelimit_invite(
|
||||
self,
|
||||
requester: Optional[Requester],
|
||||
room_id: Optional[str],
|
||||
invitee_user_id: str,
|
||||
):
|
||||
"""Ratelimit invites by room and by target user.
|
||||
|
||||
If room ID is missing then we just rate limit by target user.
|
||||
"""
|
||||
if room_id:
|
||||
self._invites_per_room_limiter.ratelimit(room_id)
|
||||
await self._invites_per_room_limiter.ratelimit(requester, room_id)
|
||||
|
||||
self._invites_per_user_limiter.ratelimit(invitee_user_id)
|
||||
await self._invites_per_user_limiter.ratelimit(requester, invitee_user_id)
|
||||
|
||||
async def _local_membership_update(
|
||||
self,
|
||||
@@ -237,7 +246,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
|
||||
(
|
||||
allowed,
|
||||
time_allowed,
|
||||
) = self._join_rate_limiter_local.can_requester_do_action(requester)
|
||||
) = await self._join_rate_limiter_local.can_do_action(requester)
|
||||
|
||||
if not allowed:
|
||||
raise LimitExceededError(
|
||||
@@ -421,9 +430,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
|
||||
if effective_membership_state == Membership.INVITE:
|
||||
target_id = target.to_string()
|
||||
if ratelimit:
|
||||
# Don't ratelimit application services.
|
||||
if not requester.app_service or requester.app_service.is_rate_limited():
|
||||
self.ratelimit_invite(room_id, target_id)
|
||||
await self.ratelimit_invite(requester, room_id, target_id)
|
||||
|
||||
# block any attempts to invite the server notices mxid
|
||||
if target_id == self._server_notices_mxid:
|
||||
@@ -534,7 +541,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
|
||||
(
|
||||
allowed,
|
||||
time_allowed,
|
||||
) = self._join_rate_limiter_remote.can_requester_do_action(
|
||||
) = await self._join_rate_limiter_remote.can_do_action(
|
||||
requester,
|
||||
)
|
||||
|
||||
|
||||
@@ -614,3 +614,55 @@ __all__ = [
|
||||
"InFlightGauge",
|
||||
"BucketCollector",
|
||||
]
|
||||
|
||||
|
||||
try:
|
||||
import ctypes
|
||||
import ctypes.util
|
||||
|
||||
jemalloc = ctypes.CDLL(ctypes.util.find_library("jemalloc"))
|
||||
|
||||
def get_val(name):
|
||||
allocated = ctypes.c_size_t(0)
|
||||
allocated_len = ctypes.c_size_t(ctypes.sizeof(allocated))
|
||||
jemalloc.mallctl(
|
||||
name.encode("ascii"),
|
||||
ctypes.byref(allocated),
|
||||
ctypes.byref(allocated_len),
|
||||
None,
|
||||
None,
|
||||
)
|
||||
return allocated.value
|
||||
|
||||
def refresh_stats():
|
||||
epoch = ctypes.c_uint64(0)
|
||||
jemalloc.mallctl(
|
||||
b"epoch", None, None, ctypes.byref(epoch), ctypes.sizeof(epoch)
|
||||
)
|
||||
|
||||
class JemallocCollector(object):
|
||||
def collect(self):
|
||||
refresh_stats()
|
||||
|
||||
g = GaugeMetricFamily(
|
||||
"jemalloc_stats_app_memory",
|
||||
"",
|
||||
labels=["type"],
|
||||
)
|
||||
for t in (
|
||||
"allocated",
|
||||
"active",
|
||||
"resident",
|
||||
"mapped",
|
||||
"retained",
|
||||
"metadata",
|
||||
):
|
||||
g.add_metric([t], value=get_val(f"stats.{t}"))
|
||||
|
||||
yield g
|
||||
|
||||
REGISTRY.register(JemallocCollector())
|
||||
|
||||
|
||||
except Exception:
|
||||
logger.exception("Failed to start jemalloc metrics")
|
||||
|
||||
@@ -345,6 +345,15 @@ class RulesForRoom:
|
||||
# to self around in the callback.
|
||||
self.invalidate_all_cb = _Invalidation(rules_for_room_cache, room_id)
|
||||
|
||||
def get_data_for_memory_size(self):
|
||||
return (
|
||||
self.member_map,
|
||||
self.rules_by_user,
|
||||
self.state_group,
|
||||
self.sequence,
|
||||
self.uninteresting_user_set,
|
||||
)
|
||||
|
||||
async def get_rules(
|
||||
self, event: EventBase, context: EventContext
|
||||
) -> Dict[str, List[Dict[str, dict]]]:
|
||||
|
||||
@@ -77,7 +77,7 @@ class ReplicationRegisterServlet(ReplicationEndpoint):
|
||||
async def _handle_request(self, request, user_id):
|
||||
content = parse_json_object_from_request(request)
|
||||
|
||||
self.registration_handler.check_registration_ratelimit(content["address"])
|
||||
await self.registration_handler.check_registration_ratelimit(content["address"])
|
||||
|
||||
await self.registration_handler.register_with_store(
|
||||
user_id=user_id,
|
||||
|
||||
@@ -36,6 +36,7 @@ from synapse.rest.admin._base import (
|
||||
)
|
||||
from synapse.rest.client.v2_alpha._base import client_patterns
|
||||
from synapse.storage.databases.main.media_repository import MediaSortOrder
|
||||
from synapse.storage.databases.main.stats import UserSortOrder
|
||||
from synapse.types import JsonDict, UserID
|
||||
|
||||
if TYPE_CHECKING:
|
||||
@@ -117,8 +118,26 @@ class UsersRestServletV2(RestServlet):
|
||||
guests = parse_boolean(request, "guests", default=True)
|
||||
deactivated = parse_boolean(request, "deactivated", default=False)
|
||||
|
||||
order_by = parse_string(
|
||||
request,
|
||||
"order_by",
|
||||
default=UserSortOrder.NAME.value,
|
||||
allowed_values=(
|
||||
UserSortOrder.NAME.value,
|
||||
UserSortOrder.DISPLAYNAME.value,
|
||||
UserSortOrder.GUEST.value,
|
||||
UserSortOrder.ADMIN.value,
|
||||
UserSortOrder.DEACTIVATED.value,
|
||||
UserSortOrder.USER_TYPE.value,
|
||||
UserSortOrder.AVATAR_URL.value,
|
||||
UserSortOrder.SHADOW_BANNED.value,
|
||||
),
|
||||
)
|
||||
|
||||
direction = parse_string(request, "dir", default="f", allowed_values=("f", "b"))
|
||||
|
||||
users, total = await self.store.get_users_paginate(
|
||||
start, limit, user_id, name, guests, deactivated
|
||||
start, limit, user_id, name, guests, deactivated, order_by, direction
|
||||
)
|
||||
ret = {"users": users, "total": total}
|
||||
if (start + limit) < total:
|
||||
|
||||
@@ -74,11 +74,13 @@ class LoginRestServlet(RestServlet):
|
||||
|
||||
self._well_known_builder = WellKnownBuilder(hs)
|
||||
self._address_ratelimiter = Ratelimiter(
|
||||
store=hs.get_datastore(),
|
||||
clock=hs.get_clock(),
|
||||
rate_hz=self.hs.config.rc_login_address.per_second,
|
||||
burst_count=self.hs.config.rc_login_address.burst_count,
|
||||
)
|
||||
self._account_ratelimiter = Ratelimiter(
|
||||
store=hs.get_datastore(),
|
||||
clock=hs.get_clock(),
|
||||
rate_hz=self.hs.config.rc_login_account.per_second,
|
||||
burst_count=self.hs.config.rc_login_account.burst_count,
|
||||
@@ -141,20 +143,22 @@ class LoginRestServlet(RestServlet):
|
||||
appservice = self.auth.get_appservice_by_req(request)
|
||||
|
||||
if appservice.is_rate_limited():
|
||||
self._address_ratelimiter.ratelimit(request.getClientIP())
|
||||
await self._address_ratelimiter.ratelimit(
|
||||
None, request.getClientIP()
|
||||
)
|
||||
|
||||
result = await self._do_appservice_login(login_submission, appservice)
|
||||
elif self.jwt_enabled and (
|
||||
login_submission["type"] == LoginRestServlet.JWT_TYPE
|
||||
or login_submission["type"] == LoginRestServlet.JWT_TYPE_DEPRECATED
|
||||
):
|
||||
self._address_ratelimiter.ratelimit(request.getClientIP())
|
||||
await self._address_ratelimiter.ratelimit(None, request.getClientIP())
|
||||
result = await self._do_jwt_login(login_submission)
|
||||
elif login_submission["type"] == LoginRestServlet.TOKEN_TYPE:
|
||||
self._address_ratelimiter.ratelimit(request.getClientIP())
|
||||
await self._address_ratelimiter.ratelimit(None, request.getClientIP())
|
||||
result = await self._do_token_login(login_submission)
|
||||
else:
|
||||
self._address_ratelimiter.ratelimit(request.getClientIP())
|
||||
await self._address_ratelimiter.ratelimit(None, request.getClientIP())
|
||||
result = await self._do_other_login(login_submission)
|
||||
except KeyError:
|
||||
raise SynapseError(400, "Missing JSON keys.")
|
||||
@@ -258,7 +262,7 @@ class LoginRestServlet(RestServlet):
|
||||
# too often. This happens here rather than before as we don't
|
||||
# necessarily know the user before now.
|
||||
if ratelimit:
|
||||
self._account_ratelimiter.ratelimit(user_id.lower())
|
||||
await self._account_ratelimiter.ratelimit(None, user_id.lower())
|
||||
|
||||
if create_non_existent_users:
|
||||
canonical_uid = await self.auth_handler.check_user_exists(user_id)
|
||||
|
||||
@@ -103,7 +103,9 @@ class EmailPasswordRequestTokenRestServlet(RestServlet):
|
||||
# Raise if the provided next_link value isn't valid
|
||||
assert_valid_next_link(self.hs, next_link)
|
||||
|
||||
self.identity_handler.ratelimit_request_token_requests(request, "email", email)
|
||||
await self.identity_handler.ratelimit_request_token_requests(
|
||||
request, "email", email
|
||||
)
|
||||
|
||||
# The email will be sent to the stored address.
|
||||
# This avoids a potential account hijack by requesting a password reset to
|
||||
@@ -387,7 +389,9 @@ class EmailThreepidRequestTokenRestServlet(RestServlet):
|
||||
Codes.THREEPID_DENIED,
|
||||
)
|
||||
|
||||
self.identity_handler.ratelimit_request_token_requests(request, "email", email)
|
||||
await self.identity_handler.ratelimit_request_token_requests(
|
||||
request, "email", email
|
||||
)
|
||||
|
||||
if next_link:
|
||||
# Raise if the provided next_link value isn't valid
|
||||
@@ -468,7 +472,7 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet):
|
||||
Codes.THREEPID_DENIED,
|
||||
)
|
||||
|
||||
self.identity_handler.ratelimit_request_token_requests(
|
||||
await self.identity_handler.ratelimit_request_token_requests(
|
||||
request, "msisdn", msisdn
|
||||
)
|
||||
|
||||
|
||||
@@ -126,7 +126,9 @@ class EmailRegisterRequestTokenRestServlet(RestServlet):
|
||||
Codes.THREEPID_DENIED,
|
||||
)
|
||||
|
||||
self.identity_handler.ratelimit_request_token_requests(request, "email", email)
|
||||
await self.identity_handler.ratelimit_request_token_requests(
|
||||
request, "email", email
|
||||
)
|
||||
|
||||
existing_user_id = await self.hs.get_datastore().get_user_id_by_threepid(
|
||||
"email", email
|
||||
@@ -208,7 +210,7 @@ class MsisdnRegisterRequestTokenRestServlet(RestServlet):
|
||||
Codes.THREEPID_DENIED,
|
||||
)
|
||||
|
||||
self.identity_handler.ratelimit_request_token_requests(
|
||||
await self.identity_handler.ratelimit_request_token_requests(
|
||||
request, "msisdn", msisdn
|
||||
)
|
||||
|
||||
@@ -406,7 +408,7 @@ class RegisterRestServlet(RestServlet):
|
||||
|
||||
client_addr = request.getClientIP()
|
||||
|
||||
self.ratelimiter.ratelimit(client_addr, update=False)
|
||||
await self.ratelimiter.ratelimit(None, client_addr, update=False)
|
||||
|
||||
kind = b"user"
|
||||
if b"kind" in request.args:
|
||||
|
||||
@@ -329,6 +329,7 @@ class HomeServer(metaclass=abc.ABCMeta):
|
||||
@cache_in_self
|
||||
def get_registration_ratelimiter(self) -> Ratelimiter:
|
||||
return Ratelimiter(
|
||||
store=self.get_datastore(),
|
||||
clock=self.get_clock(),
|
||||
rate_hz=self.config.rc_registration.per_second,
|
||||
burst_count=self.config.rc_registration.burst_count,
|
||||
|
||||
@@ -21,6 +21,7 @@ from typing import List, Optional, Tuple
from synapse.api.constants import PresenceState
from synapse.config.homeserver import HomeServerConfig
from synapse.storage.database import DatabasePool
from synapse.storage.databases.main.stats import UserSortOrder
from synapse.storage.engines import PostgresEngine
from synapse.storage.util.id_generators import (
IdGenerator,

@@ -292,6 +293,8 @@ class DataStore(
name: Optional[str] = None,
guests: bool = True,
deactivated: bool = False,
order_by: UserSortOrder = UserSortOrder.USER_ID.value,
direction: str = "f",
) -> Tuple[List[JsonDict], int]:
"""Function to retrieve a paginated list of users from
users list. This will return a json list of users and the

@@ -304,6 +307,8 @@ class DataStore(
name: search for local part of user_id or display name
guests: whether to in include guest users
deactivated: whether to include deactivated users
order_by: the sort order of the returned list
direction: sort ascending or descending
Returns:
A tuple of a list of mappings from user to information and a count of total users.
"""

@@ -312,6 +317,14 @@ class DataStore(
filters = []
args = [self.hs.config.server_name]

# Set ordering
order_by_column = UserSortOrder(order_by).value

if direction == "b":
order = "DESC"
else:
order = "ASC"

# `name` is in database already in lower case
if name:
filters.append("(name LIKE ? OR LOWER(displayname) LIKE ?)")

@@ -339,10 +352,15 @@ class DataStore(
txn.execute(sql, args)
count = txn.fetchone()[0]

sql = (
"SELECT name, user_type, is_guest, admin, deactivated, shadow_banned, displayname, avatar_url "
+ sql_base
+ " ORDER BY u.name LIMIT ? OFFSET ?"
sql = """
SELECT name, user_type, is_guest, admin, deactivated, shadow_banned, displayname, avatar_url
{sql_base}
ORDER BY {order_by_column} {order}, u.name ASC
LIMIT ? OFFSET ?
""".format(
sql_base=sql_base,
order_by_column=order_by_column,
order=order,
)
args += [limit, start]
txn.execute(sql, args)
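A hypothetical call into the paginator above, only to show how the new arguments are meant to be used; the `start`/`limit` parameter names and the `store` object are assumptions taken from the surrounding code rather than shown in this hunk:

```python
from synapse.storage.databases.main.stats import UserSortOrder


async def newest_displaynames(store):
    # `store` is assumed to be the homeserver's DataStore instance.
    users, total = await store.get_users_paginate(
        start=0,
        limit=10,
        order_by=UserSortOrder.DISPLAYNAME.value,  # becomes the ORDER BY column
        direction="b",  # "b" sorts DESC, anything else sorts ASC
    )
    return users, total
```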
@@ -16,7 +16,7 @@
import logging
import threading
from collections import namedtuple
from typing import Dict, Iterable, List, Optional, Tuple, overload
from typing import Container, Dict, Iterable, List, Optional, Tuple, overload

from constantly import NamedConstant, Names
from typing_extensions import Literal

@@ -544,7 +544,7 @@ class EventsWorkerStore(SQLBaseStore):
async def get_stripped_room_state_from_event_context(
self,
context: EventContext,
state_types_to_include: List[EventTypes],
state_types_to_include: Container[str],
membership_user_id: Optional[str] = None,
) -> List[JsonDict]:
"""

@@ -1043,6 +1043,9 @@ class _JoinedHostsCache:

self._len = 0

def get_data_for_memory_size(self):
return (self.hosts_to_joined_users, self.state_group)

async def get_destinations(self, state_entry: "_StateCacheEntry") -> Set[str]:
"""Get set of destinations for a state entry
@@ -66,18 +66,37 @@ TYPE_TO_ORIGIN_TABLE = {"room": ("rooms", "room_id"), "user": ("users", "name")}
class UserSortOrder(Enum):
"""
Enum to define the sorting method used when returning users
with get_users_media_usage_paginate
with get_users_paginate in __init__.py
and get_users_media_usage_paginate in stats.py

MEDIA_LENGTH = ordered by size of uploaded media. Smallest to largest.
MEDIA_COUNT = ordered by number of uploaded media. Smallest to largest.
When moves this to __init__.py gets `builtins.ImportError` with
`most likely due to a circular import`

MEDIA_LENGTH = ordered by size of uploaded media.
MEDIA_COUNT = ordered by number of uploaded media.
USER_ID = ordered alphabetically by `user_id`.
NAME = ordered alphabetically by `user_id`. This is for compatibility reasons,
as the user_id is returned in the name field in the response in list users admin API.
DISPLAYNAME = ordered alphabetically by `displayname`
GUEST = ordered by `is_guest`
ADMIN = ordered by `admin`
DEACTIVATED = ordered by `deactivated`
USER_TYPE = ordered alphabetically by `user_type`
AVATAR_URL = ordered alphabetically by `avatar_url`
SHADOW_BANNED = ordered by `shadow_banned`
"""

MEDIA_LENGTH = "media_length"
MEDIA_COUNT = "media_count"
USER_ID = "user_id"
NAME = "name"
DISPLAYNAME = "displayname"
GUEST = "is_guest"
ADMIN = "admin"
DEACTIVATED = "deactivated"
USER_TYPE = "user_type"
AVATAR_URL = "avatar_url"
SHADOW_BANNED = "shadow_banned"


class StatsStore(StateDeltasStore):
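These enum values are the strings accepted by the `order_by` query parameter of the list-accounts admin API, with `dir` selecting the direction (`f` forwards, `b` backwards). A hedged example request; the homeserver URL and access token are placeholders:

```python
# Example of exercising GET /_synapse/admin/v2/users with the new parameters.
import requests

resp = requests.get(
    "https://synapse.example.com/_synapse/admin/v2/users",
    params={"from": 0, "limit": 10, "order_by": "displayname", "dir": "b"},
    headers={"Authorization": "Bearer <admin_access_token>"},
)
resp.raise_for_status()
for user in resp.json()["users"]:
    print(user["name"], user.get("displayname"))
```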
@@ -13,7 +13,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import imp
import importlib.util
import logging
import os
import re

@@ -454,8 +454,13 @@ def _upgrade_existing_database(
)

module_name = "synapse.storage.v%d_%s" % (v, root_name)
with open(absolute_path) as python_file:
module = imp.load_source(module_name, absolute_path, python_file)  # type: ignore

spec = importlib.util.spec_from_file_location(
module_name, absolute_path
)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)  # type: ignore

logger.info("Running script %s", relative_path)
module.run_create(cur, database_engine)  # type: ignore
if not is_empty:
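For reference, the same loading pattern in isolation: `importlib.util` builds a module spec from a file path, instantiates the module and executes it, which is the documented replacement for the removed `imp.load_source` call (the function and variable names here are illustrative only):

```python
import importlib.util


def load_module_from_path(module_name: str, absolute_path: str):
    """Load and execute a Python source file as a module, importlib-style."""
    spec = importlib.util.spec_from_file_location(module_name, absolute_path)
    if spec is None or spec.loader is None:
        raise ImportError("Could not load %s" % (absolute_path,))
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module
```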
@@ -231,8 +231,8 @@ class DomainSpecificString(
# Deny iteration because it will bite you if you try to create a singleton
# set by:
# users = set(user)
def __iter__(self):
raise ValueError("Attempted to iterate a %s" % (type(self).__name__,))
# def __iter__(self):
# raise ValueError("Attempted to iterate a %s" % (type(self).__name__,))

# Because this class is a namedtuple of strings and booleans, it is deeply
# immutable.
@@ -33,6 +33,7 @@ cache_hits = Gauge("synapse_util_caches_cache:hits", "", ["name"])
cache_evicted = Gauge("synapse_util_caches_cache:evicted_size", "", ["name"])
cache_total = Gauge("synapse_util_caches_cache:total", "", ["name"])
cache_max_size = Gauge("synapse_util_caches_cache_max_size", "", ["name"])
cache_memory_usage = Gauge("synapse_util_caches_cache_memory_usage", "", ["name"])

response_cache_size = Gauge("synapse_util_caches_response_cache:size", "", ["name"])
response_cache_hits = Gauge("synapse_util_caches_response_cache:hits", "", ["name"])

@@ -53,6 +54,7 @@ class CacheMetric:
hits = attr.ib(default=0)
misses = attr.ib(default=0)
evicted_size = attr.ib(default=0)
memory_usage = attr.ib(default=0)

def inc_hits(self):
self.hits += 1

@@ -80,6 +82,7 @@ class CacheMetric:
cache_hits.labels(self._cache_name).set(self.hits)
cache_evicted.labels(self._cache_name).set(self.evicted_size)
cache_total.labels(self._cache_name).set(self.hits + self.misses)
cache_memory_usage.labels(self._cache_name).set(self.memory_usage)
if getattr(self._cache, "max_size", None):
cache_max_size.labels(self._cache_name).set(self._cache.max_size)
if self._collect_callback:
@@ -27,6 +27,7 @@ from typing import (
cast,
overload,
)
import random

from typing_extensions import Literal

@@ -34,6 +35,11 @@ from synapse.config import cache as cache_config
from synapse.util.caches import CacheMetric, register_cache
from synapse.util.caches.treecache import TreeCache

try:
from pympler import asizeof
except ImportError:
asizeof = None

# Function type: the type used for invalidation callbacks
FT = TypeVar("FT", bound=Callable[..., Any])
@@ -55,15 +61,34 @@ def enumerate_leaves(node, depth):


class _Node:
__slots__ = ["prev_node", "next_node", "key", "value", "callbacks"]
__slots__ = [
"prev_node",
"next_node",
"key",
"value",
"callbacks",
"memory",
"allocated_ts",
]

def __init__(self, prev_node, next_node, key, value, callbacks=set()):
self.prev_node = prev_node
self.next_node = next_node
def __init__(self, prev_node, next_node, key, value, allocated_ts, callbacks=set()):
self.key = key
self.value = value
self.callbacks = callbacks

if asizeof:
data = getattr(value, "get_data_for_memory_size", None)
if data:
self.memory = asizeof.asizeof(key) + asizeof.asizeof(data)
else:
self.memory = asizeof.asizeof(key) + asizeof.asizeof(value)
else:
self.memory = 0

self.prev_node = prev_node
self.next_node = next_node
self.allocated_ts = allocated_ts


class LruCache(Generic[KT, VT]):
"""
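The `memory` slot above is filled with a deep-size estimate from pympler when that package is importable; values may expose a `get_data_for_memory_size` attribute to substitute what gets measured. A standalone sketch of the same accounting (mirroring the branch, where the looked-up attribute itself is what gets passed to `asizeof`):

```python
# Sketch of the per-node memory estimate used by _Node above; pympler is an
# optional dependency, and without it the estimate is simply 0.
try:
    from pympler import asizeof
except ImportError:
    asizeof = None


def estimate_cache_entry_memory(key, value) -> int:
    if not asizeof:
        return 0
    # As in the branch: if the value has get_data_for_memory_size, that
    # attribute (not its return value) is measured in place of the value.
    data = getattr(value, "get_data_for_memory_size", None)
    if data:
        return asizeof.asizeof(key) + asizeof.asizeof(data)
    return asizeof.asizeof(key) + asizeof.asizeof(value)
```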
@@ -82,6 +107,7 @@ class LruCache(Generic[KT, VT]):
size_callback: Optional[Callable] = None,
metrics_collection_callback: Optional[Callable[[], None]] = None,
apply_cache_factor_from_config: bool = True,
reactor=None,
):
"""
Args:

@@ -110,6 +136,11 @@ class LruCache(Generic[KT, VT]):
apply_cache_factor_from_config (bool): If true, `max_size` will be
multiplied by a cache factor derived from the homeserver config
"""
if reactor is None:
from twisted.internet import reactor as _reactor

reactor = _reactor

cache = cache_type()
self.cache = cache  # Used for introspection.
self.apply_cache_factor_from_config = apply_cache_factor_from_config

@@ -141,13 +172,14 @@ class LruCache(Generic[KT, VT]):
# this is exposed for access from outside this class
self.metrics = metrics

list_root = _Node(None, None, None, None)
list_root = _Node(None, None, None, None, -1)
list_root.next_node = list_root
list_root.prev_node = list_root

lock = threading.Lock()

def evict():
ten_minutes_ago = int(reactor.seconds()) - 10 * 60
while cache_len() > self.max_size:
todelete = list_root.prev_node
evicted_len = delete_node(todelete)

@@ -155,6 +187,24 @@ class LruCache(Generic[KT, VT]):
if metrics:
metrics.inc_evictions(evicted_len)

todelete = list_root.prev_node
while 0 < todelete.allocated_ts < ten_minutes_ago + 60:
if list_root == todelete:
break

if ten_minutes_ago < todelete.allocated_ts:
todelete = todelete.prev_node
continue

next_todelete = todelete.prev_node

evicted_len = delete_node(todelete)
cache.pop(todelete.key, None)
if metrics:
metrics.inc_evictions(evicted_len)

todelete = next_todelete
def synchronized(f: FT) -> FT:
@wraps(f)
def inner(*args, **kwargs):

@@ -179,11 +229,18 @@ class LruCache(Generic[KT, VT]):
def add_node(key, value, callbacks=set()):
prev_node = list_root
next_node = prev_node.next_node
node = _Node(prev_node, next_node, key, value, callbacks)

ts = int(reactor.seconds()) + random.randint(-60, 60)
print("Allocating at", ts)

node = _Node(prev_node, next_node, key, value, ts, callbacks)
prev_node.next_node = node
next_node.prev_node = node
cache[key] = node

if metrics:
metrics.memory_usage += node.memory

if size_callback:
cached_cache_len[0] += size_callback(node.value)

@@ -205,6 +262,9 @@ class LruCache(Generic[KT, VT]):
prev_node.next_node = next_node
next_node.prev_node = prev_node

if metrics:
metrics.memory_usage -= node.memory

deleted_len = 1
if size_callback:
deleted_len = size_callback(node.value)

@@ -242,8 +302,13 @@ class LruCache(Generic[KT, VT]):
):
node = cache.get(key, None)
if node is not None:
move_node_to_front(node)
node.callbacks.update(callbacks)
ten_minutes_ago = int(reactor.seconds()) - 10 * 60
if 0 < node.allocated_ts < ten_minutes_ago:
delete_node(node)
cache.pop(node.key, None)
else:
move_node_to_front(node)
node.callbacks.update(callbacks)
if update_metrics and metrics:
metrics.inc_hits()
return node.value
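Taken together, these LruCache hunks bolt a coarse time-based expiry onto the size-based LRU eviction: each node records an allocation timestamp jittered by +/-60 seconds (so a burst of inserts does not all expire in one sweep), `evict()` additionally walks back from the list tail dropping nodes older than roughly ten minutes, and `cache_get` evicts an entry it finds to be expired rather than refreshing it. A simplified restatement of that extra pass, assuming the same closure names as above (`reactor`, `list_root`, `cache`, `delete_node`, `metrics`):

```python
def evict_expired():
    # Anything allocated before this is considered stale; the +60 below allows
    # for the allocation-time jitter.
    ten_minutes_ago = int(reactor.seconds()) - 10 * 60

    todelete = list_root.prev_node
    # The explicit list_root check mirrors the break in the branch; the
    # sentinel's allocated_ts of -1 would stop the 0 < ... test anyway.
    while todelete is not list_root and 0 < todelete.allocated_ts < ten_minutes_ago + 60:
        if ten_minutes_ago < todelete.allocated_ts:
            # In the jitter window but not actually expired yet: skip it.
            todelete = todelete.prev_node
            continue

        next_todelete = todelete.prev_node
        evicted_len = delete_node(todelete)
        cache.pop(todelete.key, None)
        if metrics:
            metrics.inc_evictions(evicted_len)
        todelete = next_todelete
```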
@@ -5,38 +5,25 @@ from synapse.types import create_requester
from tests import unittest


class TestRatelimiter(unittest.TestCase):
class TestRatelimiter(unittest.HomeserverTestCase):
def test_allowed_via_can_do_action(self):
limiter = Ratelimiter(clock=None, rate_hz=0.1, burst_count=1)
allowed, time_allowed = limiter.can_do_action(key="test_id", _time_now_s=0)
self.assertTrue(allowed)
self.assertEquals(10.0, time_allowed)

allowed, time_allowed = limiter.can_do_action(key="test_id", _time_now_s=5)
self.assertFalse(allowed)
self.assertEquals(10.0, time_allowed)

allowed, time_allowed = limiter.can_do_action(key="test_id", _time_now_s=10)
self.assertTrue(allowed)
self.assertEquals(20.0, time_allowed)

def test_allowed_user_via_can_requester_do_action(self):
user_requester = create_requester("@user:example.com")
limiter = Ratelimiter(clock=None, rate_hz=0.1, burst_count=1)
allowed, time_allowed = limiter.can_requester_do_action(
user_requester, _time_now_s=0
limiter = Ratelimiter(
store=self.hs.get_datastore(), clock=None, rate_hz=0.1, burst_count=1
)
allowed, time_allowed = self.get_success_or_raise(
limiter.can_do_action(None, key="test_id", _time_now_s=0)
)
self.assertTrue(allowed)
self.assertEquals(10.0, time_allowed)

allowed, time_allowed = limiter.can_requester_do_action(
user_requester, _time_now_s=5
allowed, time_allowed = self.get_success_or_raise(
limiter.can_do_action(None, key="test_id", _time_now_s=5)
)
self.assertFalse(allowed)
self.assertEquals(10.0, time_allowed)

allowed, time_allowed = limiter.can_requester_do_action(
user_requester, _time_now_s=10
allowed, time_allowed = self.get_success_or_raise(
limiter.can_do_action(None, key="test_id", _time_now_s=10)
)
self.assertTrue(allowed)
self.assertEquals(20.0, time_allowed)

@@ -51,21 +38,23 @@ class TestRatelimiter(unittest.TestCase):
)
as_requester = create_requester("@user:example.com", app_service=appservice)

limiter = Ratelimiter(clock=None, rate_hz=0.1, burst_count=1)
allowed, time_allowed = limiter.can_requester_do_action(
as_requester, _time_now_s=0
limiter = Ratelimiter(
store=self.hs.get_datastore(), clock=None, rate_hz=0.1, burst_count=1
)
allowed, time_allowed = self.get_success_or_raise(
limiter.can_do_action(as_requester, _time_now_s=0)
)
self.assertTrue(allowed)
self.assertEquals(10.0, time_allowed)

allowed, time_allowed = limiter.can_requester_do_action(
as_requester, _time_now_s=5
allowed, time_allowed = self.get_success_or_raise(
limiter.can_do_action(as_requester, _time_now_s=5)
)
self.assertFalse(allowed)
self.assertEquals(10.0, time_allowed)

allowed, time_allowed = limiter.can_requester_do_action(
as_requester, _time_now_s=10
allowed, time_allowed = self.get_success_or_raise(
limiter.can_do_action(as_requester, _time_now_s=10)
)
self.assertTrue(allowed)
self.assertEquals(20.0, time_allowed)

@@ -80,73 +69,89 @@ class TestRatelimiter(unittest.TestCase):
)
as_requester = create_requester("@user:example.com", app_service=appservice)

limiter = Ratelimiter(clock=None, rate_hz=0.1, burst_count=1)
allowed, time_allowed = limiter.can_requester_do_action(
as_requester, _time_now_s=0
limiter = Ratelimiter(
store=self.hs.get_datastore(), clock=None, rate_hz=0.1, burst_count=1
)
allowed, time_allowed = self.get_success_or_raise(
limiter.can_do_action(as_requester, _time_now_s=0)
)
self.assertTrue(allowed)
self.assertEquals(-1, time_allowed)

allowed, time_allowed = limiter.can_requester_do_action(
as_requester, _time_now_s=5
allowed, time_allowed = self.get_success_or_raise(
limiter.can_do_action(as_requester, _time_now_s=5)
)
self.assertTrue(allowed)
self.assertEquals(-1, time_allowed)

allowed, time_allowed = limiter.can_requester_do_action(
as_requester, _time_now_s=10
allowed, time_allowed = self.get_success_or_raise(
limiter.can_do_action(as_requester, _time_now_s=10)
)
self.assertTrue(allowed)
self.assertEquals(-1, time_allowed)

def test_allowed_via_ratelimit(self):
limiter = Ratelimiter(clock=None, rate_hz=0.1, burst_count=1)
limiter = Ratelimiter(
store=self.hs.get_datastore(), clock=None, rate_hz=0.1, burst_count=1
)

# Shouldn't raise
limiter.ratelimit(key="test_id", _time_now_s=0)
self.get_success_or_raise(limiter.ratelimit(None, key="test_id", _time_now_s=0))

# Should raise
with self.assertRaises(LimitExceededError) as context:
limiter.ratelimit(key="test_id", _time_now_s=5)
self.get_success_or_raise(
limiter.ratelimit(None, key="test_id", _time_now_s=5)
)
self.assertEqual(context.exception.retry_after_ms, 5000)

# Shouldn't raise
limiter.ratelimit(key="test_id", _time_now_s=10)
self.get_success_or_raise(
limiter.ratelimit(None, key="test_id", _time_now_s=10)
)

def test_allowed_via_can_do_action_and_overriding_parameters(self):
"""Test that we can override options of can_do_action that would otherwise fail
an action
"""
# Create a Ratelimiter with a very low allowed rate_hz and burst_count
limiter = Ratelimiter(clock=None, rate_hz=0.1, burst_count=1)
limiter = Ratelimiter(
store=self.hs.get_datastore(), clock=None, rate_hz=0.1, burst_count=1
)

# First attempt should be allowed
allowed, time_allowed = limiter.can_do_action(
("test_id",),
_time_now_s=0,
allowed, time_allowed = self.get_success_or_raise(
limiter.can_do_action(
None,
("test_id",),
_time_now_s=0,
)
)
self.assertTrue(allowed)
self.assertEqual(10.0, time_allowed)

# Second attempt, 1s later, will fail
allowed, time_allowed = limiter.can_do_action(
("test_id",),
_time_now_s=1,
allowed, time_allowed = self.get_success_or_raise(
limiter.can_do_action(
None,
("test_id",),
_time_now_s=1,
)
)
self.assertFalse(allowed)
self.assertEqual(10.0, time_allowed)

# But, if we allow 10 actions/sec for this request, we should be allowed
# to continue.
allowed, time_allowed = limiter.can_do_action(
("test_id",), _time_now_s=1, rate_hz=10.0
allowed, time_allowed = self.get_success_or_raise(
limiter.can_do_action(None, ("test_id",), _time_now_s=1, rate_hz=10.0)
)
self.assertTrue(allowed)
self.assertEqual(1.1, time_allowed)

# Similarly if we allow a burst of 10 actions
allowed, time_allowed = limiter.can_do_action(
("test_id",), _time_now_s=1, burst_count=10
allowed, time_allowed = self.get_success_or_raise(
limiter.can_do_action(None, ("test_id",), _time_now_s=1, burst_count=10)
)
self.assertTrue(allowed)
self.assertEqual(1.0, time_allowed)

@@ -156,29 +161,72 @@ class TestRatelimiter(unittest.TestCase):
fail an action
"""
# Create a Ratelimiter with a very low allowed rate_hz and burst_count
limiter = Ratelimiter(clock=None, rate_hz=0.1, burst_count=1)
limiter = Ratelimiter(
store=self.hs.get_datastore(), clock=None, rate_hz=0.1, burst_count=1
)

# First attempt should be allowed
limiter.ratelimit(key=("test_id",), _time_now_s=0)
self.get_success_or_raise(
limiter.ratelimit(None, key=("test_id",), _time_now_s=0)
)

# Second attempt, 1s later, will fail
with self.assertRaises(LimitExceededError) as context:
limiter.ratelimit(key=("test_id",), _time_now_s=1)
self.get_success_or_raise(
limiter.ratelimit(None, key=("test_id",), _time_now_s=1)
)
self.assertEqual(context.exception.retry_after_ms, 9000)

# But, if we allow 10 actions/sec for this request, we should be allowed
# to continue.
limiter.ratelimit(key=("test_id",), _time_now_s=1, rate_hz=10.0)
self.get_success_or_raise(
limiter.ratelimit(None, key=("test_id",), _time_now_s=1, rate_hz=10.0)
)

# Similarly if we allow a burst of 10 actions
limiter.ratelimit(key=("test_id",), _time_now_s=1, burst_count=10)
self.get_success_or_raise(
limiter.ratelimit(None, key=("test_id",), _time_now_s=1, burst_count=10)
)

def test_pruning(self):
limiter = Ratelimiter(clock=None, rate_hz=0.1, burst_count=1)
limiter.can_do_action(key="test_id_1", _time_now_s=0)
limiter = Ratelimiter(
store=self.hs.get_datastore(), clock=None, rate_hz=0.1, burst_count=1
)
self.get_success_or_raise(
limiter.can_do_action(None, key="test_id_1", _time_now_s=0)
)

self.assertIn("test_id_1", limiter.actions)

limiter.can_do_action(key="test_id_2", _time_now_s=10)
self.get_success_or_raise(
limiter.can_do_action(None, key="test_id_2", _time_now_s=10)
)

self.assertNotIn("test_id_1", limiter.actions)

def test_db_user_override(self):
"""Test that users that have ratelimiting disabled in the DB aren't
ratelimited.
"""
store = self.hs.get_datastore()

user_id = "@user:test"
requester = create_requester(user_id)

self.get_success(
store.db_pool.simple_insert(
table="ratelimit_override",
values={
"user_id": user_id,
"messages_per_second": None,
"burst_count": None,
},
desc="test_db_user_override",
)
)

limiter = Ratelimiter(store=store, clock=None, rate_hz=0.1, burst_count=1)

# Shouldn't raise
for _ in range(20):
self.get_success_or_raise(limiter.ratelimit(requester, _time_now_s=0))
@@ -28,7 +28,7 @@ from synapse.api.errors import Codes, HttpResponseException, ResourceLimitError
from synapse.api.room_versions import RoomVersions
from synapse.rest.client.v1 import login, logout, profile, room
from synapse.rest.client.v2_alpha import devices, sync
from synapse.types import JsonDict
from synapse.types import JsonDict, UserID

from tests import unittest
from tests.server import FakeSite, make_request

@@ -467,6 +467,8 @@ class UsersListTestCase(unittest.HomeserverTestCase):
url = "/_synapse/admin/v2/users"

def prepare(self, reactor, clock, hs):
self.store = hs.get_datastore()

self.admin_user = self.register_user("admin", "pass", admin=True)
self.admin_user_tok = self.login("admin", "pass")

@@ -634,6 +636,26 @@ class UsersListTestCase(unittest.HomeserverTestCase):
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.UNKNOWN, channel.json_body["errcode"])

# unkown order_by
channel = self.make_request(
"GET",
self.url + "?order_by=bar",
access_token=self.admin_user_tok,
)

self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.UNKNOWN, channel.json_body["errcode"])

# invalid search order
channel = self.make_request(
"GET",
self.url + "?dir=bar",
access_token=self.admin_user_tok,
)

self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.UNKNOWN, channel.json_body["errcode"])

def test_limit(self):
"""
Testing list of users with limit

@@ -759,6 +781,103 @@ class UsersListTestCase(unittest.HomeserverTestCase):
self.assertEqual(len(channel.json_body["users"]), 1)
self.assertNotIn("next_token", channel.json_body)

def test_order_by(self):
"""
Testing order list with parameter `order_by`
"""

user1 = self.register_user("user1", "pass1", admin=False, displayname="Name Z")
user2 = self.register_user("user2", "pass2", admin=False, displayname="Name Y")

# Modify user
self.get_success(self.store.set_user_deactivated_status(user1, True))
self.get_success(self.store.set_shadow_banned(UserID.from_string(user1), True))

# Set avatar URL to all users, that no user has a NULL value to avoid
# different sort order between SQlite and PostreSQL
self.get_success(self.store.set_profile_avatar_url("user1", "mxc://url3"))
self.get_success(self.store.set_profile_avatar_url("user2", "mxc://url2"))
self.get_success(self.store.set_profile_avatar_url("admin", "mxc://url1"))

# order by default (name)
self._order_test([self.admin_user, user1, user2], None)
self._order_test([self.admin_user, user1, user2], None, "f")
self._order_test([user2, user1, self.admin_user], None, "b")

# order by name
self._order_test([self.admin_user, user1, user2], "name")
self._order_test([self.admin_user, user1, user2], "name", "f")
self._order_test([user2, user1, self.admin_user], "name", "b")

# order by displayname
self._order_test([user2, user1, self.admin_user], "displayname")
self._order_test([user2, user1, self.admin_user], "displayname", "f")
self._order_test([self.admin_user, user1, user2], "displayname", "b")

# order by is_guest
# like sort by ascending name, as no guest user here
self._order_test([self.admin_user, user1, user2], "is_guest")
self._order_test([self.admin_user, user1, user2], "is_guest", "f")
self._order_test([self.admin_user, user1, user2], "is_guest", "b")

# order by admin
self._order_test([user1, user2, self.admin_user], "admin")
self._order_test([user1, user2, self.admin_user], "admin", "f")
self._order_test([self.admin_user, user1, user2], "admin", "b")

# order by deactivated
self._order_test([self.admin_user, user2, user1], "deactivated")
self._order_test([self.admin_user, user2, user1], "deactivated", "f")
self._order_test([user1, self.admin_user, user2], "deactivated", "b")

# order by user_type
# like sort by ascending name, as no special user type here
self._order_test([self.admin_user, user1, user2], "user_type")
self._order_test([self.admin_user, user1, user2], "user_type", "f")
self._order_test([self.admin_user, user1, user2], "is_guest", "b")

# order by shadow_banned
self._order_test([self.admin_user, user2, user1], "shadow_banned")
self._order_test([self.admin_user, user2, user1], "shadow_banned", "f")
self._order_test([user1, self.admin_user, user2], "shadow_banned", "b")

# order by avatar_url
self._order_test([self.admin_user, user2, user1], "avatar_url")
self._order_test([self.admin_user, user2, user1], "avatar_url", "f")
self._order_test([user1, user2, self.admin_user], "avatar_url", "b")

def _order_test(
self,
expected_user_list: List[str],
order_by: Optional[str],
dir: Optional[str] = None,
):
"""Request the list of users in a certain order. Assert that order is what
we expect
Args:
expected_user_list: The list of user_id in the order we expect to get
back from the server
order_by: The type of ordering to give the server
dir: The direction of ordering to give the server
"""

url = self.url + "?deactivated=true&"
if order_by is not None:
url += "order_by=%s&" % (order_by,)
if dir is not None and dir in ("b", "f"):
url += "dir=%s" % (dir,)
channel = self.make_request(
"GET",
url.encode("ascii"),
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], len(expected_user_list))

returned_order = [row["name"] for row in channel.json_body["users"]]
self.assertEqual(expected_user_list, returned_order)
self._check_fields(channel.json_body["users"])

def _check_fields(self, content: JsonDict):
"""Checks that the expected user attributes are present in content
Args:
@@ -207,6 +207,226 @@ class EventAuthTestCase(unittest.TestCase):
do_sig_check=False,
)

def test_join_rules_public(self):
"""
Test joining a public room.
"""
creator = "@creator:example.com"
pleb = "@joiner:example.com"

auth_events = {
("m.room.create", ""): _create_event(creator),
("m.room.member", creator): _join_event(creator),
("m.room.join_rules", ""): _join_rules_event(creator, "public"),
}

# Check join.
event_auth.check(
RoomVersions.V6,
_join_event(pleb),
auth_events,
do_sig_check=False,
)

# A user cannot be force-joined to a room.
with self.assertRaises(AuthError):
event_auth.check(
RoomVersions.V6,
_member_event(pleb, "join", sender=creator),
auth_events,
do_sig_check=False,
)

# Banned should be rejected.
auth_events[("m.room.member", pleb)] = _member_event(pleb, "ban")
with self.assertRaises(AuthError):
event_auth.check(
RoomVersions.V6,
_join_event(pleb),
auth_events,
do_sig_check=False,
)

# A user who left can re-join.
auth_events[("m.room.member", pleb)] = _member_event(pleb, "leave")
event_auth.check(
RoomVersions.V6,
_join_event(pleb),
auth_events,
do_sig_check=False,
)

# A user can send a join if they're in the room.
auth_events[("m.room.member", pleb)] = _member_event(pleb, "join")
event_auth.check(
RoomVersions.V6,
_join_event(pleb),
auth_events,
do_sig_check=False,
)

# A user can accept an invite.
auth_events[("m.room.member", pleb)] = _member_event(
pleb, "invite", sender=creator
)
event_auth.check(
RoomVersions.V6,
_join_event(pleb),
auth_events,
do_sig_check=False,
)

def test_join_rules_invite(self):
"""
Test joining an invite only room.
"""
creator = "@creator:example.com"
pleb = "@joiner:example.com"

auth_events = {
("m.room.create", ""): _create_event(creator),
("m.room.member", creator): _join_event(creator),
("m.room.join_rules", ""): _join_rules_event(creator, "invite"),
}

# A join without an invite is rejected.
with self.assertRaises(AuthError):
event_auth.check(
RoomVersions.V6,
_join_event(pleb),
auth_events,
do_sig_check=False,
)

# A user cannot be force-joined to a room.
with self.assertRaises(AuthError):
event_auth.check(
RoomVersions.V6,
_member_event(pleb, "join", sender=creator),
auth_events,
do_sig_check=False,
)

# Banned should be rejected.
auth_events[("m.room.member", pleb)] = _member_event(pleb, "ban")
with self.assertRaises(AuthError):
event_auth.check(
RoomVersions.V6,
_join_event(pleb),
auth_events,
do_sig_check=False,
)

# A user who left cannot re-join.
auth_events[("m.room.member", pleb)] = _member_event(pleb, "leave")
with self.assertRaises(AuthError):
event_auth.check(
RoomVersions.V6,
_join_event(pleb),
auth_events,
do_sig_check=False,
)

# A user can send a join if they're in the room.
auth_events[("m.room.member", pleb)] = _member_event(pleb, "join")
event_auth.check(
RoomVersions.V6,
_join_event(pleb),
auth_events,
do_sig_check=False,
)

# A user can accept an invite.
auth_events[("m.room.member", pleb)] = _member_event(
pleb, "invite", sender=creator
)
event_auth.check(
RoomVersions.V6,
_join_event(pleb),
auth_events,
do_sig_check=False,
)

def test_join_rules_msc3083_restricted(self):
"""
Test joining a restricted room from MSC3083.

This is pretty much the same test as public.
"""
creator = "@creator:example.com"
pleb = "@joiner:example.com"

auth_events = {
("m.room.create", ""): _create_event(creator),
("m.room.member", creator): _join_event(creator),
("m.room.join_rules", ""): _join_rules_event(creator, "restricted"),
}

# Older room versions don't understand this join rule
with self.assertRaises(AuthError):
event_auth.check(
RoomVersions.V6,
_join_event(pleb),
auth_events,
do_sig_check=False,
)

# Check join.
event_auth.check(
RoomVersions.MSC3083,
_join_event(pleb),
auth_events,
do_sig_check=False,
)

# A user cannot be force-joined to a room.
with self.assertRaises(AuthError):
event_auth.check(
RoomVersions.MSC3083,
_member_event(pleb, "join", sender=creator),
auth_events,
do_sig_check=False,
)

# Banned should be rejected.
auth_events[("m.room.member", pleb)] = _member_event(pleb, "ban")
with self.assertRaises(AuthError):
event_auth.check(
RoomVersions.MSC3083,
_join_event(pleb),
auth_events,
do_sig_check=False,
)

# A user who left can re-join.
auth_events[("m.room.member", pleb)] = _member_event(pleb, "leave")
event_auth.check(
RoomVersions.MSC3083,
_join_event(pleb),
auth_events,
do_sig_check=False,
)

# A user can send a join if they're in the room.
auth_events[("m.room.member", pleb)] = _member_event(pleb, "join")
event_auth.check(
RoomVersions.MSC3083,
_join_event(pleb),
auth_events,
do_sig_check=False,
)

# A user can accept an invite.
auth_events[("m.room.member", pleb)] = _member_event(
pleb, "invite", sender=creator
)
event_auth.check(
RoomVersions.MSC3083,
_join_event(pleb),
auth_events,
do_sig_check=False,
)


# helpers for making events

@@ -225,19 +445,24 @@ def _create_event(user_id):
)


def _join_event(user_id):
def _member_event(user_id, membership, sender=None):
return make_event_from_dict(
{
"room_id": TEST_ROOM_ID,
"event_id": _get_event_id(),
"type": "m.room.member",
"sender": user_id,
"sender": sender or user_id,
"state_key": user_id,
"content": {"membership": "join"},
"content": {"membership": membership},
"prev_events": [],
}
)


def _join_event(user_id):
return _member_event(user_id, "join")


def _power_levels_event(sender, content):
return make_event_from_dict(
{

@@ -277,6 +502,21 @@ def _random_state_event(sender):
)


def _join_rules_event(sender, join_rule):
return make_event_from_dict(
{
"room_id": TEST_ROOM_ID,
"event_id": _get_event_id(),
"type": "m.room.join_rules",
"sender": sender,
"state_key": "",
"content": {
"join_rule": join_rule,
},
}
)


event_count = 0
@@ -30,6 +30,23 @@ class LruCacheTestCase(unittest.HomeserverTestCase):
self.assertEquals(cache.get("key"), "value")
self.assertEquals(cache["key"], "value")

def test_time_evict(self):
self.reactor.advance(100 * 60)

cache = LruCache(100, reactor=self.reactor)
cache["key"] = "value"
cache["key2"] = "value2"

cache._on_resize()
self.assertEquals(cache.get("key"), "value")

self.reactor.advance(20 * 60)

print(self.reactor.seconds())

cache._on_resize()
self.assertEquals(cache.get("key"), None)

def test_eviction(self):
cache = LruCache(2)
cache[1] = 1
@@ -122,7 +122,6 @@ def default_config(name, parse=False):
"enable_registration_captcha": False,
"macaroon_secret_key": "not even a little secret",
"trusted_third_party_id_servers": [],
"room_invite_state_types": [],
"password_providers": [],
"worker_replication_url": "",
"worker_app": None,