
Compare commits


12 Commits

Author SHA1 Message Date
H. Shay 095be7b573 newsfragment 2023-08-24 15:26:13 -07:00
H. Shay fd6bd53ce2 drop conflicting tests 2023-08-24 15:16:01 -07:00
H. Shay 6470011b76 allow for upserts to column full_user_id of table profiles and user_filters 2023-08-24 15:15:52 -07:00
H. Shay 0d33bafaf8 drop not null constraint on column user_id of tables profiles and user_filters 2023-08-24 15:14:28 -07:00
H. Shay f6401d7ee6 stop writing to column user_id of table profiles 2023-08-24 15:13:39 -07:00
H. Shay 890bfb415a stop writing to column user_id of table user_filters 2023-08-24 15:12:24 -07:00
Amirreza Aflakparast 5427cc20b9 Update URLs to matrix.org blog categories. (#16008) 2023-08-24 11:06:06 -04:00
David Robertson e691243e19 Fix typechecking with twisted trunk (#16121) 2023-08-24 14:53:07 +00:00
Will Hunt 0538e3e2db Add Retry-After to M_LIMIT_EXCEEDED error responses (#16136)
Implements MSC4041 behind an experimental configuration flag.
2023-08-24 10:40:26 -04:00
dependabot[bot] e3333bacff Bump serde_json from 1.0.104 to 1.0.105 (#16140)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-08-24 08:49:37 -04:00
H. Shay 851cbdcb57 Merge branch 'release-v1.91' into develop 2023-08-23 11:44:01 -07:00
Patrick Cloke 33fa82a34c Stabilize support for MSC3958 (suppress notifications from edits). (#16113) 2023-08-23 13:22:34 -04:00
32 changed files with 265 additions and 242 deletions
+2 -2
@@ -54,8 +54,8 @@ jobs:
poetry remove twisted
poetry add --extras tls git+https://github.com/twisted/twisted.git#${{ inputs.twisted_ref || 'trunk' }}
poetry install --no-interaction --extras "all test"
-  - name: Remove warn_unused_ignores from mypy config
-    run: sed '/warn_unused_ignores = True/d' -i mypy.ini
+  - name: Remove unhelpful options from mypy config
+    run: sed -e '/warn_unused_ignores = True/d' -e '/warn_redundant_casts = True/d' -i mypy.ini
- run: poetry run mypy
trial:
+2 -2
@@ -352,9 +352,9 @@ dependencies = [
[[package]]
name = "serde_json"
version = "1.0.104"
version = "1.0.105"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "076066c5f1078eac5b722a31827a8832fe108bed65dfa75e233c89f8206e976c"
checksum = "693151e1ac27563d6dbcec9dee9fbd5da8539b20fa14ad3752b2e6d363ace360"
dependencies = [
"itoa",
"ryu",
+1
@@ -0,0 +1 @@
Update links to the matrix.org blog.
+1
@@ -0,0 +1 @@
Suppress notifications from message edits per [MSC3958](https://github.com/matrix-org/matrix-spec-proposals/pull/3958).
+1
@@ -0,0 +1 @@
Attempt to fix the twisted trunk job.
+1
@@ -0,0 +1 @@
Return a `Retry-After` with `M_LIMIT_EXCEEDED` error responses.
+1
@@ -0,0 +1 @@
Stop writing to column user_id of tables profiles and user_filters.
+2 -2
@@ -12,7 +12,7 @@ Note that this schedule might be modified depending on the availability of the
Synapse team, e.g. releases may be skipped to avoid holidays.
Release announcements can be found in the
-[release category of the Matrix blog](https://matrix.org/blog/category/releases).
+[release category of the Matrix blog](https://matrix.org/category/releases).
## Bugfix releases
@@ -34,4 +34,4 @@ be held to be released together.
In some cases, a pre-disclosure of a security release will be issued as a notice
to Synapse operators that there is an upcoming security release. These can be
-found in the [security category of the Matrix blog](https://matrix.org/blog/category/security).
+found in the [security category of the Matrix blog](https://matrix.org/category/security).
-1
@@ -197,7 +197,6 @@ fn bench_eval_message(b: &mut Bencher) {
        false,
        false,
        false,
-        false,
    );
b.iter(|| eval.run(&rules, Some("bob"), Some("person")));
+1 -1
@@ -228,7 +228,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
// We don't want to notify on edits *unless* the edit directly mentions a
// user, which is handled above.
PushRule {
rule_id: Cow::Borrowed("global/override/.org.matrix.msc3958.suppress_edits"),
rule_id: Cow::Borrowed("global/override/.m.rule.suppress_edits"),
priority_class: 5,
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventPropertyIs(
EventPropertyIsCondition {
+1 -1
@@ -564,7 +564,7 @@ fn test_requires_room_version_supports_condition() {
};
let rules = PushRules::new(vec![custom_rule]);
result = evaluator.run(
&FilteredPushRules::py_new(rules, BTreeMap::new(), true, false, true, false),
&FilteredPushRules::py_new(rules, BTreeMap::new(), true, false, true),
None,
None,
);
-9
@@ -527,7 +527,6 @@ pub struct FilteredPushRules {
    msc1767_enabled: bool,
    msc3381_polls_enabled: bool,
    msc3664_enabled: bool,
-    msc3958_suppress_edits_enabled: bool,
}
#[pymethods]
@@ -539,7 +538,6 @@ impl FilteredPushRules {
msc1767_enabled: bool,
msc3381_polls_enabled: bool,
msc3664_enabled: bool,
-    msc3958_suppress_edits_enabled: bool,
) -> Self {
Self {
push_rules,
@@ -547,7 +545,6 @@ impl FilteredPushRules {
msc1767_enabled,
msc3381_polls_enabled,
msc3664_enabled,
-    msc3958_suppress_edits_enabled,
}
}
@@ -584,12 +581,6 @@ impl FilteredPushRules {
return false;
}
-            if !self.msc3958_suppress_edits_enabled
-                && rule.rule_id == "global/override/.org.matrix.msc3958.suppress_edits"
-            {
-                return false;
-            }
true
})
.map(|r| {
-1
@@ -46,7 +46,6 @@ class FilteredPushRules:
msc1767_enabled: bool,
msc3381_polls_enabled: bool,
msc3664_enabled: bool,
-    msc3958_suppress_edits_enabled: bool,
): ...
def rules(self) -> Collection[Tuple[PushRule, bool]]: ...
+9 -1
@@ -16,6 +16,7 @@
"""Contains exceptions and error codes."""
import logging
+import math
import typing
from enum import Enum
from http import HTTPStatus
@@ -503,6 +504,8 @@ class InvalidCaptchaError(SynapseError):
class LimitExceededError(SynapseError):
"""A client has sent too many requests and is being throttled."""
+    include_retry_after_header = False
def __init__(
self,
code: int = 429,
@@ -510,7 +513,12 @@ class LimitExceededError(SynapseError):
retry_after_ms: Optional[int] = None,
errcode: str = Codes.LIMIT_EXCEEDED,
):
-        super().__init__(code, msg, errcode)
+        headers = (
+            {"Retry-After": str(math.ceil(retry_after_ms / 1000))}
+            if self.include_retry_after_header and retry_after_ms is not None
+            else None
+        )
+        super().__init__(code, msg, errcode, headers=headers)
self.retry_after_ms = retry_after_ms
def error_dict(self, config: Optional["HomeServerConfig"]) -> "JsonDict":
+9 -5
@@ -18,6 +18,7 @@ from typing import TYPE_CHECKING, Any, Optional
import attr
import attr.validators
+from synapse.api.errors import LimitExceededError
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions
from synapse.config import ConfigError
from synapse.config._base import Config, RootConfig
@@ -383,11 +384,6 @@ class ExperimentalConfig(Config):
# MSC3391: Removing account data.
self.msc3391_enabled = experimental.get("msc3391_enabled", False)
-        # MSC3958: Do not generate notifications for edits.
-        self.msc3958_supress_edit_notifs = experimental.get(
-            "msc3958_supress_edit_notifs", False
-        )
# MSC3967: Do not require UIA when first uploading cross signing keys
self.msc3967_enabled = experimental.get("msc3967_enabled", False)
@@ -411,3 +407,11 @@ class ExperimentalConfig(Config):
self.msc4010_push_rules_account_data = experimental.get(
"msc4010_push_rules_account_data", False
)
+        # MSC4041: Use HTTP header Retry-After to enable library-assisted retry handling
+        #
+        # This is a bit hacky, but the most reasonable way to *always* include the
+        # headers.
+        LimitExceededError.include_retry_after_header = experimental.get(
+            "msc4041_enabled", False
+        )
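
As the comment concedes, the hook works by flipping a class-level attribute: every LimitExceededError raised afterwards, anywhere in the process, sees the new value, so the setting does not have to be threaded through each raise site. A minimal sketch of the pattern, with hypothetical names:

class RateLimitedError(Exception):
    # Class-level default shared by every instance.
    include_retry_after_header = False

# Flipped once at startup, e.g. from a config hook.
RateLimitedError.include_retry_after_header = True

# Errors raised from here on pick up the new value.
assert RateLimitedError().include_retry_after_header is True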
+16 -16
@@ -1474,23 +1474,23 @@ class EventCreationHandler:
            # We now persist the event (and update the cache in parallel, since we
            # don't want to block on it).
            event, context = events_and_context[0]
+            #
+            # Note: mypy gets confused if we inline dl and check with twisted#11770.
+            # Some kind of bug in mypy's deduction?
+            deferreds = (
+                run_in_background(
+                    self._persist_events,
+                    requester=requester,
+                    events_and_context=events_and_context,
+                    ratelimit=ratelimit,
+                    extra_users=extra_users,
+                ),
+                run_in_background(
+                    self.cache_joined_hosts_for_events, events_and_context
+                ).addErrback(log_failure, "cache_joined_hosts_for_event failed"),
+            )
            result, _ = await make_deferred_yieldable(
-                gather_results(
-                    (
-                        run_in_background(
-                            self._persist_events,
-                            requester=requester,
-                            events_and_context=events_and_context,
-                            ratelimit=ratelimit,
-                            extra_users=extra_users,
-                        ),
-                        run_in_background(
-                            self.cache_joined_hosts_for_events, events_and_context
-                        ).addErrback(log_failure, "cache_joined_hosts_for_event failed"),
-                    ),
-                    consumeErrors=True,
-                )
+                gather_results(deferreds, consumeErrors=True)
            ).addErrback(unwrapFirstError)

            return result
+10 -9
@@ -809,23 +809,24 @@ def run_in_background(  # type: ignore[misc]
    # `res` may be a coroutine, `Deferred`, some other kind of awaitable, or a plain
    # value. Convert it to a `Deferred`.
+    d: "defer.Deferred[R]"
    if isinstance(res, typing.Coroutine):
        # Wrap the coroutine in a `Deferred`.
-        res = defer.ensureDeferred(res)
+        d = defer.ensureDeferred(res)
    elif isinstance(res, defer.Deferred):
-        pass
+        d = res
    elif isinstance(res, Awaitable):
        # `res` is probably some kind of completed awaitable, such as a `DoneAwaitable`
        # or `Future` from `make_awaitable`.
-        res = defer.ensureDeferred(_unwrap_awaitable(res))
+        d = defer.ensureDeferred(_unwrap_awaitable(res))
    else:
        # `res` is a plain value. Wrap it in a `Deferred`.
-        res = defer.succeed(res)
+        d = defer.succeed(res)

-    if res.called and not res.paused:
+    if d.called and not d.paused:
        # The function should have maintained the logcontext, so we can
        # optimise out the messing about
-        return res
+        return d

    # The function may have reset the context before returning, so
    # we need to restore it now.
@@ -843,8 +844,8 @@ def run_in_background(  # type: ignore[misc]
    # which is supposed to have a single entry and exit point. But
    # by spawning off another deferred, we are effectively
    # adding a new exit point.)
-    res.addBoth(_set_context_cb, ctx)
-    return res
+    d.addBoth(_set_context_cb, ctx)
+    return d

T = TypeVar("T")

@@ -877,7 +878,7 @@ def make_deferred_yieldable(deferred: "defer.Deferred[T]") -> "defer.Deferred[T]"
ResultT = TypeVar("ResultT")

-def _set_context_cb(result: ResultT, context: LoggingContext) -> ResultT:
+def _set_context_cb(result: ResultT, context: LoggingContextOrSentinel) -> ResultT:
    """A callback function which just sets the logging context"""
    set_current_context(context)
    return result
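
Per the diff's own comment, the fix is to stop reassigning one name (res) across several different types and instead declare a variable (d) with a single type, assigned exactly once per branch, which stricter mypy runs handle much better. A toy analogue of the pattern, independent of Twisted:

from typing import Union

def to_int(res: Union[int, str, bytes]) -> int:
    # One variable, one declared type, assigned once per branch --
    # mirroring how `d: "defer.Deferred[R]"` replaces reuse of `res`.
    n: int
    if isinstance(res, int):
        n = res
    elif isinstance(res, str):
        n = int(res)
    else:
        n = int(res.decode())
    return n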
+2
@@ -98,6 +98,8 @@ UNIQUE_INDEX_BACKGROUND_UPDATES = {
"event_push_summary": "event_push_summary_unique_index2",
"receipts_linearized": "receipts_linearized_unique_index",
"receipts_graph": "receipts_graph_unique_index",
"profiles": "profiles_full_user_id_key_idx",
"user_filters": "full_users_filters_unique_idx",
}
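
Registering these indexes matters because native upserts need a unique index to target (until the background index build completes, Synapse emulates the upsert instead). A standalone sqlite3 sketch of the underlying mechanism, with the schema trimmed down for illustration:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE profiles (full_user_id TEXT, displayname TEXT)")
# Without this unique index, ON CONFLICT (full_user_id) has nothing to target.
conn.execute(
    "CREATE UNIQUE INDEX profiles_full_user_id_key_idx ON profiles (full_user_id)"
)

upsert = (
    "INSERT INTO profiles (full_user_id, displayname) VALUES (?, ?) "
    "ON CONFLICT (full_user_id) DO UPDATE SET displayname = excluded.displayname"
)
conn.execute(upsert, ("@alice:example.org", "Alice"))
conn.execute(upsert, ("@alice:example.org", "Alice II"))  # updates in place
assert conn.execute("SELECT count(*) FROM profiles").fetchone()[0] == 1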
+2 -3
@@ -187,14 +187,13 @@ class FilteringWorkerStore(SQLBaseStore):
filter_id = max_id + 1
        sql = (
-            "INSERT INTO user_filters (full_user_id, user_id, filter_id, filter_json)"
-            "VALUES(?, ?, ?, ?)"
+            "INSERT INTO user_filters (full_user_id, filter_id, filter_json)"
+            "VALUES(?, ?, ?)"
        )
        txn.execute(
            sql,
            (
                user_id.to_string(),
-                user_id.localpart,
                filter_id,
                bytearray(def_json),
            ),
+4 -8
@@ -173,10 +173,9 @@ class ProfileWorkerStore(SQLBaseStore):
        )

    async def create_profile(self, user_id: UserID) -> None:
-        user_localpart = user_id.localpart
        await self.db_pool.simple_insert(
            table="profiles",
-            values={"user_id": user_localpart, "full_user_id": user_id.to_string()},
+            values={"full_user_id": user_id.to_string()},
            desc="create_profile",
        )

@@ -191,13 +190,11 @@ class ProfileWorkerStore(SQLBaseStore):
            new_displayname: The new display name. If this is None, the user's display
                name is removed.
        """
-        user_localpart = user_id.localpart
        await self.db_pool.simple_upsert(
            table="profiles",
-            keyvalues={"user_id": user_localpart},
+            keyvalues={"full_user_id": user_id.to_string()},
            values={
                "displayname": new_displayname,
-                "full_user_id": user_id.to_string(),
            },
            desc="set_profile_displayname",
        )

@@ -213,11 +210,10 @@ class ProfileWorkerStore(SQLBaseStore):
            new_avatar_url: The new avatar URL. If this is None, the user's avatar is
                removed.
        """
-        user_localpart = user_id.localpart
        await self.db_pool.simple_upsert(
            table="profiles",
-            keyvalues={"user_id": user_localpart},
-            values={"avatar_url": new_avatar_url, "full_user_id": user_id.to_string()},
+            keyvalues={"full_user_id": user_id.to_string()},
+            values={"avatar_url": new_avatar_url},
            desc="set_profile_avatar_url",
        )
@@ -88,7 +88,6 @@ def _load_rules(
        msc1767_enabled=experimental_config.msc1767_enabled,
        msc3664_enabled=experimental_config.msc3664_enabled,
        msc3381_polls_enabled=experimental_config.msc3381_polls_enabled,
-        msc3958_suppress_edits_enabled=experimental_config.msc3958_supress_edit_notifs,
    )
return filtered_rules
@@ -2404,7 +2404,7 @@ class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore):
shadow_banned: bool,
approved: bool,
) -> None:
-        user_id_obj = UserID.from_string(user_id)
+        UserID.from_string(user_id)
now = int(self._clock.time())
@@ -2464,12 +2464,9 @@ class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore):
if create_profile_with_displayname:
# set a default displayname serverside to avoid ugly race
# between auto-joins and clients trying to set displaynames
-                #
-                # *obviously* the 'profiles' table uses localpart for user_id
-                # while everything else uses the full mxid.
                txn.execute(
-                    "INSERT INTO profiles(full_user_id, user_id, displayname) VALUES (?,?,?)",
-                    (user_id, user_id_obj.localpart, create_profile_with_displayname),
+                    "INSERT INTO profiles(full_user_id, displayname) VALUES (?,?)",
+                    (user_id, create_profile_with_displayname),
)
if self.hs.config.stats.stats_enabled:
+4 -1
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-SCHEMA_VERSION = 80  # remember to update the list below when updating
+SCHEMA_VERSION = 81  # remember to update the list below when updating
"""Represents the expectations made by the codebase about the database schema
This should be incremented whenever the codebase changes its requirements on the
@@ -114,6 +114,9 @@ Changes in SCHEMA_VERSION = 79
Changes in SCHEMA_VERSION = 80
- The event_txn_id_device_id is always written to for new events.
- Add tables for the task scheduler.
+Changes in SCHEMA_VERSION = 81
+- We no longer write to column user_id of tables profiles and user_filters
"""
@@ -0,0 +1,64 @@
# Copyright 2023 The Matrix.org Foundation C.I.C
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.storage.database import LoggingTransaction
from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine
def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None:
"""
Update to drop the NOT NULL constraint on column `user_id` so that we can cease to
write to it without inserts to other columns triggering the constraint
"""
if isinstance(database_engine, PostgresEngine):
drop_sql = """
ALTER TABLE profiles ALTER COLUMN user_id DROP NOT NULL
"""
cur.execute(drop_sql)
else:
# irritatingly in SQLite we need to rewrite the table to drop the constraint.
cur.execute("DROP TABLE IF EXISTS temp_profiles")
create_sql = """
CREATE TABLE temp_profiles (
full_user_id text NOT NULL,
user_id text,
displayname text,
avatar_url text,
UNIQUE (full_user_id),
UNIQUE (user_id)
)
"""
cur.execute(create_sql)
copy_sql = """
INSERT INTO temp_profiles (
user_id,
displayname,
avatar_url,
full_user_id)
SELECT user_id, displayname, avatar_url, full_user_id FROM profiles
"""
cur.execute(copy_sql)
drop_sql = """
DROP TABLE profiles
"""
cur.execute(drop_sql)
rename_sql = """
ALTER TABLE temp_profiles RENAME to profiles
"""
cur.execute(rename_sql)
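
Since SQLite cannot drop a NOT NULL constraint in place, the migration above rebuilds the table via create-copy-drop-rename. A quick standalone check (outside Synapse's migration runner) that the rewritten shape behaves as intended:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    """
    CREATE TABLE profiles (
        full_user_id text NOT NULL,
        user_id text,
        displayname text,
        avatar_url text,
        UNIQUE (full_user_id),
        UNIQUE (user_id)
    )
    """
)
# user_id may now be NULL, so writes that omit it succeed ...
conn.execute(
    "INSERT INTO profiles (full_user_id, displayname) VALUES (?, ?)",
    ("@alice:example.org", "Alice"),
)
# ... while full_user_id keeps its NOT NULL constraint.
try:
    conn.execute("INSERT INTO profiles (user_id) VALUES ('bob')")
except sqlite3.IntegrityError as exc:
    print(exc)  # NOT NULL constraint failed: profiles.full_user_id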
@@ -0,0 +1,68 @@
# Copyright 2023 The Matrix.org Foundation C.I.C
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.storage.database import LoggingTransaction
from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine
def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None:
"""
Update to drop the NOT NULL constraint on column user_id so that we can cease to
write to it without inserts to other columns triggering the constraint
"""
if isinstance(database_engine, PostgresEngine):
drop_sql = """
ALTER TABLE user_filters ALTER COLUMN user_id DROP NOT NULL
"""
cur.execute(drop_sql)
else:
# irritatingly in SQLite we need to rewrite the table to drop the constraint.
cur.execute("DROP TABLE IF EXISTS temp_user_filters")
create_sql = """
CREATE TABLE temp_user_filters (
full_user_id text NOT NULL,
user_id text,
filter_id bigint NOT NULL,
filter_json bytea NOT NULL
)
"""
cur.execute(create_sql)
index_sql = """
CREATE UNIQUE INDEX IF NOT EXISTS user_filters_full_user_id_unique ON
temp_user_filters (full_user_id, filter_id)
"""
cur.execute(index_sql)
copy_sql = """
INSERT INTO temp_user_filters (
user_id,
filter_id,
filter_json,
full_user_id)
SELECT user_id, filter_id, filter_json, full_user_id FROM user_filters
"""
cur.execute(copy_sql)
drop_sql = """
DROP TABLE user_filters
"""
cur.execute(drop_sql)
rename_sql = """
ALTER TABLE temp_user_filters RENAME to user_filters
"""
cur.execute(rename_sql)
+1 -1
@@ -470,7 +470,7 @@ class CacheMultipleEntries(CacheEntry[KT, VT]):
def deferred(self, key: KT) -> "defer.Deferred[VT]":
if not self._deferred:
self._deferred = ObservableDeferred(defer.Deferred(), consumeErrors=True)
-            return self._deferred.observe().addCallback(lambda res: res.get(key))
+            return self._deferred.observe().addCallback(lambda res: res[key])
def add_invalidation_callback(
self, key: KT, callback: Optional[Callable[[], None]]
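
This small change appears to be typing-driven: dict.get returns Optional[VT], which does not satisfy the Deferred[VT] annotation under the stricter twisted-trunk mypy run, whereas indexing returns VT and raises KeyError for a missing entry (propagating through the errback chain rather than surfacing as a silent None). In plain dict terms:

cache = {"a": 1}

assert cache.get("b") is None  # .get(): a miss is silently None
try:
    cache["b"]                 # indexing: a miss raises
except KeyError:
    pass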
+36
@@ -0,0 +1,36 @@
# Copyright 2023 The Matrix.org Foundation C.I.C.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.api.errors import LimitExceededError
from tests import unittest
class ErrorsTestCase(unittest.TestCase):
# Create a sub-class to avoid mutating the class-level property.
class LimitExceededErrorHeaders(LimitExceededError):
include_retry_after_header = True
def test_limit_exceeded_header(self) -> None:
err = ErrorsTestCase.LimitExceededErrorHeaders(retry_after_ms=100)
self.assertEqual(err.error_dict(None).get("retry_after_ms"), 100)
assert err.headers is not None
self.assertEqual(err.headers.get("Retry-After"), "1")
def test_limit_exceeded_rounding(self) -> None:
err = ErrorsTestCase.LimitExceededErrorHeaders(retry_after_ms=3001)
self.assertEqual(err.error_dict(None).get("retry_after_ms"), 3001)
assert err.headers is not None
self.assertEqual(err.headers.get("Retry-After"), "4")
@@ -382,7 +382,6 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase):
)
)
-    @override_config({"experimental_features": {"msc3958_supress_edit_notifs": True}})
def test_suppress_edits(self) -> None:
"""Under the default push rules, event edits should not generate notifications."""
bulk_evaluator = BulkPushRuleEvaluator(self.hs)
+18 -6
@@ -169,7 +169,8 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase):
# which sets these values to 10000, but as we're overriding the entire
# rc_login dict here, we need to set this manually as well
"account": {"per_second": 10000, "burst_count": 10000},
-            }
+            },
+            "experimental_features": {"msc4041_enabled": True},
}
)
def test_POST_ratelimiting_per_address(self) -> None:
@@ -189,12 +190,15 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase):
if i == 5:
self.assertEqual(channel.code, 429, msg=channel.result)
retry_after_ms = int(channel.json_body["retry_after_ms"])
+                retry_header = channel.headers.getRawHeaders("Retry-After")
else:
self.assertEqual(channel.code, 200, msg=channel.result)
# Since we're ratelimiting at 1 request/min, retry_after_ms should be lower
# than 1min.
self.assertTrue(retry_after_ms < 6000)
self.assertLess(retry_after_ms, 6000)
assert retry_header
self.assertLessEqual(int(retry_header[0]), 6)
self.reactor.advance(retry_after_ms / 1000.0 + 1.0)
@@ -217,7 +221,8 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase):
# which sets these values to 10000, but as we're overriding the entire
# rc_login dict here, we need to set this manually as well
"address": {"per_second": 10000, "burst_count": 10000},
-            }
+            },
+            "experimental_features": {"msc4041_enabled": True},
}
)
def test_POST_ratelimiting_per_account(self) -> None:
@@ -234,12 +239,15 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase):
if i == 5:
self.assertEqual(channel.code, 429, msg=channel.result)
retry_after_ms = int(channel.json_body["retry_after_ms"])
+                retry_header = channel.headers.getRawHeaders("Retry-After")
else:
self.assertEqual(channel.code, 200, msg=channel.result)
# Since we're ratelimiting at 1 request/min, retry_after_ms should be lower
# than 1min.
self.assertTrue(retry_after_ms < 6000)
self.assertLess(retry_after_ms, 6000)
assert retry_header
self.assertLessEqual(int(retry_header[0]), 6)
self.reactor.advance(retry_after_ms / 1000.0)
@@ -262,7 +270,8 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase):
# rc_login dict here, we need to set this manually as well
"address": {"per_second": 10000, "burst_count": 10000},
"failed_attempts": {"per_second": 0.17, "burst_count": 5},
-            }
+            },
+            "experimental_features": {"msc4041_enabled": True},
}
)
def test_POST_ratelimiting_per_account_failed_attempts(self) -> None:
@@ -279,12 +288,15 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase):
if i == 5:
self.assertEqual(channel.code, 429, msg=channel.result)
retry_after_ms = int(channel.json_body["retry_after_ms"])
+                retry_header = channel.headers.getRawHeaders("Retry-After")
else:
self.assertEqual(channel.code, 403, msg=channel.result)
# Since we're ratelimiting at 1 request/min, retry_after_ms should be lower
# than 1min.
self.assertTrue(retry_after_ms < 6000)
self.assertLess(retry_after_ms, 6000)
assert retry_header
self.assertLessEqual(int(retry_header[0]), 6)
self.reactor.advance(retry_after_ms / 1000.0 + 1.0)
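
To show why MSC4041 calls this "library-assisted retry handling": a hypothetical client loop that prefers the standard Retry-After header and falls back to the Matrix-specific retry_after_ms field (requests and the surrounding details are illustrative, not part of this changeset):

import time

import requests  # any HTTP client works; used here for illustration

def post_with_retry(url: str, body: dict, max_attempts: int = 3) -> requests.Response:
    for _ in range(max_attempts):
        resp = requests.post(url, json=body)
        if resp.status_code != 429:
            return resp
        header = resp.headers.get("Retry-After")
        # Prefer the standard header (whole seconds); fall back to the
        # Matrix-specific JSON field (milliseconds).
        delay = int(header) if header else resp.json()["retry_after_ms"] / 1000
        time.sleep(delay)
    return resp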
-63
@@ -15,8 +15,6 @@
from twisted.test.proto_helpers import MemoryReactor
from synapse.server import HomeServer
-from synapse.storage.database import LoggingTransaction
-from synapse.storage.engines import PostgresEngine
from synapse.types import UserID
from synapse.util import Clock
@@ -64,64 +62,3 @@ class ProfileStoreTestCase(unittest.HomeserverTestCase):
self.assertIsNone(
self.get_success(self.store.get_profile_avatar_url(self.u_frank))
)
def test_profiles_bg_migration(self) -> None:
"""
Test background job that copies entries from column user_id to full_user_id, adding
the hostname in the process.
"""
updater = self.hs.get_datastores().main.db_pool.updates
# drop the constraint so we can insert nulls in full_user_id to populate the test
if isinstance(self.store.database_engine, PostgresEngine):
def f(txn: LoggingTransaction) -> None:
txn.execute(
"ALTER TABLE profiles DROP CONSTRAINT full_user_id_not_null"
)
self.get_success(self.store.db_pool.runInteraction("", f))
for i in range(0, 70):
self.get_success(
self.store.db_pool.simple_insert(
"profiles",
{"user_id": f"hello{i:02}"},
)
)
# re-add the constraint so that when it's validated it actually exists
if isinstance(self.store.database_engine, PostgresEngine):
def f(txn: LoggingTransaction) -> None:
txn.execute(
"ALTER TABLE profiles ADD CONSTRAINT full_user_id_not_null CHECK (full_user_id IS NOT NULL) NOT VALID"
)
self.get_success(self.store.db_pool.runInteraction("", f))
self.get_success(
self.store.db_pool.simple_insert(
"background_updates",
values={
"update_name": "populate_full_user_id_profiles",
"progress_json": "{}",
},
)
)
self.get_success(
updater.run_background_updates(False),
)
expected_values = []
for i in range(0, 70):
expected_values.append((f"@hello{i:02}:{self.hs.hostname}",))
res = self.get_success(
self.store.db_pool.execute(
"", None, "SELECT full_user_id from profiles ORDER BY full_user_id"
)
)
self.assertEqual(len(res), len(expected_values))
self.assertEqual(res, expected_values)
-94
@@ -1,94 +0,0 @@
# Copyright 2023 The Matrix.org Foundation C.I.C
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.test.proto_helpers import MemoryReactor
from synapse.server import HomeServer
from synapse.storage.database import LoggingTransaction
from synapse.storage.engines import PostgresEngine
from synapse.util import Clock
from tests import unittest
class UserFiltersStoreTestCase(unittest.HomeserverTestCase):
"""
Test background migration that copies entries from column user_id to full_user_id, adding
the hostname in the process.
"""
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
def test_bg_migration(self) -> None:
updater = self.hs.get_datastores().main.db_pool.updates
# drop the constraint so we can insert nulls in full_user_id to populate the test
if isinstance(self.store.database_engine, PostgresEngine):
def f(txn: LoggingTransaction) -> None:
txn.execute(
"ALTER TABLE user_filters DROP CONSTRAINT full_user_id_not_null"
)
self.get_success(self.store.db_pool.runInteraction("", f))
for i in range(0, 70):
self.get_success(
self.store.db_pool.simple_insert(
"user_filters",
{
"user_id": f"hello{i:02}",
"filter_id": i,
"filter_json": bytearray(i),
},
)
)
# re-add the constraint so that when it's validated it actually exists
if isinstance(self.store.database_engine, PostgresEngine):
def f(txn: LoggingTransaction) -> None:
txn.execute(
"ALTER TABLE user_filters ADD CONSTRAINT full_user_id_not_null CHECK (full_user_id IS NOT NULL) NOT VALID"
)
self.get_success(self.store.db_pool.runInteraction("", f))
self.get_success(
self.store.db_pool.simple_insert(
"background_updates",
values={
"update_name": "populate_full_user_id_user_filters",
"progress_json": "{}",
},
)
)
self.get_success(
updater.run_background_updates(False),
)
expected_values = []
for i in range(0, 70):
expected_values.append((f"@hello{i:02}:{self.hs.hostname}",))
res = self.get_success(
self.store.db_pool.execute(
"", None, "SELECT full_user_id from user_filters ORDER BY full_user_id"
)
)
self.assertEqual(len(res), len(expected_values))
self.assertEqual(res, expected_values)
+6 -8
@@ -60,11 +60,9 @@ class ObservableDeferredTest(TestCase):
        observer1.addBoth(check_called_first)

        # store the results
-        results: List[Optional[ObservableDeferred[int]]] = [None, None]
+        results: List[Optional[int]] = [None, None]

-        def check_val(
-            res: ObservableDeferred[int], idx: int
-        ) -> ObservableDeferred[int]:
+        def check_val(res: int, idx: int) -> int:
            results[idx] = res
            return res

@@ -93,14 +91,14 @@ class ObservableDeferredTest(TestCase):
        observer1.addBoth(check_called_first)

        # store the results
-        results: List[Optional[ObservableDeferred[str]]] = [None, None]
+        results: List[Optional[Failure]] = [None, None]

-        def check_val(res: ObservableDeferred[str], idx: int) -> None:
+        def check_failure(res: Failure, idx: int) -> None:
            results[idx] = res
            return None

-        observer1.addErrback(check_val, 0)
-        observer2.addErrback(check_val, 1)
+        observer1.addErrback(check_failure, 0)
+        observer2.addErrback(check_failure, 1)

        try:
            raise Exception("gah!")