Compare commits

9 commits: rei/p/stca...release-v1

Commit SHA1s:
- 5a97e401b1
- 6b6dcdc338
- aa874a1390
- 2bf31f7807
- 57ca8ab10f
- aa58e8a28a
- b9f2f6d3c4
- 8c36d332d5
- 76aa5537ad
CHANGES.md (26 lines changed)

@@ -1,3 +1,29 @@
Synapse 1.49.2 (2021-12-21)
===========================

This release fixes a regression introduced in Synapse 1.49.0 which could cause `/sync` requests to take significantly longer. This would particularly affect "initial" syncs for users participating in a large number of rooms, and in extreme cases, could make it impossible for such users to log in on a new client.

**Note:** in line with our [deprecation policy](https://matrix-org.github.io/synapse/latest/deprecation_policy.html) for platform dependencies, this will be the last release to support Python 3.6 and PostgreSQL 9.6, both of which have now reached upstream end-of-life. Synapse will require Python 3.7+ and PostgreSQL 10+.

**Note:** We will also stop producing packages for Ubuntu 18.04 (Bionic Beaver) after this release, as it uses Python 3.6.

Bugfixes
--------

- Fix a performance regression in `/sync` handling, introduced in 1.49.0. ([\#11583](https://github.com/matrix-org/synapse/issues/11583))

Internal Changes
----------------

- Work around a build problem on Debian Buster. ([\#11625](https://github.com/matrix-org/synapse/issues/11625))

Synapse 1.49.1 (2021-12-21)
===========================

Not released due to problems building the debian packages.

Synapse 1.49.0 (2021-12-14)
===========================
book.toml (12 lines changed)

@@ -34,6 +34,14 @@ additional-css = [
    "docs/website_files/table-of-contents.css",
    "docs/website_files/remove-nav-buttons.css",
    "docs/website_files/indent-section-headers.css",
    "docs/website_files/version-picker.css",
]
additional-js = ["docs/website_files/table-of-contents.js"]
theme = "docs/website_files/theme"
additional-js = [
    "docs/website_files/table-of-contents.js",
    "docs/website_files/version-picker.js",
    "docs/website_files/version.js",
]
theme = "docs/website_files/theme"

[preprocessor.schema_versions]
command = "./scripts-dev/schema_versions.py"
@@ -1 +0,0 @@
Send and handle cross-signing messages using the stable prefix.
@@ -1 +0,0 @@
Deduplicate in-flight requests in `_get_state_for_groups`.
@@ -1 +0,0 @@
Allow specific, experimental events to be created without `prev_events`. Used by [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716).
@@ -1 +0,0 @@
A test helper (`wait_for_background_updates`) no longer depends on classes defining a `store` property.
@@ -1 +0,0 @@
Add type hints to `synapse.appservice`.
@@ -1 +0,0 @@
Allow guests to send state events per [MSC3419](https://github.com/matrix-org/matrix-doc/pull/3419).
@@ -1 +0,0 @@
Document the usage of refresh tokens.
@@ -1 +0,0 @@
Add missing type hints to `synapse.config` module.
@@ -1 +0,0 @@
Add test to ensure we share the same `state_group` across the whole historical batch when using the [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) `/batch_send` endpoint.
@@ -1 +0,0 @@
Fix a long-standing bug where relations from other rooms could be included in the bundled aggregations of an event.
@@ -1 +0,0 @@
Use HTTPStatus constants in place of literals in `tests.rest.client.test_auth`.
@@ -1 +0,0 @@
Add a receipt types constant for `m.read`.
@@ -1 +0,0 @@
Clean up `synapse.rest.admin`.
@@ -1 +0,0 @@
Improvements to log messages around handling stream ids.
@@ -1 +0,0 @@
Add experimental support for MSC3202: allowing application services to masquerade as specific devices.
@@ -1 +0,0 @@
Support unprefixed versions of fallback key property names.
@@ -1 +0,0 @@
Add missing `errcode` to `parse_string` and `parse_boolean`.
@@ -1 +0,0 @@
Use HTTPStatus constants in place of literals in `synapse.http`.
@@ -1 +0,0 @@
Add missing type hints to storage classes.
@@ -1 +0,0 @@
Fix a bug introduced in Synapse 1.17.0 where a pusher created for an email with capital letters would fail to be created.
@@ -1 +0,0 @@
Add missing type hints to storage classes.
@@ -1 +0,0 @@
Fix an inaccurate and misleading comment in the `/sync` code.
@@ -1 +0,0 @@
Add missing type hints to storage classes.
@@ -1 +0,0 @@
Add missing type hints to storage classes.
@@ -1 +0,0 @@
Add missing type hints to `synapse.logging.context`.
@@ -1 +0,0 @@
Stop populating unused database column `state_events.prev_state`.
@@ -1 +0,0 @@
Minor efficiency improvements in event persistence.
@@ -1 +0,0 @@
Add some safety checks that storage functions are used correctly.
@@ -1 +0,0 @@
Make `get_device` return `None` if the device doesn't exist rather than raising an exception.
@@ -1 +0,0 @@
Split the HTML parsing code from the URL preview resource code.
@@ -1 +0,0 @@
Remove redundant `COALESCE()`s around `COUNT()`s in database queries.
@@ -1 +0,0 @@
Add missing type hints to `synapse.http`.
@@ -1 +0,0 @@
Convert `EventStreamResult` from a `namedtuple` to `attrs` to improve type hints.
@@ -1 +0,0 @@
Add missing type hints to storage classes.
@@ -1 +0,0 @@
Add some safety checks that storage functions are used correctly.
@@ -1 +0,0 @@
Add [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) and [MSC3030](https://github.com/matrix-org/matrix-doc/pull/3030) to `/versions` -> `unstable_features` to detect server support.
@@ -1 +0,0 @@
Add missing type hints to storage classes.
@@ -1 +0,0 @@
Add type hints to `synapse/tests/rest/admin`.
@@ -1 +0,0 @@
Deduplicate in-flight requests in `_get_state_for_groups`.
@@ -1 +0,0 @@
Deduplicate in-flight requests in `_get_state_for_groups`.
debian/changelog (12 lines changed, vendored)

@@ -1,3 +1,15 @@
matrix-synapse-py3 (1.49.2) stable; urgency=medium

  * New synapse release 1.49.2.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 21 Dec 2021 17:31:03 +0000

matrix-synapse-py3 (1.49.1) stable; urgency=medium

  * New synapse release 1.49.1.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 21 Dec 2021 11:07:30 +0000

matrix-synapse-py3 (1.49.0) stable; urgency=medium

  * New synapse release 1.49.0.
@@ -30,7 +30,6 @@
- [SSO Mapping Providers](sso_mapping_providers.md)
- [Password Auth Providers](password_auth_providers.md)
- [JSON Web Tokens](jwt.md)
- [Refresh Tokens](usage/configuration/user_authentication/refresh_tokens.md)
- [Registration Captcha](CAPTCHA_SETUP.md)
- [Application Services](application_services.md)
- [Server Notices](server_notices.md)
@@ -14,8 +14,8 @@ i.e. when a version reaches End of Life Synapse will withdraw support for that
version in future releases.

Details on the upstream support life cycles for Python and PostgreSQL are
documented at https://endoflife.date/python and
https://endoflife.date/postgresql.
documented at [https://endoflife.date/python](https://endoflife.date/python) and
[https://endoflife.date/postgresql](https://endoflife.date/postgresql).


Context
@@ -1,139 +0,0 @@
# Refresh Tokens

Synapse supports refresh tokens since version 1.49 (some earlier versions had support for an earlier, experimental draft of [MSC2918] which is not compatible).

[MSC2918]: https://github.com/matrix-org/matrix-doc/blob/main/proposals/2918-refreshtokens.md#msc2918-refresh-tokens


## Background and motivation

Synapse users' sessions are identified by **access tokens**; access tokens are
issued to users on login. Each session gets a unique access token which identifies
it; the access token must be kept secret as it grants access to the user's account.

Traditionally, these access tokens were eternally valid (at least until the user
explicitly chose to log out).

In some cases, it may be desirable for these access tokens to expire so that the
potential damage caused by leaking an access token is reduced.
On the other hand, forcing a user to re-authenticate (log in again) often might
be too much of an inconvenience.

**Refresh tokens** are a mechanism to avoid some of this inconvenience whilst
still getting most of the benefits of short access token lifetimes.
Refresh tokens are also a concept present in OAuth 2 — further reading is available
[here](https://datatracker.ietf.org/doc/html/rfc6749#section-1.5).

When refresh tokens are in use, both an access token and a refresh token will be
issued to users on login. The access token will expire after a predetermined amount
of time, but otherwise works in the same way as before. When the access token is
close to expiring (or has expired), the user's client should present the homeserver
(Synapse) with the refresh token.

The homeserver will then generate a new access token and refresh token for the user
and return them. The old refresh token is invalidated and cannot be used again*.

Finally, refresh tokens also make it possible for sessions to be logged out if they
are inactive for too long, before the session naturally ends; see the configuration
guide below.


*To prevent issues if clients lose connection half-way through refreshing a token,
the refresh token is only invalidated once the new access token has been used at
least once. For all intents and purposes, the above simplification is sufficient.
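To make the refresh flow above concrete, here is a minimal client-side sketch. It is an illustration, not Synapse's implementation: it assumes the stable refresh endpoint (`POST /_matrix/client/v3/refresh`, standardised in later spec versions; Synapse 1.49 originally served this under the unstable MSC2918 prefix) and uses the `requests` library. The homeserver URL and tokens are placeholders.

```python
import requests

HOMESERVER = "https://matrix.example.org"  # placeholder homeserver URL

def refresh_session(refresh_token: str) -> dict:
    """Exchange a refresh token for a new access/refresh token pair.

    Assumes the stable endpoint from later spec versions; Synapse 1.49
    originally exposed this under the unstable MSC2918 prefix.
    """
    resp = requests.post(
        f"{HOMESERVER}/_matrix/client/v3/refresh",
        json={"refresh_token": refresh_token},
    )
    resp.raise_for_status()
    body = resp.json()
    # The old refresh token is invalidated once the new access token is
    # used, so the client should persist the new pair immediately.
    return {
        "access_token": body["access_token"],
        "refresh_token": body.get("refresh_token"),
        # milliseconds until the new access token expires, if enforced
        "expires_in_ms": body.get("expires_in_ms"),
    }
```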
## Caveats

There are some caveats:

* If a third party gets both your access token and refresh token, they will be able to
  continue to enjoy access to your session.
  * This is still an improvement because you (the user) will notice when *your*
    session expires and you're not able to use your refresh token.
    That would be a giveaway that someone else has compromised your session.
    You would be able to log in again and terminate that session.
    Previously (with long-lived access tokens), a third party that has your access
    token could go undetected for a very long time.
* Clients need to implement support for refresh tokens in order for them to be a
  useful mechanism.
  * It is up to homeserver administrators if they want to issue long-lived access
    tokens to clients not implementing refresh tokens.
    * For compatibility, it is likely that they should, at least until client support
      is widespread.
      * Users with clients that support refresh tokens will still benefit from the
        added security; it's not possible to downgrade a session to using long-lived
        access tokens, so this effectively gives users the choice.
    * In a closed environment where all users use known clients, this may not be
      an issue as the homeserver administrator can know if the clients have refresh
      token support. In that case, the non-refreshable access token lifetime
      may be set to a short duration so that a similar level of security is provided.
## Configuration Guide

The following configuration options, in the `registration` section, are related:

* `session_lifetime`: maximum length of a session, even if it's refreshed.
  In other words, the client must log in again after this time period.
  In most cases, this can be unset (infinite) or set to a long time (years or months).
* `refreshable_access_token_lifetime`: lifetime of access tokens that are created
  by clients supporting refresh tokens.
  This should be short; a good value might be 5 minutes (`5m`).
* `nonrefreshable_access_token_lifetime`: lifetime of access tokens that are created
  by clients which don't support refresh tokens.
  Make this short if you want to effectively force use of refresh tokens.
  Make this long if you don't want to inconvenience users of clients which don't
  support refresh tokens (by forcing them to frequently re-authenticate using
  login credentials).
* `refresh_token_lifetime`: lifetime of refresh tokens.
  In other words, the client must refresh within this time period to maintain its session.
  Unless you want to log inactive sessions out, it is often fine to use a long
  value here or even leave it unset (infinite).
  Beware that making it too short will inconvenience clients that do not connect
  very often, including mobile clients and clients of infrequent users (by making
  it more difficult for them to refresh in time, which may force them to need to
  re-authenticate using login credentials).

**Note:** All four options above only apply when tokens are created (by logging in or refreshing).
Changes to these settings do not apply retroactively.
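As a concrete illustration, this is roughly what the four options might look like in `homeserver.yaml`. The durations are illustrative assumptions, not recommendations from this document:

```yaml
# Illustrative values only; tune to your own requirements.
session_lifetime: 365d                      # hard cap: full re-login after a year
refreshable_access_token_lifetime: 5m       # short access tokens for refresh-capable clients
nonrefreshable_access_token_lifetime: 365d  # lenient towards clients without refresh support
refresh_token_lifetime: 30d                 # sessions idle for ~a month are logged out
```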
### Using refresh token expiry to log out inactive sessions

If you'd like to force sessions to be logged out upon inactivity, you can enable
refreshable access token expiry and refresh token expiry.

This works because a client must refresh at least once within a period of
`refresh_token_lifetime` in order to maintain valid credentials to access the
account.

(It's suggested that `refresh_token_lifetime` should be longer than
`refreshable_access_token_lifetime` and this section assumes that to be the case
for simplicity.)

Note: this will only affect sessions using refresh tokens. You may wish to
set a short `nonrefreshable_access_token_lifetime` to prevent this being bypassed
by clients that do not support refresh tokens.
#### Choosing values that guarantee permitting some inactivity

It may be desirable to permit some short periods of inactivity, for example to
accommodate brief outages in client connectivity.

The following model aims to provide guidance for choosing `refresh_token_lifetime`
and `refreshable_access_token_lifetime` to satisfy requirements of the form:

1. inactivity longer than `L` **MUST** cause the session to be logged out; and
2. inactivity shorter than `S` **MUST NOT** cause the session to be logged out.

This model makes the weakest assumption that all active clients will refresh as
needed to maintain an active access token, but no sooner.
*In reality, clients may refresh more often than this model assumes, but the
above requirements will still hold.*

To satisfy the above model, as shown in the sketch after this list,
* `refresh_token_lifetime` should be set to `L`; and
* `refreshable_access_token_lifetime` should be set to `L - S`.
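For example (illustrative numbers, not from the original document): to guarantee logout after 7 days of inactivity (`L = 7d`) while always tolerating up to 1 day of inactivity (`S = 1d`), the model gives `L - S = 6d`:

```yaml
refresh_token_lifetime: 7d              # L
refreshable_access_token_lifetime: 6d   # L - S
```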
@@ -24,6 +24,11 @@ Finally, we also stylise the chapter titles in the left sidebar by indenting them
slightly so that they are more visually distinguishable from the section headers
(the bold titles). This is done through the `indent-section-headers.css` file.

In addition to these modifications, we have added a version picker to the documentation.
Users can switch between documentation for different versions of Synapse.
This functionality was implemented through the `version-picker.js` and
`version-picker.css` files.

More information can be found in mdbook's official documentation for
[injecting page JS/CSS](https://rust-lang.github.io/mdBook/format/config.html)
and
@@ -131,6 +131,18 @@
<i class="fa fa-search"></i>
</button>
{{/if}}
<div class="version-picker">
    <div class="dropdown">
        <div class="select">
            <span></span>
            <i class="fa fa-chevron-down"></i>
        </div>
        <input type="hidden" name="version">
        <ul class="dropdown-menu">
            <!-- Versions will be added dynamically in version-picker.js -->
        </ul>
    </div>
</div>
</div>

<h1 class="menu-title">{{ book_title }}</h1>
@@ -309,4 +321,4 @@
{{/if}}

</body>
</html>
</html>
docs/website_files/version-picker.css (new file, 78 lines)

@@ -0,0 +1,78 @@
.version-picker {
    display: flex;
    align-items: center;
}

.version-picker .dropdown {
    width: 130px;
    max-height: 29px;
    margin-left: 10px;
    display: inline-block;
    border-radius: 4px;
    border: 1px solid var(--theme-popup-border);
    position: relative;
    font-size: 13px;
    color: var(--fg);
    height: 100%;
    text-align: left;
}
.version-picker .dropdown .select {
    cursor: pointer;
    display: block;
    padding: 5px 2px 5px 15px;
}
.version-picker .dropdown .select > i {
    font-size: 10px;
    color: var(--fg);
    cursor: pointer;
    float: right;
    line-height: 20px !important;
}
.version-picker .dropdown:hover {
    border: 1px solid var(--theme-popup-border);
}
.version-picker .dropdown:active {
    background-color: var(--theme-popup-bg);
}
.version-picker .dropdown.active:hover,
.version-picker .dropdown.active {
    border: 1px solid var(--theme-popup-border);
    border-radius: 2px 2px 0 0;
    background-color: var(--theme-popup-bg);
}
.version-picker .dropdown.active .select > i {
    transform: rotate(-180deg);
}
.version-picker .dropdown .dropdown-menu {
    position: absolute;
    background-color: var(--theme-popup-bg);
    width: 100%;
    left: -1px;
    right: 1px;
    margin-top: 1px;
    border: 1px solid var(--theme-popup-border);
    border-radius: 0 0 4px 4px;
    overflow: hidden;
    display: none;
    max-height: 300px;
    overflow-y: auto;
    z-index: 9;
}
.version-picker .dropdown .dropdown-menu li {
    font-size: 12px;
    padding: 6px 20px;
    cursor: pointer;
}
.version-picker .dropdown .dropdown-menu {
    padding: 0;
    list-style: none;
}
.version-picker .dropdown .dropdown-menu li:hover {
    background-color: var(--theme-hover);
}
.version-picker .dropdown .dropdown-menu li.active::before {
    display: inline-block;
    content: "✓";
    margin-inline-start: -14px;
    width: 14px;
}
docs/website_files/version-picker.js (new file, 127 lines)

@@ -0,0 +1,127 @@
const dropdown = document.querySelector('.version-picker .dropdown');
const dropdownMenu = dropdown.querySelector('.dropdown-menu');

fetchVersions(dropdown, dropdownMenu).then(() => {
    initializeVersionDropdown(dropdown, dropdownMenu);
});

/**
 * Initialize the dropdown functionality for version selection.
 *
 * @param {Element} dropdown - The dropdown element.
 * @param {Element} dropdownMenu - The dropdown menu element.
 */
function initializeVersionDropdown(dropdown, dropdownMenu) {
    // Toggle the dropdown menu on click
    dropdown.addEventListener('click', function () {
        this.setAttribute('tabindex', 1);
        this.classList.toggle('active');
        dropdownMenu.style.display = (dropdownMenu.style.display === 'block') ? 'none' : 'block';
    });

    // Remove the 'active' class and hide the dropdown menu on focusout
    dropdown.addEventListener('focusout', function () {
        this.classList.remove('active');
        dropdownMenu.style.display = 'none';
    });

    // Handle item selection within the dropdown menu
    const dropdownMenuItems = dropdownMenu.querySelectorAll('li');
    dropdownMenuItems.forEach(function (item) {
        item.addEventListener('click', function () {
            dropdownMenuItems.forEach(function (item) {
                item.classList.remove('active');
            });
            this.classList.add('active');
            dropdown.querySelector('span').textContent = this.textContent;
            dropdown.querySelector('input').value = this.getAttribute('id');

            window.location.href = changeVersion(window.location.href, this.textContent);
        });
    });
};

/**
 * This function fetches the available versions from a GitHub repository
 * and inserts them into the version picker.
 *
 * @param {Element} dropdown - The dropdown element.
 * @param {Element} dropdownMenu - The dropdown menu element.
 * @returns {Promise<Array<string>>} A promise that resolves with an array of available versions.
 */
function fetchVersions(dropdown, dropdownMenu) {
    return new Promise((resolve, reject) => {
        window.addEventListener("load", () => {

            fetch("https://api.github.com/repos/matrix-org/synapse/git/trees/gh-pages", {
                cache: "force-cache",
            }).then(res =>
                res.json()
            ).then(resObject => {
                const excluded = ['dev-docs', 'v1.91.0', 'v1.80.0', 'v1.69.0'];
                const tree = resObject.tree.filter(item => item.type === "tree" && !excluded.includes(item.path));
                const versions = tree.map(item => item.path).sort(sortVersions);

                // Create a list of <li> items for versions
                versions.forEach((version) => {
                    const li = document.createElement("li");
                    li.textContent = version;
                    li.id = version;

                    if (window.SYNAPSE_VERSION === version) {
                        li.classList.add('active');
                        dropdown.querySelector('span').textContent = version;
                        dropdown.querySelector('input').value = version;
                    }

                    dropdownMenu.appendChild(li);
                });

                resolve(versions);

            }).catch(ex => {
                console.error("Failed to fetch version data", ex);
                reject(ex);
            })
        });
    });
}

/**
 * Custom sorting function to sort an array of version strings.
 *
 * @param {string} a - The first version string to compare.
 * @param {string} b - The second version string to compare.
 * @returns {number} - A negative number if a should come before b, a positive number if b should come before a, or 0 if they are equal.
 */
function sortVersions(a, b) {
    // Put 'develop' and 'latest' at the top
    if (a === 'develop' || a === 'latest') return -1;
    if (b === 'develop' || b === 'latest') return 1;

    const versionA = (a.match(/v\d+(\.\d+)+/) || [])[0];
    const versionB = (b.match(/v\d+(\.\d+)+/) || [])[0];

    return versionB.localeCompare(versionA);
}

/**
 * Change the version in a URL path.
 *
 * @param {string} url - The original URL to be modified.
 * @param {string} newVersion - The new version to replace the existing version in the URL.
 * @returns {string} The updated URL with the new version.
 */
function changeVersion(url, newVersion) {
    const parsedURL = new URL(url);
    const pathSegments = parsedURL.pathname.split('/');

    // Modify the version
    pathSegments[2] = newVersion;

    // Reconstruct the URL
    parsedURL.pathname = pathSegments.join('/');

    return parsedURL.href;
}
docs/website_files/version.js (new file, 1 line)

@@ -0,0 +1 @@
window.SYNAPSE_VERSION = 'v1.49';
mypy.ini (32 lines changed)

@@ -25,8 +25,11 @@ exclude = (?x)
  ^(
  |synapse/storage/databases/__init__.py
  |synapse/storage/databases/main/__init__.py
  |synapse/storage/databases/main/account_data.py
  |synapse/storage/databases/main/cache.py
  |synapse/storage/databases/main/devices.py
  |synapse/storage/databases/main/e2e_room_keys.py
  |synapse/storage/databases/main/end_to_end_keys.py
  |synapse/storage/databases/main/event_federation.py
  |synapse/storage/databases/main/event_push_actions.py
  |synapse/storage/databases/main/events_bg_updates.py

@@ -37,10 +40,12 @@ exclude = (?x)
  |synapse/storage/databases/main/purge_events.py
  |synapse/storage/databases/main/push_rule.py
  |synapse/storage/databases/main/receipts.py
  |synapse/storage/databases/main/room.py
  |synapse/storage/databases/main/roommember.py
  |synapse/storage/databases/main/search.py
  |synapse/storage/databases/main/state.py
  |synapse/storage/databases/main/stats.py
  |synapse/storage/databases/main/transactions.py
  |synapse/storage/databases/main/user_directory.py
  |synapse/storage/schema/

@@ -140,9 +145,6 @@ disallow_untyped_defs = True
[mypy-synapse.app.*]
disallow_untyped_defs = True

[mypy-synapse.appservice.*]
disallow_untyped_defs = True

[mypy-synapse.config._base]
disallow_untyped_defs = True

@@ -161,12 +163,6 @@ disallow_untyped_defs = False
[mypy-synapse.handlers.*]
disallow_untyped_defs = True

[mypy-synapse.http.server]
disallow_untyped_defs = True

[mypy-synapse.logging.context]
disallow_untyped_defs = True

[mypy-synapse.metrics.*]
disallow_untyped_defs = True

@@ -185,27 +181,15 @@ disallow_untyped_defs = True
[mypy-synapse.state.*]
disallow_untyped_defs = True

[mypy-synapse.storage.databases.main.account_data]
disallow_untyped_defs = True

[mypy-synapse.storage.databases.main.client_ips]
disallow_untyped_defs = True

[mypy-synapse.storage.databases.main.directory]
disallow_untyped_defs = True

[mypy-synapse.storage.databases.main.e2e_room_keys]
disallow_untyped_defs = True

[mypy-synapse.storage.databases.main.end_to_end_keys]
disallow_untyped_defs = True

[mypy-synapse.storage.databases.main.events_worker]
disallow_untyped_defs = True

[mypy-synapse.storage.databases.main.room]
disallow_untyped_defs = True

[mypy-synapse.storage.databases.main.room_batch]
disallow_untyped_defs = True

@@ -215,9 +199,6 @@ disallow_untyped_defs = True
[mypy-synapse.storage.databases.main.state_deltas]
disallow_untyped_defs = True

[mypy-synapse.storage.databases.main.transactions]
disallow_untyped_defs = True

[mypy-synapse.storage.databases.main.user_erasure_store]
disallow_untyped_defs = True

@@ -242,9 +223,6 @@ disallow_untyped_defs = True
[mypy-tests.storage.test_user_directory]
disallow_untyped_defs = True

[mypy-tests.rest.admin.*]
disallow_untyped_defs = True

[mypy-tests.rest.client.test_directory]
disallow_untyped_defs = True
@@ -17,12 +17,11 @@
from typing import Any, List, Optional, Type, Union

from twisted.internet import protocol
from twisted.internet.defer import Deferred

class RedisProtocol(protocol.Protocol):
    def publish(self, channel: str, message: bytes): ...
    def ping(self) -> "Deferred[None]": ...
    def set(
    async def ping(self) -> None: ...
    async def set(
        self,
        key: str,
        value: Any,

@@ -30,8 +29,8 @@ class RedisProtocol(protocol.Protocol):
        pexpire: Optional[int] = None,
        only_if_not_exists: bool = False,
        only_if_exists: bool = False,
    ) -> "Deferred[None]": ...
    def get(self, key: str) -> "Deferred[Any]": ...
    ) -> None: ...
    async def get(self, key: str) -> Any: ...

class SubscriberProtocol(RedisProtocol):
    def __init__(self, *args, **kwargs): ...
@@ -47,7 +47,7 @@ try:
except ImportError:
    pass

__version__ = "1.49.0"
__version__ = "1.49.2"

if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
    # We import here so that we don't have to install a bunch of deps when
@@ -155,11 +155,7 @@ class Auth:

        access_token = self.get_access_token_from_request(request)

        (
            user_id,
            device_id,
            app_service,
        ) = await self._get_appservice_user_id_and_device_id(request)
        user_id, app_service = await self._get_appservice_user_id(request)
        if user_id and app_service:
            if ip_addr and self._track_appservice_user_ips:
                await self.store.insert_client_ip(

@@ -167,22 +163,16 @@ class Auth:
                    access_token=access_token,
                    ip=ip_addr,
                    user_agent=user_agent,
                    device_id="dummy-device"
                    if device_id is None
                    else device_id,  # stubbed
                    device_id="dummy-device",  # stubbed
                )

            requester = create_requester(
                user_id, app_service=app_service, device_id=device_id
            )
            requester = create_requester(user_id, app_service=app_service)

            request.requester = user_id
            if user_id in self._force_tracing_for_users:
                opentracing.force_tracing()
            opentracing.set_tag("authenticated_entity", user_id)
            opentracing.set_tag("user_id", user_id)
            if device_id is not None:
                opentracing.set_tag("device_id", device_id)
            opentracing.set_tag("appservice_id", app_service.id)

            return requester

@@ -284,81 +274,33 @@ class Auth:
                403, "Application service has not registered this user (%s)" % user_id
            )

    async def _get_appservice_user_id_and_device_id(
    async def _get_appservice_user_id(
        self, request: Request
    ) -> Tuple[Optional[str], Optional[str], Optional[ApplicationService]]:
        """
        Given a request, reads the request parameters to determine:
        - whether it's an application service that's making this request
        - what user the application service should be treated as controlling
          (the user_id URI parameter allows an application service to masquerade
          any applicable user in its namespace)
        - what device the application service should be treated as controlling
          (the device_id[^1] URI parameter allows an application service to masquerade
          as any device that exists for the relevant user)

        [^1] Unstable and provided by MSC3202.
             Must use `org.matrix.msc3202.device_id` in place of `device_id` for now.

        Returns:
            3-tuple of
            (user ID?, device ID?, application service?)

        Postconditions:
        - If an application service is returned, so is a user ID
        - A user ID is never returned without an application service
        - A device ID is never returned without a user ID or an application service
        - The returned application service, if present, is permitted to control the
          returned user ID.
        - The returned device ID, if present, has been checked to be a valid device ID
          for the returned user ID.
        """
        DEVICE_ID_ARG_NAME = b"org.matrix.msc3202.device_id"

    ) -> Tuple[Optional[str], Optional[ApplicationService]]:
        app_service = self.store.get_app_service_by_token(
            self.get_access_token_from_request(request)
        )
        if app_service is None:
            return None, None, None
            return None, None

        if app_service.ip_range_whitelist:
            ip_address = IPAddress(request.getClientIP())
            if ip_address not in app_service.ip_range_whitelist:
                return None, None, None
                return None, None

        # This will always be set by the time Twisted calls us.
        assert request.args is not None

        if b"user_id" in request.args:
            effective_user_id = request.args[b"user_id"][0].decode("utf8")
            await self.validate_appservice_can_control_user_id(
                app_service, effective_user_id
            )
        else:
            effective_user_id = app_service.sender
        if b"user_id" not in request.args:
            return app_service.sender, app_service

        effective_device_id: Optional[str] = None
        user_id = request.args[b"user_id"][0].decode("utf8")
        await self.validate_appservice_can_control_user_id(app_service, user_id)

        if (
            self.hs.config.experimental.msc3202_device_masquerading_enabled
            and DEVICE_ID_ARG_NAME in request.args
        ):
            effective_device_id = request.args[DEVICE_ID_ARG_NAME][0].decode("utf8")
            # We only just set this so it can't be None!
            assert effective_device_id is not None
            device_opt = await self.store.get_device(
                effective_user_id, effective_device_id
            )
            if device_opt is None:
                # For now, use 400 M_EXCLUSIVE if the device doesn't exist.
                # This is an open thread of discussion on MSC3202 as of 2021-12-09.
                raise AuthError(
                    400,
                    f"Application service trying to use a device that doesn't exist ('{effective_device_id}' for {effective_user_id})",
                    Codes.EXCLUSIVE,
                )
        if app_service.sender == user_id:
            return app_service.sender, app_service

        return effective_user_id, effective_device_id, app_service
        return user_id, app_service

    async def get_user_by_access_token(
        self,
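To illustrate the masquerading parameters described in the docstring above, here is a hypothetical appservice request; the endpoint, server name, and identifiers are placeholders, and the `org.matrix.msc3202.device_id` parameter is only honoured when the experimental MSC3202 flag is enabled:

```python
# Hypothetical illustration of MSC3202 masquerading; all values are placeholders.
from urllib.parse import urlencode

params = urlencode(
    {
        "user_id": "@alice:example.org",                 # user in the AS namespace
        "org.matrix.msc3202.device_id": "ALICEDEVICE1",  # unstable MSC3202 parameter
    }
)
url = f"https://synapse.example.org/_matrix/client/v3/account/whoami?{params}"
# The appservice sends this with its own access token; Synapse resolves the
# effective user (and, with msc3202_device_masquerading enabled, the device).
```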
@@ -253,9 +253,5 @@ class GuestAccess:
    FORBIDDEN: Final = "forbidden"


class ReceiptTypes:
    READ: Final = "m.read"


class ReadReceiptEventFields:
    MSC2285_HIDDEN: Final = "org.matrix.msc2285.hidden"
@@ -11,14 +11,10 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
from enum import Enum
from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Pattern

import attr
from netaddr import IPSet
from typing import TYPE_CHECKING, Iterable, List, Match, Optional

from synapse.api.constants import EventTypes
from synapse.events import EventBase

@@ -37,13 +33,6 @@ class ApplicationServiceState(Enum):
    UP = "up"


@attr.s(slots=True, frozen=True, auto_attribs=True)
class Namespace:
    exclusive: bool
    group_id: Optional[str]
    regex: Pattern[str]


class ApplicationService:
    """Defines an application service. This definition is mostly what is
    provided to the /register AS API.

@@ -61,17 +50,17 @@ class ApplicationService:

    def __init__(
        self,
        token: str,
        hostname: str,
        id: str,
        sender: str,
        url: Optional[str] = None,
        namespaces: Optional[JsonDict] = None,
        hs_token: Optional[str] = None,
        protocols: Optional[Iterable[str]] = None,
        rate_limited: bool = True,
        ip_range_whitelist: Optional[IPSet] = None,
        supports_ephemeral: bool = False,
        token,
        hostname,
        id,
        sender,
        url=None,
        namespaces=None,
        hs_token=None,
        protocols=None,
        rate_limited=True,
        ip_range_whitelist=None,
        supports_ephemeral=False,
    ):
        self.token = token
        self.url = (

@@ -96,33 +85,27 @@ class ApplicationService:

        self.rate_limited = rate_limited

    def _check_namespaces(
        self, namespaces: Optional[JsonDict]
    ) -> Dict[str, List[Namespace]]:
    def _check_namespaces(self, namespaces):
        # Sanity check that it is of the form:
        # {
        #   users: [ {regex: "[A-z]+.*", exclusive: true}, ...],
        #   aliases: [ {regex: "[A-z]+.*", exclusive: true}, ...],
        #   rooms: [ {regex: "[A-z]+.*", exclusive: true}, ...],
        # }
        if namespaces is None:
        if not namespaces:
            namespaces = {}

        result: Dict[str, List[Namespace]] = {}

        for ns in ApplicationService.NS_LIST:
            result[ns] = []

            if ns not in namespaces:
                namespaces[ns] = []
                continue

            if not isinstance(namespaces[ns], list):
            if type(namespaces[ns]) != list:
                raise ValueError("Bad namespace value for '%s'" % ns)
            for regex_obj in namespaces[ns]:
                if not isinstance(regex_obj, dict):
                    raise ValueError("Expected dict regex for ns '%s'" % ns)
                exclusive = regex_obj.get("exclusive")
                if not isinstance(exclusive, bool):
                if not isinstance(regex_obj.get("exclusive"), bool):
                    raise ValueError("Expected bool for 'exclusive' in ns '%s'" % ns)
                group_id = regex_obj.get("group_id")
                if group_id:

@@ -143,26 +126,22 @@ class ApplicationService:
                    )

                regex = regex_obj.get("regex")
                if not isinstance(regex, str):
                if isinstance(regex, str):
                    regex_obj["regex"] = re.compile(regex)  # Pre-compile regex
                else:
                    raise ValueError("Expected string for 'regex' in ns '%s'" % ns)
        return namespaces

                # Pre-compile regex.
                result[ns].append(Namespace(exclusive, group_id, re.compile(regex)))

        return result

    def _matches_regex(
        self, namespace_key: str, test_string: str
    ) -> Optional[Namespace]:
        for namespace in self.namespaces[namespace_key]:
            if namespace.regex.match(test_string):
                return namespace
    def _matches_regex(self, test_string: str, namespace_key: str) -> Optional[Match]:
        for regex_obj in self.namespaces[namespace_key]:
            if regex_obj["regex"].match(test_string):
                return regex_obj
        return None

    def _is_exclusive(self, namespace_key: str, test_string: str) -> bool:
        namespace = self._matches_regex(namespace_key, test_string)
        if namespace:
            return namespace.exclusive
    def _is_exclusive(self, ns_key: str, test_string: str) -> bool:
        regex_obj = self._matches_regex(test_string, ns_key)
        if regex_obj:
            return regex_obj["exclusive"]
        return False

    async def _matches_user(

@@ -281,15 +260,15 @@ class ApplicationService:

    def is_interested_in_user(self, user_id: str) -> bool:
        return (
            bool(self._matches_regex(ApplicationService.NS_USERS, user_id))
            bool(self._matches_regex(user_id, ApplicationService.NS_USERS))
            or user_id == self.sender
        )

    def is_interested_in_alias(self, alias: str) -> bool:
        return bool(self._matches_regex(ApplicationService.NS_ALIASES, alias))
        return bool(self._matches_regex(alias, ApplicationService.NS_ALIASES))

    def is_interested_in_room(self, room_id: str) -> bool:
        return bool(self._matches_regex(ApplicationService.NS_ROOMS, room_id))
        return bool(self._matches_regex(room_id, ApplicationService.NS_ROOMS))

    def is_exclusive_user(self, user_id: str) -> bool:
        return (

@@ -306,14 +285,14 @@ class ApplicationService:
    def is_exclusive_room(self, room_id: str) -> bool:
        return self._is_exclusive(ApplicationService.NS_ROOMS, room_id)

    def get_exclusive_user_regexes(self) -> List[Pattern[str]]:
    def get_exclusive_user_regexes(self):
        """Get the list of regexes used to determine if a user is exclusively
        registered by the AS
        """
        return [
            namespace.regex
            for namespace in self.namespaces[ApplicationService.NS_USERS]
            if namespace.exclusive
            regex_obj["regex"]
            for regex_obj in self.namespaces[ApplicationService.NS_USERS]
            if regex_obj["exclusive"]
        ]

    def get_groups_for_user(self, user_id: str) -> Iterable[str]:

@@ -326,15 +305,15 @@ class ApplicationService:
            An iterable that yields group_id strings.
        """
        return (
            namespace.group_id
            for namespace in self.namespaces[ApplicationService.NS_USERS]
            if namespace.group_id and namespace.regex.match(user_id)
            regex_obj["group_id"]
            for regex_obj in self.namespaces[ApplicationService.NS_USERS]
            if "group_id" in regex_obj and regex_obj["regex"].match(user_id)
        )

    def is_rate_limited(self) -> bool:
        return self.rate_limited

    def __str__(self) -> str:
    def __str__(self):
        # copy dictionary and redact token fields so they don't get logged
        dict_copy = self.__dict__.copy()
        dict_copy["token"] = "<redacted>"
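For reference, the structure validated by `_check_namespaces` above corresponds to the `namespaces` section of an appservice registration file. A hedged sketch (the regexes and the bridge name are hypothetical):

```yaml
# Hypothetical appservice registration snippet matching the shape checked above.
namespaces:
  users:
    - exclusive: true
      regex: "@_examplebridge_.*"
  aliases:
    - exclusive: false
      regex: "#_examplebridge_.*"
  rooms: []
```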
@@ -12,8 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import urllib.parse
from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple
import urllib
from typing import TYPE_CHECKING, List, Optional, Tuple

from prometheus_client import Counter

@@ -53,7 +53,7 @@ HOUR_IN_MS = 60 * 60 * 1000
APP_SERVICE_PREFIX = "/_matrix/app/unstable"


def _is_valid_3pe_metadata(info: JsonDict) -> bool:
def _is_valid_3pe_metadata(info):
    if "instances" not in info:
        return False
    if not isinstance(info["instances"], list):

@@ -61,7 +61,7 @@ def _is_valid_3pe_metadata(info: JsonDict) -> bool:
    return True


def _is_valid_3pe_result(r: JsonDict, field: str) -> bool:
def _is_valid_3pe_result(r, field):
    if not isinstance(r, dict):
        return False

@@ -93,13 +93,9 @@ class ApplicationServiceApi(SimpleHttpClient):
            hs.get_clock(), "as_protocol_meta", timeout_ms=HOUR_IN_MS
        )

    async def query_user(self, service: "ApplicationService", user_id: str) -> bool:
    async def query_user(self, service, user_id):
        if service.url is None:
            return False

        # This is required by the configuration.
        assert service.hs_token is not None

        uri = service.url + ("/users/%s" % urllib.parse.quote(user_id))
        try:
            response = await self.get_json(uri, {"access_token": service.hs_token})

@@ -113,13 +109,9 @@ class ApplicationServiceApi(SimpleHttpClient):
            logger.warning("query_user to %s threw exception %s", uri, ex)
        return False

    async def query_alias(self, service: "ApplicationService", alias: str) -> bool:
    async def query_alias(self, service, alias):
        if service.url is None:
            return False

        # This is required by the configuration.
        assert service.hs_token is not None

        uri = service.url + ("/rooms/%s" % urllib.parse.quote(alias))
        try:
            response = await self.get_json(uri, {"access_token": service.hs_token})

@@ -133,13 +125,7 @@ class ApplicationServiceApi(SimpleHttpClient):
            logger.warning("query_alias to %s threw exception %s", uri, ex)
        return False

    async def query_3pe(
        self,
        service: "ApplicationService",
        kind: str,
        protocol: str,
        fields: Dict[bytes, List[bytes]],
    ) -> List[JsonDict]:
    async def query_3pe(self, service, kind, protocol, fields):
        if kind == ThirdPartyEntityKind.USER:
            required_field = "userid"
        elif kind == ThirdPartyEntityKind.LOCATION:

@@ -219,14 +205,11 @@ class ApplicationServiceApi(SimpleHttpClient):
        events: List[EventBase],
        ephemeral: List[JsonDict],
        txn_id: Optional[int] = None,
    ) -> bool:
    ):
        if service.url is None:
            return True

        # This is required by the configuration.
        assert service.hs_token is not None

        serialized_events = self._serialize(service, events)
        events = self._serialize(service, events)

        if txn_id is None:
            logger.warning(

@@ -238,12 +221,9 @@ class ApplicationServiceApi(SimpleHttpClient):

        # Never send ephemeral events to appservices that do not support it
        if service.supports_ephemeral:
            body = {
                "events": serialized_events,
                "de.sorunome.msc2409.ephemeral": ephemeral,
            }
            body = {"events": events, "de.sorunome.msc2409.ephemeral": ephemeral}
        else:
            body = {"events": serialized_events}
            body = {"events": events}

        try:
            await self.put_json(

@@ -258,7 +238,7 @@ class ApplicationServiceApi(SimpleHttpClient):
                [event.get("event_id") for event in events],
            )
            sent_transactions_counter.labels(service.id).inc()
            sent_events_counter.labels(service.id).inc(len(serialized_events))
            sent_events_counter.labels(service.id).inc(len(events))
            return True
        except CodeMessageException as e:
            logger.warning(

@@ -280,9 +260,7 @@ class ApplicationServiceApi(SimpleHttpClient):
            failed_transactions_counter.labels(service.id).inc()
            return False

    def _serialize(
        self, service: "ApplicationService", events: Iterable[EventBase]
    ) -> List[JsonDict]:
    def _serialize(self, service, events):
        time_now = self.clock.time_msec()
        return [
            serialize_event(
@@ -48,19 +48,13 @@ This is all tied together by the AppServiceScheduler which DIs the required
components.
"""
import logging
from typing import TYPE_CHECKING, Awaitable, Callable, Dict, List, Optional, Set
from typing import List, Optional

from synapse.appservice import ApplicationService, ApplicationServiceState
from synapse.appservice.api import ApplicationServiceApi
from synapse.events import EventBase
from synapse.logging.context import run_in_background
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.databases.main import DataStore
from synapse.types import JsonDict
from synapse.util import Clock

if TYPE_CHECKING:
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)

@@ -78,7 +72,7 @@ class ApplicationServiceScheduler:
    case is a simple array.
    """

    def __init__(self, hs: "HomeServer"):
    def __init__(self, hs):
        self.clock = hs.get_clock()
        self.store = hs.get_datastore()
        self.as_api = hs.get_application_service_api()

@@ -86,7 +80,7 @@ class ApplicationServiceScheduler:
        self.txn_ctrl = _TransactionController(self.clock, self.store, self.as_api)
        self.queuer = _ServiceQueuer(self.txn_ctrl, self.clock)

    async def start(self) -> None:
    async def start(self):
        logger.info("Starting appservice scheduler")

        # check for any DOWN ASes and start recoverers for them.

@@ -97,14 +91,12 @@ class ApplicationServiceScheduler:
        for service in services:
            self.txn_ctrl.start_recoverer(service)

    def submit_event_for_as(
        self, service: ApplicationService, event: EventBase
    ) -> None:
    def submit_event_for_as(self, service: ApplicationService, event: EventBase):
        self.queuer.enqueue_event(service, event)

    def submit_ephemeral_events_for_as(
        self, service: ApplicationService, events: List[JsonDict]
    ) -> None:
    ):
        self.queuer.enqueue_ephemeral(service, events)


@@ -116,18 +108,16 @@ class _ServiceQueuer:
    appservice at a given time.
    """

    def __init__(self, txn_ctrl: "_TransactionController", clock: Clock):
        # dict of {service_id: [events]}
        self.queued_events: Dict[str, List[EventBase]] = {}
        # dict of {service_id: [events]}
        self.queued_ephemeral: Dict[str, List[JsonDict]] = {}
    def __init__(self, txn_ctrl, clock):
        self.queued_events = {}  # dict of {service_id: [events]}
        self.queued_ephemeral = {}  # dict of {service_id: [events]}

        # the appservices which currently have a transaction in flight
        self.requests_in_flight: Set[str] = set()
        self.requests_in_flight = set()
        self.txn_ctrl = txn_ctrl
        self.clock = clock

    def _start_background_request(self, service: ApplicationService) -> None:
    def _start_background_request(self, service):
        # start a sender for this appservice if we don't already have one
        if service.id in self.requests_in_flight:
            return

@@ -136,17 +126,15 @@ class _ServiceQueuer:
            "as-sender-%s" % (service.id,), self._send_request, service
        )

    def enqueue_event(self, service: ApplicationService, event: EventBase) -> None:
    def enqueue_event(self, service: ApplicationService, event: EventBase):
        self.queued_events.setdefault(service.id, []).append(event)
        self._start_background_request(service)

    def enqueue_ephemeral(
        self, service: ApplicationService, events: List[JsonDict]
    ) -> None:
    def enqueue_ephemeral(self, service: ApplicationService, events: List[JsonDict]):
        self.queued_ephemeral.setdefault(service.id, []).extend(events)
        self._start_background_request(service)

    async def _send_request(self, service: ApplicationService) -> None:
    async def _send_request(self, service: ApplicationService):
        # sanity-check: we shouldn't get here if this service already has a sender
        # running.
        assert service.id not in self.requests_in_flight

@@ -180,15 +168,20 @@ class _TransactionController:
    if a transaction fails.

    (Note we only have one of these in the homeserver.)

    Args:
        clock (synapse.util.Clock):
        store (synapse.storage.DataStore):
        as_api (synapse.appservice.api.ApplicationServiceApi):
    """

    def __init__(self, clock: Clock, store: DataStore, as_api: ApplicationServiceApi):
    def __init__(self, clock, store, as_api):
        self.clock = clock
        self.store = store
        self.as_api = as_api

        # map from service id to recoverer instance
        self.recoverers: Dict[str, "_Recoverer"] = {}
        self.recoverers = {}

        # for UTs
        self.RECOVERER_CLASS = _Recoverer

@@ -198,7 +191,7 @@ class _TransactionController:
        service: ApplicationService,
        events: List[EventBase],
        ephemeral: Optional[List[JsonDict]] = None,
    ) -> None:
    ):
        try:
            txn = await self.store.create_appservice_txn(
                service=service, events=events, ephemeral=ephemeral or []

@@ -214,7 +207,7 @@ class _TransactionController:
            logger.exception("Error creating appservice transaction")
            run_in_background(self._on_txn_fail, service)

    async def on_recovered(self, recoverer: "_Recoverer") -> None:
    async def on_recovered(self, recoverer):
        logger.info(
            "Successfully recovered application service AS ID %s", recoverer.service.id
        )

@@ -224,18 +217,18 @@ class _TransactionController:
            recoverer.service, ApplicationServiceState.UP
        )

    async def _on_txn_fail(self, service: ApplicationService) -> None:
    async def _on_txn_fail(self, service):
        try:
            await self.store.set_appservice_state(service, ApplicationServiceState.DOWN)
            self.start_recoverer(service)
        except Exception:
            logger.exception("Error starting AS recoverer")

    def start_recoverer(self, service: ApplicationService) -> None:
    def start_recoverer(self, service):
        """Start a Recoverer for the given service

        Args:
            service:
            service (synapse.appservice.ApplicationService):
        """
        logger.info("Starting recoverer for AS ID %s", service.id)
        assert service.id not in self.recoverers

@@ -264,14 +257,7 @@ class _Recoverer:
        callback (callable[_Recoverer]): called once the service recovers.
    """

    def __init__(
        self,
        clock: Clock,
        store: DataStore,
        as_api: ApplicationServiceApi,
        service: ApplicationService,
        callback: Callable[["_Recoverer"], Awaitable[None]],
    ):
    def __init__(self, clock, store, as_api, service, callback):
        self.clock = clock
        self.store = store
        self.as_api = as_api

@@ -279,8 +265,8 @@ class _Recoverer:
        self.callback = callback
        self.backoff_counter = 1

    def recover(self) -> None:
        def _retry() -> None:
    def recover(self):
        def _retry():
            run_as_background_process(
                "as-recoverer-%s" % (self.service.id,), self.retry
            )

@@ -289,13 +275,13 @@ class _Recoverer:
        logger.info("Scheduling retries on %s in %fs", self.service.id, delay)
        self.clock.call_later(delay, _retry)

    def _backoff(self) -> None:
    def _backoff(self):
        # cap the backoff to be around 8.5min => (2^9) = 512 secs
        if self.backoff_counter < 9:
            self.backoff_counter += 1
        self.recover()

    async def retry(self) -> None:
    async def retry(self):
        logger.info("Starting retries on %s", self.service.id)
        try:
            while True:
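A small aside on `_Recoverer._backoff` above: assuming the retry delay is `2 ** backoff_counter` seconds (the formula itself is outside the visible hunk; only the "cap ... (2^9) = 512 secs" comment shows), the implied schedule would be:

```python
from typing import List

def backoff_schedule(max_counter: int = 9) -> List[int]:
    """Hypothetical delays (in seconds) between appservice retry attempts,
    assuming delay = 2 ** backoff_counter with the counter capped at 9."""
    return [2 ** counter for counter in range(1, max_counter + 1)]

print(backoff_schedule())  # [2, 4, 8, 16, 32, 64, 128, 256, 512]; 512 s is ~8.5 min
```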
@@ -147,7 +147,8 @@ def _load_appservice(
    # protocols check
    protocols = as_info.get("protocols")
    if protocols:
        if not isinstance(protocols, list):
        # Because strings are lists in python
        if isinstance(protocols, str) or not isinstance(protocols, list):
            raise KeyError("Optional 'protocols' must be a list if present.")
        for p in protocols:
            if not isinstance(p, str):
@@ -32,7 +32,7 @@ class ExperimentalConfig(Config):
        # MSC3026 (busy presence state)
        self.msc3026_enabled: bool = experimental.get("msc3026_enabled", False)

        # MSC2716 (importing historical messages)
        # MSC2716 (backfill existing history)
        self.msc2716_enabled: bool = experimental.get("msc2716_enabled", False)

        # MSC2285 (hidden read receipts)

@@ -49,8 +49,3 @@ class ExperimentalConfig(Config):

        # MSC3030 (Jump to date API endpoint)
        self.msc3030_enabled: bool = experimental.get("msc3030_enabled", False)

        # The portion of MSC3202 which is related to device masquerading.
        self.msc3202_device_masquerading_enabled: bool = experimental.get(
            "msc3202_device_masquerading", False
        )
@@ -16,14 +16,12 @@
import hashlib
import logging
import os
from typing import Any, Dict, Iterator, List, Optional
from typing import Any, Dict

import attr
import jsonschema
from signedjson.key import (
NACL_ED25519,
SigningKey,
VerifyKey,
decode_signing_key_base64,
decode_verify_key_bytes,
generate_signing_key,
@@ -33,7 +31,6 @@ from signedjson.key import (
)
from unpaddedbase64 import decode_base64

from synapse.types import JsonDict
from synapse.util.stringutils import random_string, random_string_with_symbols

from ._base import Config, ConfigError
@@ -84,13 +81,14 @@ To suppress this warning and continue using 'matrix.org', admins should set
logger = logging.getLogger(__name__)


@attr.s(slots=True, auto_attribs=True)
@attr.s
class TrustedKeyServer:
# name of the server.
server_name: str
# string: name of the server.
server_name = attr.ib()

# map from key id to key object, or None to disable signature verification.
verify_keys: Optional[Dict[str, VerifyKey]] = None
# dict[str,VerifyKey]|None: map from key id to key object, or None to disable
# signature verification.
verify_keys = attr.ib(default=None)


class KeyConfig(Config):
@@ -281,15 +279,15 @@ class KeyConfig(Config):
% locals()
)

def read_signing_keys(self, signing_key_path: str, name: str) -> List[SigningKey]:
def read_signing_keys(self, signing_key_path, name):
"""Read the signing keys in the given path.

Args:
signing_key_path
name: Associated config key name
signing_key_path (str)
name (str): Associated config key name

Returns:
The signing keys read from the given path.
list[SigningKey]
"""

signing_keys = self.read_file(signing_key_path, name)
@@ -298,9 +296,7 @@ class KeyConfig(Config):
except Exception as e:
raise ConfigError("Error reading %s: %s" % (name, str(e)))

def read_old_signing_keys(
self, old_signing_keys: Optional[JsonDict]
) -> Dict[str, VerifyKey]:
def read_old_signing_keys(self, old_signing_keys):
if old_signing_keys is None:
return {}
keys = {}
@@ -344,7 +340,7 @@ class KeyConfig(Config):
write_signing_keys(signing_key_file, (key,))


def _perspectives_to_key_servers(config: JsonDict) -> Iterator[JsonDict]:
def _perspectives_to_key_servers(config):
"""Convert old-style 'perspectives' configs into new-style 'trusted_key_servers'

Returns an iterable of entries to add to trusted_key_servers.
@@ -406,9 +402,7 @@ TRUSTED_KEY_SERVERS_SCHEMA = {
}


def _parse_key_servers(
key_servers: List[Any], federation_verify_certificates: bool
) -> Iterator[TrustedKeyServer]:
def _parse_key_servers(key_servers, federation_verify_certificates):
try:
jsonschema.validate(key_servers, TRUSTED_KEY_SERVERS_SCHEMA)
except jsonschema.ValidationError as e:
@@ -450,7 +444,7 @@ def _parse_key_servers(
yield result


def _assert_keyserver_has_verify_keys(trusted_key_server: TrustedKeyServer) -> None:
def _assert_keyserver_has_verify_keys(trusted_key_server):
if not trusted_key_server.verify_keys:
raise ConfigError(INSECURE_NOTARY_ERROR)


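The `TrustedKeyServer` hunk is the usual attrs migration between explicit `attr.ib()` fields with type comments and `@attr.s(slots=True, auto_attribs=True)` with real annotations. The two spellings are equivalent at runtime, as the toy class below shows (it is illustrative, not the Synapse one):

```python
from typing import Optional

import attr

@attr.s  # explicit attr.ib() per field; types live in comments
class ServerOld:
    server_name = attr.ib()  # str
    verify_keys = attr.ib(default=None)  # Optional[dict]

@attr.s(slots=True, auto_attribs=True)  # annotations define the fields
class ServerNew:
    server_name: str
    verify_keys: Optional[dict] = None

assert ServerOld("example.org").server_name == ServerNew("example.org").server_name
```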
@@ -22,12 +22,10 @@ from ._base import Config, ConfigError

@attr.s
class MetricsFlags:
known_servers: bool = attr.ib(
default=False, validator=attr.validators.instance_of(bool)
)
known_servers = attr.ib(default=False, validator=attr.validators.instance_of(bool))

@classmethod
def all_off(cls) -> "MetricsFlags":
def all_off(cls):
"""
Instantiate the flags with all options set to off.
"""

@@ -1257,7 +1257,7 @@ class ServerConfig(Config):
help="Turn on the twisted telnet manhole service on the given port.",
)

def read_gc_intervals(self, durations: Any) -> Optional[Tuple[float, float, float]]:
def read_gc_intervals(self, durations) -> Optional[Tuple[float, float, float]]:
"""Reads the three durations for the GC min interval option, returning seconds."""
if durations is None:
return None

@@ -132,7 +132,7 @@ class TlsConfig(Config):
self.tls_certificate: Optional[crypto.X509] = None
self.tls_private_key: Optional[crypto.PKey] = None

def read_certificate_from_disk(self) -> None:
def read_certificate_from_disk(self):
"""
Read the certificates and private key from disk.
"""

@@ -454,26 +454,23 @@ class EventClientSerializer:
return

event_id = event.event_id
room_id = event.room_id

# The bundled aggregations to include.
aggregations = {}

annotations = await self.store.get_aggregation_groups_for_event(
event_id, room_id
)
annotations = await self.store.get_aggregation_groups_for_event(event_id)
if annotations.chunk:
aggregations[RelationTypes.ANNOTATION] = annotations.to_dict()

references = await self.store.get_relations_for_event(
event_id, room_id, RelationTypes.REFERENCE, direction="f"
event_id, RelationTypes.REFERENCE, direction="f"
)
if references.chunk:
aggregations[RelationTypes.REFERENCE] = references.to_dict()

edit = None
if event.type == EventTypes.Message:
edit = await self.store.get_applicable_edit(event_id, room_id)
edit = await self.store.get_applicable_edit(event_id)

if edit:
# If there is an edit replace the content, preserving existing
@@ -506,7 +503,7 @@ class EventClientSerializer:
(
thread_count,
latest_thread_event,
) = await self.store.get_thread_summary(event_id, room_id)
) = await self.store.get_thread_summary(event_id)
if latest_thread_event:
aggregations[RelationTypes.THREAD] = {
# Don't bundle aggregations as this could recurse forever.

@@ -30,6 +30,7 @@ from typing import (

from prometheus_client import Counter, Gauge, Histogram

from twisted.internet import defer
from twisted.internet.abstract import isIPAddress
from twisted.python import failure

@@ -66,7 +67,7 @@ from synapse.replication.http.federation import (
from synapse.storage.databases.main.lock import Lock
from synapse.types import JsonDict, get_domain_from_id
from synapse.util import glob_to_regex, json_decoder, unwrapFirstError
from synapse.util.async_helpers import Linearizer, concurrently_execute, gather_results
from synapse.util.async_helpers import Linearizer, concurrently_execute
from synapse.util.caches.response_cache import ResponseCache
from synapse.util.stringutils import parse_server_name

@@ -359,13 +360,13 @@ class FederationServer(FederationBase):
# want to block things like to device messages from reaching clients
# behind the potentially expensive handling of PDUs.
pdu_results, _ = await make_deferred_yieldable(
gather_results(
(
defer.gatherResults(
[
run_in_background(
self._handle_pdus_in_txn, origin, transaction, request_time
),
run_in_background(self._handle_edus_in_txn, origin, transaction),
),
],
consumeErrors=True,
).addErrback(unwrapFirstError)
)

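Several hunks in this compare move between `defer.gatherResults([...], consumeErrors=True).addErrback(unwrapFirstError)` and a `gather_results((...))` helper that folds those steps together. A hedged sketch of what such a wrapper can look like under Twisted — `gather_results` and `unwrap_first_error` are written out locally here, since only their call sites appear in the diff:

```python
from twisted.internet import defer
from twisted.python.failure import Failure

def unwrap_first_error(failure: Failure) -> Failure:
    # gatherResults wraps the first failure in FirstError; surface the cause.
    failure.trap(defer.FirstError)
    return failure.value.subFailure

def gather_results(deferreds, consume_errors=True):
    """Wait for several Deferreds, unwrapping FirstError on failure."""
    d = defer.gatherResults(list(deferreds), consumeErrors=consume_errors)
    d.addErrback(unwrap_first_error)
    return d

# Results arrive as a list in input order once every Deferred has fired.
gather_results([defer.succeed(1), defer.succeed(2)]).addCallback(print)  # [1, 2]
```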
@@ -997,7 +997,9 @@ class AuthHandler:
# really don't want is active access_tokens without a record of the
# device, so we double-check it here.
if device_id is not None:
if await self.store.get_device(user_id, device_id) is None:
try:
await self.store.get_device(user_id, device_id)
except StoreError:
await self.store.delete_access_token(access_token)
raise StoreError(400, "Login raced against device deletion")


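The `AuthHandler` and `DeviceWorkerHandler` hunks show the two calling conventions for `get_device`: one side raises `StoreError` when the row is missing, the other returns `None`. A small illustration of bridging the two styles (the store below is hypothetical, not Synapse's):

```python
class NotFoundError(Exception):
    """Raised when a device does not exist."""

class FakeStore:
    def __init__(self, devices):
        self._devices = devices

    def get_device(self, device_id):
        # Optional-returning style: None signals "not found".
        return self._devices.get(device_id)

def require_device(store, device_id):
    # Adapts the Optional-returning API back to an exception-raising one.
    device = store.get_device(device_id)
    if device is None:
        raise NotFoundError(device_id)
    return device

store = FakeStore({"DEV1": {"display_name": "laptop"}})
print(require_device(store, "DEV1"))  # {'display_name': 'laptop'}
```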
@@ -106,10 +106,10 @@ class DeviceWorkerHandler:
Raises:
errors.NotFoundError: if the device was not found
"""
device = await self.store.get_device(user_id, device_id)
if device is None:
raise errors.NotFoundError()

try:
device = await self.store.get_device(user_id, device_id)
except errors.StoreError:
raise errors.NotFoundError
ips = await self.store.get_last_client_ip_by_device(user_id, device_id)
_update_device_from_client_ips(device, ips)

@@ -602,8 +602,6 @@ class DeviceHandler(DeviceWorkerHandler):
access_token, device_id
)
old_device = await self.store.get_device(user_id, old_device_id)
if old_device is None:
raise errors.NotFoundError()
await self.store.update_device(user_id, device_id, old_device["display_name"])
# can't call self.delete_device because that will clobber the
# access token so call the storage layer directly

@@ -65,12 +65,8 @@ class E2eKeysHandler:
else:
# Only register this edu handler on master as it requires writing
# device updates to the db
federation_registry.register_edu_handler(
"m.signing_key_update",
self._edu_updater.incoming_signing_key_update,
)
# also handle the unstable version
# FIXME: remove this when enough servers have upgraded
#
# FIXME: switch to m.signing_key_update when MSC1756 is merged into the spec
federation_registry.register_edu_handler(
"org.matrix.signing_key_update",
self._edu_updater.incoming_signing_key_update,
@@ -580,9 +576,7 @@ class E2eKeysHandler:
log_kv(
{"message": "Did not update one_time_keys", "reason": "no keys given"}
)
fallback_keys = keys.get("fallback_keys") or keys.get(
"org.matrix.msc2732.fallback_keys"
)
fallback_keys = keys.get("org.matrix.msc2732.fallback_keys", None)
if fallback_keys and isinstance(fallback_keys, dict):
log_kv(
{

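The `fallback_keys` hunk replaces a lookup of only the unstable `org.matrix.msc2732.fallback_keys` field with one that prefers the stable name and falls back to the prefixed one. The lookup order, extracted into a standalone function with illustrative payloads:

```python
def get_fallback_keys(keys: dict):
    # Prefer the stable field name, then the unstable MSC2732-prefixed one.
    return keys.get("fallback_keys") or keys.get("org.matrix.msc2732.fallback_keys")

print(get_fallback_keys({"fallback_keys": {"alg:dev": "key1"}}))                     # stable wins
print(get_fallback_keys({"org.matrix.msc2732.fallback_keys": {"alg:dev": "key2"}}))  # fallback
print(get_fallback_keys({}))                                                         # None
```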
@@ -14,9 +14,7 @@
# limitations under the License.

import logging
from typing import TYPE_CHECKING, Dict, Optional

from typing_extensions import Literal
from typing import TYPE_CHECKING, List, Optional

from synapse.api.errors import (
Codes,
@@ -26,7 +24,6 @@ from synapse.api.errors import (
SynapseError,
)
from synapse.logging.opentracing import log_kv, trace
from synapse.storage.databases.main.e2e_room_keys import RoomKey
from synapse.types import JsonDict
from synapse.util.async_helpers import Linearizer

@@ -61,9 +58,7 @@ class E2eRoomKeysHandler:
version: str,
room_id: Optional[str] = None,
session_id: Optional[str] = None,
) -> Dict[
Literal["rooms"], Dict[str, Dict[Literal["sessions"], Dict[str, RoomKey]]]
]:
) -> List[JsonDict]:
"""Bulk get the E2E room keys for a given backup, optionally filtered to a given
room, or a given session.
See EndToEndRoomKeyStore.get_e2e_room_keys for full details.
@@ -77,8 +72,8 @@ class E2eRoomKeysHandler:
Raises:
NotFoundError: if the backup version does not exist
Returns:
A dict giving the session_data and message metadata for these room keys.
`{"rooms": {room_id: {"sessions": {session_id: room_key}}}}`
A list of dicts giving the session_data and message metadata for
these room keys.
"""

# we deliberately take the lock to get keys so that changing the version
@@ -278,7 +273,7 @@ class E2eRoomKeysHandler:

@staticmethod
def _should_replace_room_key(
current_room_key: Optional[RoomKey], room_key: RoomKey
current_room_key: Optional[JsonDict], room_key: JsonDict
) -> bool:
"""
Determine whether to replace a given current_room_key (if any)

@@ -79,14 +79,13 @@ class EventStreamHandler:
# thundering herds on restart.
timeout = random.randint(int(timeout * 0.9), int(timeout * 1.1))

stream_result = await self.notifier.get_events_for(
events, tokens = await self.notifier.get_events_for(
auth_user,
pagin_config,
timeout,
is_guest=is_guest,
explicit_room_id=room_id,
)
events = stream_result.events

time_now = self.clock.time_msec()

@@ -129,8 +128,8 @@ class EventStreamHandler:

chunk = {
"chunk": chunks,
"start": await stream_result.start_token.to_string(self.store),
"end": await stream_result.end_token.to_string(self.store),
"start": await tokens[0].to_string(self.store),
"end": await tokens[1].to_string(self.store),
}

return chunk

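The `EventStreamHandler` hunk swaps a bare `(events, tokens)` tuple from `get_events_for` for a structured result with named `events`, `start_token` and `end_token` fields. A minimal sketch of that refactor pattern (all names below are illustrative):

```python
import attr

@attr.s(slots=True, frozen=True, auto_attribs=True)
class StreamResult:
    events: list
    start_token: str
    end_token: str

def get_events_for() -> StreamResult:
    return StreamResult(events=["$e1", "$e2"], start_token="s1", end_token="s9")

result = get_events_for()
chunk = {
    "chunk": result.events,
    "start": result.start_token,  # self-describing, unlike tokens[0]
    "end": result.end_token,      # self-describing, unlike tokens[1]
}
print(chunk)
```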
@@ -360,34 +360,31 @@ class FederationHandler:

logger.debug("calling resolve_state_groups in _maybe_backfill")
resolve = preserve_fn(self.state_handler.resolve_state_groups_for_events)
states_list = await make_deferred_yieldable(
states = await make_deferred_yieldable(
defer.gatherResults(
[resolve(room_id, [e]) for e in event_ids], consumeErrors=True
)
)

# A map from event_id to state map of event_ids.
state_ids: Dict[str, StateMap[str]] = dict(
zip(event_ids, [s.state for s in states_list])
)
# dict[str, dict[tuple, str]], a map from event_id to state map of
# event_ids.
states = dict(zip(event_ids, [s.state for s in states]))

state_map = await self.store.get_events(
[e_id for ids in state_ids.values() for e_id in ids.values()],
[e_id for ids in states.values() for e_id in ids.values()],
get_prev_content=False,
)

# A map from event_id to state map of events.
state_events: Dict[str, StateMap[EventBase]] = {
states = {
key: {
k: state_map[e_id]
for k, e_id in state_dict.items()
if e_id in state_map
}
for key, state_dict in state_ids.items()
for key, state_dict in states.items()
}

for e_id in event_ids:
likely_extremeties_domains = get_domains_from_state(state_events[e_id])
likely_extremeties_domains = get_domains_from_state(states[e_id])

success = await try_backfill(
[

@@ -13,27 +13,21 @@
# limitations under the License.

import logging
from typing import TYPE_CHECKING, List, Optional, Tuple, cast
from typing import TYPE_CHECKING, List, Optional, Tuple

from twisted.internet import defer

from synapse.api.constants import EduTypes, EventTypes, Membership
from synapse.api.errors import SynapseError
from synapse.events import EventBase
from synapse.events.validator import EventValidator
from synapse.handlers.presence import format_user_presence_state
from synapse.handlers.receipts import ReceiptEventSource
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.storage.roommember import RoomsForUser
from synapse.streams.config import PaginationConfig
from synapse.types import (
JsonDict,
Requester,
RoomStreamToken,
StateMap,
StreamToken,
UserID,
)
from synapse.types import JsonDict, Requester, RoomStreamToken, StreamToken, UserID
from synapse.util import unwrapFirstError
from synapse.util.async_helpers import concurrently_execute, gather_results
from synapse.util.async_helpers import concurrently_execute
from synapse.util.caches.response_cache import ResponseCache
from synapse.visibility import filter_events_for_client

@@ -196,13 +190,14 @@ class InitialSyncHandler:
)
deferred_room_state = run_in_background(
self.state_store.get_state_for_events, [event.event_id]
).addCallback(
lambda states: cast(StateMap[EventBase], states[event.event_id])
)
deferred_room_state.addCallback(
lambda states: states[event.event_id]
)

(messages, token), current_state = await make_deferred_yieldable(
gather_results(
(
defer.gatherResults(
[
run_in_background(
self.store.get_recent_events_for_room,
event.room_id,
@@ -210,7 +205,7 @@ class InitialSyncHandler:
end_token=room_end_token,
),
deferred_room_state,
)
]
)
).addErrback(unwrapFirstError)

@@ -459,8 +454,8 @@ class InitialSyncHandler:
return receipts

presence, receipts, (messages, token) = await make_deferred_yieldable(
gather_results(
(
defer.gatherResults(
[
run_in_background(get_presence),
run_in_background(get_receipts),
run_in_background(
@@ -469,7 +464,7 @@ class InitialSyncHandler:
limit=limit,
end_token=now_token.room_key,
),
),
],
consumeErrors=True,
).addErrback(unwrapFirstError)
)

@@ -21,6 +21,7 @@ from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Tuple

from canonicaljson import encode_canonical_json

from twisted.internet import defer
from twisted.internet.interfaces import IDelayedCall

from synapse import event_auth
@@ -56,7 +57,7 @@ from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.storage.state import StateFilter
from synapse.types import Requester, RoomAlias, StreamToken, UserID, create_requester
from synapse.util import json_decoder, json_encoder, log_failure
from synapse.util.async_helpers import Linearizer, gather_results, unwrapFirstError
from synapse.util.async_helpers import Linearizer, unwrapFirstError
from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.metrics import measure_func
from synapse.visibility import filter_events_for_client
@@ -495,7 +496,6 @@ class EventCreationHandler:
require_consent: bool = True,
outlier: bool = False,
historical: bool = False,
allow_no_prev_events: bool = False,
depth: Optional[int] = None,
) -> Tuple[EventBase, EventContext]:
"""
@@ -607,7 +607,6 @@ class EventCreationHandler:
prev_event_ids=prev_event_ids,
auth_event_ids=auth_event_ids,
depth=depth,
allow_no_prev_events=allow_no_prev_events,
)

# In an ideal world we wouldn't need the second part of this condition. However,
@@ -883,7 +882,6 @@ class EventCreationHandler:
prev_event_ids: Optional[List[str]] = None,
auth_event_ids: Optional[List[str]] = None,
depth: Optional[int] = None,
allow_no_prev_events: bool = False,
) -> Tuple[EventBase, EventContext]:
"""Create a new event for a local client

@@ -914,7 +912,6 @@ class EventCreationHandler:
full_state_ids_at_event = None
if auth_event_ids is not None:
# If auth events are provided, prev events must be also.
# prev_event_ids could be an empty array though.
assert prev_event_ids is not None

# Copy the full auth state before it stripped down
@@ -946,22 +943,14 @@ class EventCreationHandler:
else:
prev_event_ids = await self.store.get_prev_events_for_room(builder.room_id)

# Do a quick sanity check here, rather than waiting until we've created the
# we now ought to have some prev_events (unless it's a create event).
#
# do a quick sanity check here, rather than waiting until we've created the
# event and then try to auth it (which fails with a somewhat confusing "No
# create event in auth events")
if allow_no_prev_events:
# We allow events with no `prev_events` but it better have some `auth_events`
assert (
builder.type == EventTypes.Create
# Allow an event to have empty list of prev_event_ids
# only if it has auth_event_ids.
or auth_event_ids
), "Attempting to create a non-m.room.create event with no prev_events or auth_event_ids"
else:
# we now ought to have some prev_events (unless it's a create event).
assert (
builder.type == EventTypes.Create or prev_event_ids
), "Attempting to create a non-m.room.create event with no prev_events"
assert (
builder.type == EventTypes.Create or len(prev_event_ids) > 0
), "Attempting to create an event with no prev_events"

event = await builder.build(
prev_event_ids=prev_event_ids,
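The `allow_no_prev_events` hunk tightens the sanity check: an event may carry an empty `prev_events` list only when that is explicitly allowed, and then only if it has `auth_events` (or is the `m.room.create` event). The invariant as a standalone check (names here are illustrative):

```python
CREATE = "m.room.create"

def check_prev_events(event_type, prev_event_ids, auth_event_ids, allow_no_prev_events=False):
    if allow_no_prev_events:
        # An empty prev_events list is acceptable, but only with auth_events.
        assert event_type == CREATE or auth_event_ids, (
            "non-create event with no prev_events or auth_event_ids"
        )
    else:
        # The usual case: every non-create event needs prev_events.
        assert event_type == CREATE or prev_event_ids, (
            "non-create event with no prev_events"
        )

check_prev_events("m.room.member", [], ["$auth1"], allow_no_prev_events=True)  # passes
```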
@@ -1167,9 +1156,9 @@ class EventCreationHandler:

# We now persist the event (and update the cache in parallel, since we
# don't want to block on it).
result, _ = await make_deferred_yieldable(
gather_results(
(
result = await make_deferred_yieldable(
defer.gatherResults(
[
run_in_background(
self._persist_event,
requester=requester,
@@ -1181,12 +1170,12 @@ class EventCreationHandler:
run_in_background(
self.cache_joined_hosts_for_event, event, context
).addErrback(log_failure, "cache_joined_hosts_for_event failed"),
),
],
consumeErrors=True,
)
).addErrback(unwrapFirstError)

return result
return result[0]

async def _persist_event(
self,

@@ -14,7 +14,7 @@
import logging
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple

from synapse.api.constants import ReadReceiptEventFields, ReceiptTypes
from synapse.api.constants import ReadReceiptEventFields
from synapse.appservice import ApplicationService
from synapse.streams import EventSource
from synapse.types import JsonDict, ReadReceipt, UserID, get_domain_from_id
@@ -178,7 +178,7 @@ class ReceiptEventSource(EventSource[int, JsonDict]):

for event_id in content.keys():
event_content = content.get(event_id, {})
m_read = event_content.get(ReceiptTypes.READ, {})
m_read = event_content.get("m.read", {})

# If m_read is missing copy over the original event_content as there is nothing to process here
if not m_read:
@@ -206,7 +206,7 @@ class ReceiptEventSource(EventSource[int, JsonDict]):

# Set new users unless empty
if len(new_users.keys()) > 0:
new_event["content"][event_id] = {ReceiptTypes.READ: new_users}
new_event["content"][event_id] = {"m.read": new_users}

# Append new_event to visible_events unless empty
if len(new_event["content"].keys()) > 0:

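The receipts hunks (and the sync hunk below) toggle between the literal `"m.read"` and a `ReceiptTypes.READ` constant. A sketch of the constant-class pattern — the value comes from the Matrix spec, while the class shape here is illustrative:

```python
class ReceiptTypes:
    READ = "m.read"

event_content = {"m.read": {"@alice:example.org": {"ts": 1640000000000}}}

# The constant keeps the protocol string in one place instead of
# scattering "m.read" literals across handlers.
m_read = event_content.get(ReceiptTypes.READ, {})
print(m_read)
```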
@@ -658,8 +658,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
if block_invite:
raise SynapseError(403, "Invites have been disabled on this server")

# An empty prev_events list is allowed as long as the auth_event_ids are present
if prev_event_ids is not None:
if prev_event_ids:
return await self._local_membership_update(
requester=requester,
target=target,
@@ -1020,7 +1019,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
# Add new room to the room directory if the old room was there
# Remove old room from the room directory
old_room = await self.store.get_room(old_room_id)
if old_room is not None and old_room["is_public"]:
if old_room and old_room["is_public"]:
await self.store.set_room_is_public(old_room_id, False)
await self.store.set_room_is_public(room_id, True)

@@ -1031,9 +1030,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
local_group_ids = await self.store.get_local_groups_for_room(old_room_id)
for group_id in local_group_ids:
# Add new the new room to those groups
await self.store.add_room_to_group(
group_id, room_id, old_room is not None and old_room["is_public"]
)
await self.store.add_room_to_group(group_id, room_id, old_room["is_public"])

# Remove the old room from those groups
await self.store.remove_room_from_group(group_id, old_room_id)

@@ -28,7 +28,7 @@ from typing import (
import attr
from prometheus_client import Counter

from synapse.api.constants import AccountDataTypes, EventTypes, Membership, ReceiptTypes
from synapse.api.constants import AccountDataTypes, EventTypes, Membership
from synapse.api.filtering import FilterCollection
from synapse.api.presence import UserPresenceState
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
@@ -1046,7 +1046,7 @@ class SyncHandler:
last_unread_event_id = await self.store.get_last_receipt_event_id_for_user(
user_id=sync_config.user.to_string(),
room_id=room_id,
receipt_type=ReceiptTypes.READ,
receipt_type="m.read",
)

notifs = await self.store.get_unread_event_push_actions_by_room_for_user(
@@ -1662,20 +1662,20 @@ class SyncHandler:
) -> _RoomChanges:
"""Determine the changes in rooms to report to the user.

This function is a first pass at generating the rooms part of the sync response.
It determines which rooms have changed during the sync period, and categorises
them into four buckets: "knock", "invite", "join" and "leave".
Ideally, we want to report all events whose stream ordering `s` lies in the
range `since_token < s <= now_token`, where the two tokens are read from the
sync_result_builder.

1. Finds all membership changes for the user in the sync period (from
`since_token` up to `now_token`).
2. Uses those to place the room in one of the four categories above.
3. Builds a `_RoomChanges` struct to record this, and return that struct.
If there are too many events in that range to report, things get complicated.
In this situation we return a truncated list of the most recent events, and
indicate in the response that there is a "gap" of omitted events. Additionally:

For rooms classified as "knock", "invite" or "leave", we just need to report
a single membership event in the eventual /sync response. For "join" we need
to fetch additional non-membership events, e.g. messages in the room. That is
more complicated, so instead we report an intermediary `RoomSyncResultBuilder`
struct, and leave the additional work to `_generate_room_entry`.
- we include a "state_delta", to describe the changes in state over the gap,
- we include all membership events applying to the user making the request,
even those in the gap.

See the spec for the rationale:
https://spec.matrix.org/v1.1/client-server-api/#syncing

The sync_result_builder is not modified by this function.
"""
@@ -1686,6 +1686,16 @@ class SyncHandler:

assert since_token

# The spec
# https://spec.matrix.org/v1.1/client-server-api/#get_matrixclientv3sync
# notes that membership events need special consideration:
#
# > When a sync is limited, the server MUST return membership events for events
# > in the gap (between since and the start of the returned timeline), regardless
# > as to whether or not they are redundant.
#
# We fetch such events here, but we only seem to use them for categorising rooms
# as newly joined, newly left, invited or knocked.
# TODO: we've already called this function and ran this query in
# _have_rooms_changed. We could keep the results in memory to avoid a
# second query, at the cost of more complicated source code.
@@ -1999,23 +2009,6 @@ class SyncHandler:
"""Populates the `joined` and `archived` section of `sync_result_builder`
based on the `room_builder`.

Ideally, we want to report all events whose stream ordering `s` lies in the
range `since_token < s <= now_token`, where the two tokens are read from the
sync_result_builder.

If there are too many events in that range to report, things get complicated.
In this situation we return a truncated list of the most recent events, and
indicate in the response that there is a "gap" of omitted events. Lots of this
is handled in `_load_filtered_recents`, but some of is handled in this method.

Additionally:
- we include a "state_delta", to describe the changes in state over the gap,
- we include all membership events applying to the user making the request,
even those in the gap.

See the spec for the rationale:
https://spec.matrix.org/v1.1/client-server-api/#syncing

Args:
sync_result_builder
ignored_users: Set of users ignored by user.

@@ -25,7 +25,7 @@ from synapse.api.errors import SynapseError
class RequestTimedOutError(SynapseError):
"""Exception representing timeout of an outbound request"""

def __init__(self, msg: str):
def __init__(self, msg):
super().__init__(504, msg)


@@ -33,7 +33,7 @@ ACCESS_TOKEN_RE = re.compile(r"(\?.*access(_|%5[Ff])token=)[^&]*(.*)$")
CLIENT_SECRET_RE = re.compile(r"(\?.*client(_|%5[Ff])secret=)[^&]*(.*)$")


def redact_uri(uri: str) -> str:
def redact_uri(uri):
"""Strips sensitive information from the uri replaces with <redacted>"""
uri = ACCESS_TOKEN_RE.sub(r"\1<redacted>\3", uri)
return CLIENT_SECRET_RE.sub(r"\1<redacted>\3", uri)
@@ -46,7 +46,7 @@ class QuieterFileBodyProducer(FileBodyProducer):
https://twistedmatrix.com/trac/ticket/6528
"""

def stopProducing(self) -> None:
def stopProducing(self):
try:
FileBodyProducer.stopProducing(self)
except task.TaskStopped:

@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import TYPE_CHECKING, Any, Awaitable, Callable, Optional, Tuple
from typing import TYPE_CHECKING

from twisted.web.server import Request

@@ -32,11 +32,7 @@ class AdditionalResource(DirectServeJsonResource):
and exception handling.
"""

def __init__(
self,
hs: "HomeServer",
handler: Callable[[Request], Awaitable[Optional[Tuple[int, Any]]]],
):
def __init__(self, hs: "HomeServer", handler):
"""Initialise AdditionalResource

The ``handler`` should return a deferred which completes when it has
@@ -51,7 +47,7 @@ class AdditionalResource(DirectServeJsonResource):
super().__init__()
self._handler = handler

async def _async_render(self, request: Request) -> Optional[Tuple[int, Any]]:
def _async_render(self, request: Request):
# Cheekily pass the result straight through, so we don't need to worry
# if its an awaitable or not.
return await self._handler(request)
return self._handler(request)

@@ -14,7 +14,6 @@
# limitations under the License.
import logging
import urllib.parse
from http import HTTPStatus
from io import BytesIO
from typing import (
TYPE_CHECKING,
@@ -281,9 +280,7 @@ class BlacklistingAgentWrapper(Agent):
ip_address, self._ip_whitelist, self._ip_blacklist
):
logger.info("Blocking access to %s due to blacklist" % (ip_address,))
e = SynapseError(
HTTPStatus.FORBIDDEN, "IP address blocked by IP blacklist entry"
)
e = SynapseError(403, "IP address blocked by IP blacklist entry")
return defer.fail(Failure(e))

return self._agent.request(
@@ -722,9 +719,7 @@ class SimpleHttpClient:

if response.code > 299:
logger.warning("Got %d when downloading %s" % (response.code, url))
raise SynapseError(
HTTPStatus.BAD_GATEWAY, "Got error %d" % (response.code,), Codes.UNKNOWN
)
raise SynapseError(502, "Got error %d" % (response.code,), Codes.UNKNOWN)

# TODO: if our Content-Type is HTML or something, just read the first
# N bytes into RAM rather than saving it all to disk only to read it
@@ -736,14 +731,12 @@ class SimpleHttpClient:
)
except BodyExceededMaxSize:
raise SynapseError(
HTTPStatus.BAD_GATEWAY,
502,
"Requested file is too large > %r bytes" % (max_size,),
Codes.TOO_LARGE,
)
except Exception as e:
raise SynapseError(
HTTPStatus.BAD_GATEWAY, ("Failed to download remote body: %s" % e)
) from e
raise SynapseError(502, ("Failed to download remote body: %s" % e)) from e

return (
length,

@@ -25,7 +25,6 @@ from zope.interface import implementer
from twisted.internet import defer
from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS
from twisted.internet.interfaces import (
IProtocol,
IProtocolFactory,
IReactorCore,
IStreamClientEndpoint,
@@ -310,14 +309,12 @@ class MatrixHostnameEndpoint:

self._srv_resolver = srv_resolver

def connect(
self, protocol_factory: IProtocolFactory
) -> "defer.Deferred[IProtocol]":
def connect(self, protocol_factory: IProtocolFactory) -> defer.Deferred:
"""Implements IStreamClientEndpoint interface"""

return run_in_background(self._do_connect, protocol_factory)

async def _do_connect(self, protocol_factory: IProtocolFactory) -> IProtocol:
async def _do_connect(self, protocol_factory: IProtocolFactory) -> None:
first_exception = None

server_list = await self._resolve_server()

@@ -19,7 +19,6 @@ import random
import sys
import typing
import urllib.parse
from http import HTTPStatus
from io import BytesIO, StringIO
from typing import (
TYPE_CHECKING,
@@ -1155,7 +1154,7 @@ class MatrixFederationHttpClient:
request.destination,
msg,
)
raise SynapseError(HTTPStatus.BAD_GATEWAY, msg, Codes.TOO_LARGE)
raise SynapseError(502, msg, Codes.TOO_LARGE)
except defer.TimeoutError as e:
logger.warning(
"{%s} [%s] Timed out reading response - %s %s",

@@ -30,7 +30,6 @@ from typing import (
Iterable,
Iterator,
List,
NoReturn,
Optional,
Pattern,
Tuple,
@@ -171,9 +170,7 @@ def return_html_error(
respond_with_html(request, code, body)


def wrap_async_request_handler(
h: Callable[["_AsyncResource", SynapseRequest], Awaitable[None]]
) -> Callable[["_AsyncResource", SynapseRequest], "defer.Deferred[None]"]:
def wrap_async_request_handler(h):
"""Wraps an async request handler so that it calls request.processing.

This helps ensure that work done by the request handler after the request is completed
@@ -186,9 +183,7 @@ def wrap_async_request_handler(
logged until the deferred completes.
"""

async def wrapped_async_request_handler(
self: "_AsyncResource", request: SynapseRequest
) -> None:
async def wrapped_async_request_handler(self, request):
with request.processing():
await h(self, request)

@@ -245,18 +240,18 @@ class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta):
context from the request the servlet is handling.
"""

def __init__(self, extract_context: bool = False):
def __init__(self, extract_context=False):
super().__init__()

self._extract_context = extract_context

def render(self, request: SynapseRequest) -> int:
def render(self, request):
"""This gets called by twisted every time someone sends us a request."""
defer.ensureDeferred(self._async_render_wrapper(request))
return NOT_DONE_YET

@wrap_async_request_handler
async def _async_render_wrapper(self, request: SynapseRequest) -> None:
async def _async_render_wrapper(self, request: SynapseRequest):
"""This is a wrapper that delegates to `_async_render` and handles
exceptions, return values, metrics, etc.
"""
@@ -276,7 +271,7 @@ class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta):
f = failure.Failure()
self._send_error_response(f, request)

async def _async_render(self, request: SynapseRequest) -> Optional[Tuple[int, Any]]:
async def _async_render(self, request: Request):
"""Delegates to `_async_render_<METHOD>` methods, or returns a 400 if
no appropriate method exists. Can be overridden in sub classes for
different routing.
@@ -323,7 +318,7 @@ class DirectServeJsonResource(_AsyncResource):
formatting responses and errors as JSON.
"""

def __init__(self, canonical_json: bool = False, extract_context: bool = False):
def __init__(self, canonical_json=False, extract_context=False):
super().__init__(extract_context)
self.canonical_json = canonical_json

@@ -332,7 +327,7 @@ class DirectServeJsonResource(_AsyncResource):
request: SynapseRequest,
code: int,
response_object: Any,
) -> None:
):
"""Implements _AsyncResource._send_response"""
# TODO: Only enable CORS for the requests that need it.
respond_with_json(
@@ -373,45 +368,34 @@ class JsonResource(DirectServeJsonResource):

isLeaf = True

def __init__(
self,
hs: "HomeServer",
canonical_json: bool = True,
extract_context: bool = False,
):
def __init__(self, hs: "HomeServer", canonical_json=True, extract_context=False):
super().__init__(canonical_json, extract_context)
self.clock = hs.get_clock()
self.path_regexs: Dict[bytes, List[_PathEntry]] = {}
self.hs = hs

def register_paths(
self,
method: str,
path_patterns: Iterable[Pattern],
callback: ServletCallback,
servlet_classname: str,
) -> None:
def register_paths(self, method, path_patterns, callback, servlet_classname):
"""
Registers a request handler against a regular expression. Later request URLs are
checked against these regular expressions in order to identify an appropriate
handler for that request.

Args:
method: GET, POST etc
method (str): GET, POST etc

path_patterns: A list of regular expressions to which the request
URLs are compared.
path_patterns (Iterable[str]): A list of regular expressions to which
the request URLs are compared.

callback: The handler for the request. Usually a Servlet
callback (function): The handler for the request. Usually a Servlet

servlet_classname: The name of the handler to be used in prometheus
servlet_classname (str): The name of the handler to be used in prometheus
and opentracing logs.
"""
method_bytes = method.encode("utf-8")
method = method.encode("utf-8") # method is bytes on py3

for path_pattern in path_patterns:
logger.debug("Registering for %s %s", method, path_pattern.pattern)
self.path_regexs.setdefault(method_bytes, []).append(
self.path_regexs.setdefault(method, []).append(
_PathEntry(path_pattern, callback, servlet_classname)
)

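`register_paths` above builds a per-method table of compiled path regexes mapped to handler callbacks, which the handler lookup later scans. A stripped-down sketch of that dispatch table outside Twisted (the `Router` class is hypothetical):

```python
import re
from typing import Callable, Dict, List, Pattern, Tuple

class Router:
    def __init__(self) -> None:
        # method (as bytes, mirroring Twisted) -> [(compiled pattern, callback)]
        self.path_regexs: Dict[bytes, List[Tuple[Pattern, Callable]]] = {}

    def register_paths(self, method: str, path_patterns, callback: Callable) -> None:
        method_bytes = method.encode("utf-8")
        for pattern in path_patterns:
            self.path_regexs.setdefault(method_bytes, []).append((pattern, callback))

    def dispatch(self, method: str, path: str):
        for pattern, callback in self.path_regexs.get(method.encode("utf-8"), []):
            match = pattern.match(path)
            if match:
                return callback(**match.groupdict())
        raise KeyError("unrecognised request")

router = Router()
router.register_paths("GET", [re.compile(r"^/rooms/(?P<room_id>[^/]+)$")], lambda room_id: room_id)
print(router.dispatch("GET", "/rooms/!abc"))  # !abc
```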
@@ -443,7 +427,7 @@ class JsonResource(DirectServeJsonResource):
# Huh. No one wanted to handle that? Fiiiiiine. Send 400.
return _unrecognised_request_handler, "unrecognised_request_handler", {}

async def _async_render(self, request: SynapseRequest) -> Tuple[int, Any]:
async def _async_render(self, request):
callback, servlet_classname, group_dict = self._get_handler_for_request(request)

# Make sure we have an appropriate name for this handler in prometheus
@@ -484,7 +468,7 @@ class DirectServeHtmlResource(_AsyncResource):
request: SynapseRequest,
code: int,
response_object: Any,
) -> None:
):
"""Implements _AsyncResource._send_response"""
# We expect to get bytes for us to write
assert isinstance(response_object, bytes)
@@ -508,12 +492,12 @@ class StaticResource(File):
Differs from the File resource by adding clickjacking protection.
"""

def render_GET(self, request: Request) -> bytes:
def render_GET(self, request: Request):
set_clickjacking_protection_headers(request)
return super().render_GET(request)


def _unrecognised_request_handler(request: Request) -> NoReturn:
def _unrecognised_request_handler(request):
"""Request handler for unrecognised requests

This is a request handler suitable for return from
@@ -521,7 +505,7 @@ def _unrecognised_request_handler(request: Request) -> NoReturn:
UnrecognizedRequestError.

Args:
request: Unused, but passed in to match the signature of ServletCallback.
request (twisted.web.http.Request):
"""
raise UnrecognizedRequestError()

@@ -529,14 +513,14 @@ def _unrecognised_request_handler(request: Request) -> NoReturn:
class RootRedirect(resource.Resource):
"""Redirects the root '/' path to another path."""

def __init__(self, path: str):
def __init__(self, path):
resource.Resource.__init__(self)
self.url = path

def render_GET(self, request: Request) -> bytes:
def render_GET(self, request):
return redirectTo(self.url.encode("ascii"), request)

def getChild(self, name: str, request: Request) -> resource.Resource:
def getChild(self, name, request):
if len(name) == 0:
return self # select ourselves as the child to render
return resource.Resource.getChild(self, name, request)
@@ -545,7 +529,7 @@ class RootRedirect(resource.Resource):
class OptionsResource(resource.Resource):
"""Responds to OPTION requests for itself and all children."""

def render_OPTIONS(self, request: Request) -> bytes:
def render_OPTIONS(self, request):
request.setResponseCode(204)
request.setHeader(b"Content-Length", b"0")

@@ -553,7 +537,7 @@ class OptionsResource(resource.Resource):

return b""

def getChildWithDefault(self, path: str, request: Request) -> resource.Resource:
def getChildWithDefault(self, path, request):
if request.method == b"OPTIONS":
return self # select ourselves as the child to render
return resource.Resource.getChildWithDefault(self, path, request)
@@ -665,7 +649,7 @@ def respond_with_json(
json_object: Any,
send_cors: bool = False,
canonical_json: bool = True,
) -> Optional[int]:
):
"""Sends encoded JSON in response to the given request.

Args:
@@ -712,7 +696,7 @@ def respond_with_json_bytes(
code: int,
json_bytes: bytes,
send_cors: bool = False,
) -> Optional[int]:
):
"""Sends encoded JSON in response to the given request.

Args:
@@ -729,7 +713,7 @@ def respond_with_json_bytes(
logger.warning(
"Not sending response to request %s, already disconnected.", request
)
return None
return

request.setResponseCode(code)
request.setHeader(b"Content-Type", b"application/json")
@@ -747,7 +731,7 @@ async def _async_write_json_to_request_in_thread(
request: SynapseRequest,
json_encoder: Callable[[Any], bytes],
json_object: Any,
) -> None:
):
"""Encodes the given JSON object on a thread and then writes it to the
request.

@@ -789,7 +773,7 @@ def _write_bytes_to_request(request: Request, bytes_to_write: bytes) -> None:
_ByteProducer(request, bytes_generator)


def set_cors_headers(request: Request) -> None:
def set_cors_headers(request: Request):
"""Set the CORS headers so that javascript running in a web browsers can
use this API

@@ -806,14 +790,14 @@ def set_cors_headers(request: Request) -> None:
)


def respond_with_html(request: Request, code: int, html: str) -> None:
def respond_with_html(request: Request, code: int, html: str):
"""
Wraps `respond_with_html_bytes` by first encoding HTML from a str to UTF-8 bytes.
"""
respond_with_html_bytes(request, code, html.encode("utf-8"))


def respond_with_html_bytes(request: Request, code: int, html_bytes: bytes) -> None:
def respond_with_html_bytes(request: Request, code: int, html_bytes: bytes):
"""
Sends HTML (encoded as UTF-8 bytes) as the response to the given request.

@@ -831,7 +815,7 @@ def respond_with_html_bytes(request: Request, code: int, html_bytes: bytes) -> N
logger.warning(
"Not sending response to request %s, already disconnected.", request
)
return None
return

request.setResponseCode(code)
request.setHeader(b"Content-Type", b"text/html; charset=utf-8")
@@ -844,7 +828,7 @@ def respond_with_html_bytes(request: Request, code: int, html_bytes: bytes) -> N
finish_request(request)


def set_clickjacking_protection_headers(request: Request) -> None:
def set_clickjacking_protection_headers(request: Request):
"""
Set headers to guard against clickjacking of embedded content.

@@ -866,7 +850,7 @@ def respond_with_redirect(request: Request, url: bytes) -> None:
finish_request(request)


def finish_request(request: Request) -> None:
def finish_request(request: Request):
"""Finish writing the response to the request.

Twisted throws a RuntimeException if the connection closed before the

@@ -14,7 +14,6 @@

""" This module contains base REST classes for constructing REST servlets. """
import logging
from http import HTTPStatus
from typing import (
TYPE_CHECKING,
Iterable,
@@ -31,7 +30,6 @@ from typing_extensions import Literal
from twisted.web.server import Request

from synapse.api.errors import Codes, SynapseError
from synapse.http.server import HttpServer
from synapse.types import JsonDict, RoomAlias, RoomID
from synapse.util import json_decoder

@@ -139,15 +137,11 @@ def parse_integer_from_args(
return int(args[name_bytes][0])
except Exception:
message = "Query parameter %r must be an integer" % (name,)
raise SynapseError(
HTTPStatus.BAD_REQUEST, message, errcode=Codes.INVALID_PARAM
)
raise SynapseError(400, message, errcode=Codes.INVALID_PARAM)
else:
if required:
message = "Missing integer query parameter %r" % (name,)
raise SynapseError(
HTTPStatus.BAD_REQUEST, message, errcode=Codes.MISSING_PARAM
)
raise SynapseError(400, message, errcode=Codes.MISSING_PARAM)
else:
return default

@@ -252,15 +246,11 @@ def parse_boolean_from_args(
message = (
"Boolean query parameter %r must be one of ['true', 'false']"
) % (name,)
raise SynapseError(
HTTPStatus.BAD_REQUEST, message, errcode=Codes.INVALID_PARAM
)
raise SynapseError(400, message)
else:
if required:
message = "Missing boolean query parameter %r" % (name,)
raise SynapseError(
HTTPStatus.BAD_REQUEST, message, errcode=Codes.MISSING_PARAM
)
raise SynapseError(400, message, errcode=Codes.MISSING_PARAM)
else:
return default

@@ -323,7 +313,7 @@ def parse_bytes_from_args(
return args[name_bytes][0]
elif required:
message = "Missing string query parameter %s" % (name,)
raise SynapseError(HTTPStatus.BAD_REQUEST, message, errcode=Codes.MISSING_PARAM)
raise SynapseError(400, message, errcode=Codes.MISSING_PARAM)

return default

@@ -417,16 +407,14 @@ def _parse_string_value(
try:
value_str = value.decode(encoding)
except ValueError:
raise SynapseError(
HTTPStatus.BAD_REQUEST, "Query parameter %r must be %s" % (name, encoding)
)
raise SynapseError(400, "Query parameter %r must be %s" % (name, encoding))

if allowed_values is not None and value_str not in allowed_values:
message = "Query parameter %r must be one of [%s]" % (
name,
", ".join(repr(v) for v in allowed_values),
)
raise SynapseError(HTTPStatus.BAD_REQUEST, message, errcode=Codes.INVALID_PARAM)
raise SynapseError(400, message)
else:
return value_str

@@ -522,9 +510,7 @@ def parse_strings_from_args(
else:
if required:
message = "Missing string query parameter %r" % (name,)
raise SynapseError(
HTTPStatus.BAD_REQUEST, message, errcode=Codes.MISSING_PARAM
)
raise SynapseError(400, message, errcode=Codes.MISSING_PARAM)

return default

@@ -652,7 +638,7 @@ def parse_json_value_from_request(
try:
content_bytes = request.content.read() # type: ignore
except Exception:
raise SynapseError(HTTPStatus.BAD_REQUEST, "Error reading JSON content.")
raise SynapseError(400, "Error reading JSON content.")

if not content_bytes and allow_empty_body:
return None
@@ -661,9 +647,7 @@ def parse_json_value_from_request(
content = json_decoder.decode(content_bytes.decode("utf-8"))
except Exception as e:
logger.warning("Unable to parse JSON: %s (%s)", e, content_bytes)
raise SynapseError(
HTTPStatus.BAD_REQUEST, "Content not JSON.", errcode=Codes.NOT_JSON
)
raise SynapseError(400, "Content not JSON.", errcode=Codes.NOT_JSON)

return content

@@ -689,7 +673,7 @@ def parse_json_object_from_request(

if not isinstance(content, dict):
message = "Content must be a JSON object."
raise SynapseError(HTTPStatus.BAD_REQUEST, message, errcode=Codes.BAD_JSON)
raise SynapseError(400, message, errcode=Codes.BAD_JSON)

return content

@@ -701,9 +685,7 @@ def assert_params_in_dict(body: JsonDict, required: Iterable[str]) -> None:
absent.append(k)

if len(absent) > 0:
raise SynapseError(
HTTPStatus.BAD_REQUEST, "Missing params: %r" % absent, Codes.MISSING_PARAM
)
raise SynapseError(400, "Missing params: %r" % absent, Codes.MISSING_PARAM)


class RestServlet:
@@ -727,7 +709,7 @@ class RestServlet:
into the appropriate HTTP response.
"""

def register(self, http_server: HttpServer) -> None:
def register(self, http_server):
"""Register this servlet with the given HTTP server."""
patterns = getattr(self, "PATTERNS", None)
if patterns:
@@ -776,12 +758,10 @@ class ResolveRoomIdMixin:
resolved_room_id = room_id.to_string()
else:
raise SynapseError(
HTTPStatus.BAD_REQUEST,
"%s was not legal room ID or room alias" % (room_identifier,),
400, "%s was not legal room ID or room alias" % (room_identifier,)
)
if not resolved_room_id:
raise SynapseError(
HTTPStatus.BAD_REQUEST,
"Unknown room ID or room alias %s" % room_identifier,
400, "Unknown room ID or room alias %s" % room_identifier
)
return resolved_room_id, remote_room_hosts

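The servlet hunks above repeatedly trade numeric status codes for `http.HTTPStatus` names. The enum members compare equal to their integers, so the swap is purely about readability:

```python
from http import HTTPStatus

assert HTTPStatus.BAD_REQUEST == 400
assert HTTPStatus.BAD_GATEWAY == 502

def make_error(code: int, message: str) -> dict:
    # int() works on HTTPStatus members too, since they are IntEnum values.
    return {"code": int(code), "error": message}

print(make_error(HTTPStatus.BAD_REQUEST, "Missing params"))  # {'code': 400, ...}
```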
@@ -14,7 +14,7 @@
import contextlib
import logging
import time
from typing import Any, Generator, Optional, Tuple, Union
from typing import Generator, Optional, Tuple, Union

import attr
from zope.interface import implementer
@@ -66,9 +66,9 @@ class SynapseRequest(Request):
self,
channel: HTTPChannel,
site: "SynapseSite",
*args: Any,
*args,
max_request_body_size: int = 1024,
**kw: Any,
**kw,
):
super().__init__(channel, *args, **kw)
self._max_request_body_size = max_request_body_size
@@ -557,7 +557,7 @@ class SynapseSite(Site):
proxied = config.http_options.x_forwarded
request_class = XForwardedForRequest if proxied else SynapseRequest

def request_factory(channel: HTTPChannel, queued: bool) -> Request:
def request_factory(channel, queued: bool) -> Request:
return request_class(
channel,
self,

@@ -22,33 +22,20 @@ them.
|
||||
|
||||
See doc/log_contexts.rst for details on how this works.
|
||||
"""
|
||||
import inspect
|
||||
import logging
|
||||
import threading
|
||||
import typing
|
||||
import warnings
|
||||
from types import TracebackType
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Any,
|
||||
Awaitable,
|
||||
Callable,
|
||||
Optional,
|
||||
Tuple,
|
||||
Type,
|
||||
TypeVar,
|
||||
Union,
|
||||
overload,
|
||||
)
|
||||
from typing import TYPE_CHECKING, Optional, Tuple, TypeVar, Union
|
||||
|
||||
import attr
|
||||
from typing_extensions import Literal
|
||||
|
||||
from twisted.internet import defer, threads
|
||||
from twisted.python.threadpool import ThreadPool
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from synapse.logging.scopecontextmanager import _LogContextScope
|
||||
from synapse.types import ISynapseReactor
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -79,7 +66,7 @@ except Exception:
|
||||
|
||||
|
||||
# a hook which can be set during testing to assert that we aren't abusing logcontexts.
|
||||
def logcontext_error(msg: str) -> None:
|
||||
def logcontext_error(msg: str):
|
||||
logger.warning(msg)
|
||||
|
||||
|
||||
@@ -236,19 +223,22 @@ class _Sentinel:
|
||||
def __str__(self) -> str:
|
||||
return "sentinel"
|
||||
|
||||
def start(self, rusage: "Optional[resource.struct_rusage]") -> None:
|
||||
def copy_to(self, record):
|
||||
pass
|
||||
|
||||
def stop(self, rusage: "Optional[resource.struct_rusage]") -> None:
|
||||
def start(self, rusage: "Optional[resource.struct_rusage]"):
|
||||
pass
|
||||
|
||||
def add_database_transaction(self, duration_sec: float) -> None:
|
||||
def stop(self, rusage: "Optional[resource.struct_rusage]"):
|
||||
pass
|
||||
|
||||
def add_database_scheduled(self, sched_sec: float) -> None:
|
||||
def add_database_transaction(self, duration_sec):
|
||||
pass
|
||||
|
||||
def record_event_fetch(self, event_count: int) -> None:
|
||||
def add_database_scheduled(self, sched_sec):
|
||||
pass
|
||||
|
||||
def record_event_fetch(self, event_count):
|
||||
pass
|
||||
|
||||
def __bool__(self) -> Literal[False]:
|
||||
@@ -389,12 +379,7 @@ class LoggingContext:
|
||||
)
|
||||
return self
|
||||
|
||||
def __exit__(
|
||||
self,
|
||||
type: Optional[Type[BaseException]],
|
||||
value: Optional[BaseException],
|
||||
traceback: Optional[TracebackType],
|
||||
) -> None:
|
||||
def __exit__(self, type, value, traceback) -> None:
|
||||
"""Restore the logging context in thread local storage to the state it
|
||||
was before this context was entered.
|
||||
Returns:
|
||||
@@ -414,6 +399,17 @@ class LoggingContext:
|
||||
# recorded against the correct metrics.
|
||||
self.finished = True
|
||||
|
||||
def copy_to(self, record) -> None:
|
||||
"""Copy logging fields from this context to a log record or
|
||||
another LoggingContext
|
||||
"""
|
||||
|
||||
# we track the current request
|
||||
record.request = self.request
|
||||
|
||||
# we also track the current scope:
|
||||
record.scope = self.scope
|
||||
|
||||
def start(self, rusage: "Optional[resource.struct_rusage]") -> None:
|
||||
"""
|
||||
Record that this logcontext is currently running.
|
||||
@@ -630,12 +626,7 @@ class PreserveLoggingContext:
     def __enter__(self) -> None:
         self._old_context = set_current_context(self._new_context)

-    def __exit__(
-        self,
-        type: Optional[Type[BaseException]],
-        value: Optional[BaseException],
-        traceback: Optional[TracebackType],
-    ) -> None:
+    def __exit__(self, type, value, traceback) -> None:
         context = set_current_context(self._old_context)

         if context != self._new_context:
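`PreserveLoggingContext.__exit__` restores whatever context was current on entry, and warns if something else was left active. A rough sketch of that thread-local save/restore idiom, using a plain `threading.local` holder rather than Synapse's real machinery:

```python
import threading

_local = threading.local()


def set_current_context(new):
    """Swap in `new` and return whatever was current before."""
    old = getattr(_local, "context", None)
    _local.context = new
    return old


class PreserveContext:
    """Sketch of the PreserveLoggingContext idiom: restore on exit."""

    def __init__(self, new_context=None) -> None:
        self._new_context = new_context

    def __enter__(self) -> None:
        self._old_context = set_current_context(self._new_context)

    def __exit__(self, exc_type, exc_value, traceback) -> None:
        context = set_current_context(self._old_context)
        if context is not self._new_context:
            print("context was unexpectedly changed while preserved")


with PreserveContext("request-1"):
    pass  # anything run here sees "request-1"; the old context returns after
```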
@@ -720,61 +711,16 @@ def nested_logging_context(suffix: str) -> LoggingContext:
     )


-R = TypeVar("R")
-
-
-@overload
-def preserve_fn(  # type: ignore[misc]
-    f: Callable[..., Awaitable[R]],
-) -> Callable[..., "defer.Deferred[R]"]:
-    # The `type: ignore[misc]` above suppresses
-    # "Overloaded function signatures 1 and 2 overlap with incompatible return types"
-    ...
-
-
-@overload
-def preserve_fn(f: Callable[..., R]) -> Callable[..., "defer.Deferred[R]"]:
-    ...
-
-
-def preserve_fn(
-    f: Union[
-        Callable[..., R],
-        Callable[..., Awaitable[R]],
-    ]
-) -> Callable[..., "defer.Deferred[R]"]:
+def preserve_fn(f):
     """Function decorator which wraps the function with run_in_background"""

-    def g(*args: Any, **kwargs: Any) -> "defer.Deferred[R]":
+    def g(*args, **kwargs):
         return run_in_background(f, *args, **kwargs)

     return g


-@overload
-def run_in_background(  # type: ignore[misc]
-    f: Callable[..., Awaitable[R]], *args: Any, **kwargs: Any
-) -> "defer.Deferred[R]":
-    # The `type: ignore[misc]` above suppresses
-    # "Overloaded function signatures 1 and 2 overlap with incompatible return types"
-    ...
-
-
-@overload
-def run_in_background(
-    f: Callable[..., R], *args: Any, **kwargs: Any
-) -> "defer.Deferred[R]":
-    ...
-
-
-def run_in_background(
-    f: Union[
-        Callable[..., R],
-        Callable[..., Awaitable[R]],
-    ],
-    *args: Any,
-    **kwargs: Any,
-) -> "defer.Deferred[R]":
+def run_in_background(f, *args, **kwargs) -> defer.Deferred:
     """Calls a function, ensuring that the current context is restored after
     return from the function, and that the sentinel context is set once the
     deferred returned by the function completes.
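The removed `@overload` stanzas give the single runtime implementation two static signatures, one for plain callables and one for async callables, both normalised to a `Deferred`. A self-contained sketch of the same typing pattern (a hypothetical `to_deferred` helper, assuming Twisted is installed):

```python
import inspect
from typing import Any, Awaitable, Callable, TypeVar, Union, overload

from twisted.internet import defer

R = TypeVar("R")


@overload
def to_deferred(  # type: ignore[misc]  # overloads overlap, as in the diff
    f: Callable[..., Awaitable[R]], *args: Any
) -> "defer.Deferred[R]":
    ...


@overload
def to_deferred(f: Callable[..., R], *args: Any) -> "defer.Deferred[R]":
    ...


def to_deferred(
    f: Union[Callable[..., R], Callable[..., Awaitable[R]]], *args: Any
) -> "defer.Deferred[R]":
    """Call f and normalise its result to a Deferred."""
    res = f(*args)
    if inspect.isawaitable(res):
        return defer.ensureDeferred(res)
    return defer.succeed(res)
```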
@@ -805,10 +751,6 @@ def run_in_background(
     # At this point we should have a Deferred, if not then f was a synchronous
     # function, wrap it in a Deferred for consistency.
     if not isinstance(res, defer.Deferred):
-        # `res` is not a `Deferred` and not a `Coroutine`.
-        # There are no other types of `Awaitable`s we expect to encounter in Synapse.
-        assert not isinstance(res, Awaitable)
-
         return defer.succeed(res)

     if res.called and not res.paused:
@@ -836,14 +778,13 @@ def run_in_background(
     return res


-T = TypeVar("T")
-
-
-def make_deferred_yieldable(deferred: "defer.Deferred[T]") -> "defer.Deferred[T]":
-    """Given a deferred, make it follow the Synapse logcontext rules:
+def make_deferred_yieldable(deferred):
+    """Given a deferred (or coroutine), make it follow the Synapse logcontext
+    rules:

-    If the deferred has completed, essentially does nothing (just returns another
-    completed deferred with the result/failure).
+    If the deferred has completed (or is not actually a Deferred), essentially
+    does nothing (just returns another completed deferred with the
+    result/failure).

     If the deferred has not yet completed, resets the logcontext before
     returning a deferred. Then, when the deferred completes, restores the

@@ -851,6 +792,16 @@ def make_deferred_yieldable(deferred: "defer.Deferred[T]") -> "defer.Deferred[T]

     (This is more-or-less the opposite operation to run_in_background.)
     """
+    if inspect.isawaitable(deferred):
+        # If we're given a coroutine we convert it to a deferred so that we
+        # run it and find out if it immediately finishes; if it does then we
+        # don't need to fiddle with log contexts at all and can return
+        # immediately.
+        deferred = defer.ensureDeferred(deferred)
+
+    if not isinstance(deferred, defer.Deferred):
+        return deferred
+
     if deferred.called and not deferred.paused:
         # it looks like this deferred is ready to run any callbacks we give it
         # immediately. We may as well optimise out the logcontext faffery.
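The added block is what lets this version of `make_deferred_yieldable` accept a coroutine as well as a `Deferred`: `defer.ensureDeferred` starts the coroutine and wraps it. A standalone sketch of that conversion (assuming Twisted is installed; `fetch_value` is a made-up coroutine):

```python
import inspect

from twisted.internet import defer


async def fetch_value() -> int:
    return 42  # completes immediately, no real I/O


def normalise(awaitable_or_deferred):
    """Mirror of the diff's idea: coroutines become Deferreds up front."""
    if inspect.isawaitable(awaitable_or_deferred):
        # ensureDeferred starts the coroutine; if it finishes synchronously
        # the resulting Deferred is already called.
        awaitable_or_deferred = defer.ensureDeferred(awaitable_or_deferred)
    return awaitable_or_deferred


d = normalise(fetch_value())
assert isinstance(d, defer.Deferred)
d.addCallback(print)  # prints 42 without needing a running reactor
```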
@@ -872,9 +823,7 @@ def _set_context_cb(result: ResultT, context: LoggingContext) -> ResultT:
     return result


-def defer_to_thread(
-    reactor: "ISynapseReactor", f: Callable[..., R], *args: Any, **kwargs: Any
-) -> "defer.Deferred[R]":
+def defer_to_thread(reactor, f, *args, **kwargs):
     """
     Calls the function `f` using a thread from the reactor's default threadpool and
     returns the result as a Deferred.
@@ -906,13 +855,7 @@ def defer_to_thread(
     return defer_to_threadpool(reactor, reactor.getThreadPool(), f, *args, **kwargs)


-def defer_to_threadpool(
-    reactor: "ISynapseReactor",
-    threadpool: ThreadPool,
-    f: Callable[..., R],
-    *args: Any,
-    **kwargs: Any,
-) -> "defer.Deferred[R]":
+def defer_to_threadpool(reactor, threadpool, f, *args, **kwargs):
     """
     A wrapper for twisted.internet.threads.deferToThreadpool, which handles
     logcontexts correctly.

@@ -954,7 +897,7 @@ def defer_to_threadpool(
     assert isinstance(curr_context, LoggingContext)
     parent_context = curr_context

-    def g() -> R:
+    def g():
         with LoggingContext(str(curr_context), parent_context=parent_context):
             return f(*args, **kwargs)
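`defer_to_threadpool` is a thin wrapper around `twisted.internet.threads.deferToThreadPool` whose inner `g` re-enters a `LoggingContext` on the worker thread, so blocking work is attributed to the right request. The underlying Twisted call looks roughly like this (a sketch; `blocking_work` is illustrative, and the callback only fires under a running reactor):

```python
from twisted.internet import reactor
from twisted.internet.threads import deferToThreadPool


def blocking_work() -> str:
    # e.g. a CPU-bound or blocking-I/O task that must not stall the reactor
    return "done"


# Run on the reactor's default threadpool; the result arrives as a Deferred.
d = deferToThreadPool(reactor, reactor.getThreadPool(), blocking_work)
d.addCallback(print)
```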
@@ -13,6 +13,7 @@
 # limitations under the License.

 import logging
+from collections import namedtuple
 from typing import (
     Awaitable,
     Callable,
@@ -43,13 +44,7 @@ from synapse.logging.opentracing import log_kv, start_active_span
 from synapse.logging.utils import log_function
 from synapse.metrics import LaterGauge
 from synapse.streams.config import PaginationConfig
-from synapse.types import (
-    JsonDict,
-    PersistedEventPosition,
-    RoomStreamToken,
-    StreamToken,
-    UserID,
-)
+from synapse.types import PersistedEventPosition, RoomStreamToken, StreamToken, UserID
 from synapse.util.async_helpers import ObservableDeferred, timeout_deferred
 from synapse.util.metrics import Measure
 from synapse.visibility import filter_events_for_client
@@ -183,12 +178,7 @@ class _NotifierUserStream:
         return _NotificationListener(self.notify_deferred.observe())


-@attr.s(slots=True, frozen=True, auto_attribs=True)
-class EventStreamResult:
-    events: List[Union[JsonDict, EventBase]]
-    start_token: StreamToken
-    end_token: StreamToken
-
+class EventStreamResult(namedtuple("EventStreamResult", ("events", "tokens"))):
     def __bool__(self):
         return bool(self.events)
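This hunk (apparently from synapse/notifier.py) swaps an attrs class back to the older namedtuple shape: three named fields versus two fields with the tokens packed into a pair. A self-contained comparison of the two shapes, with placeholder field types:

```python
from collections import namedtuple

import attr

# Older shape: two fields, with the tokens packed into a pair.
LegacyResult = namedtuple("LegacyResult", ("events", "tokens"))

legacy = LegacyResult(events=["event"], tokens=("from", "to"))


# Newer shape: explicit, typed fields and cheap, immutable instances.
@attr.s(slots=True, frozen=True, auto_attribs=True)
class Result:
    events: list
    start_token: str
    end_token: str

    def __bool__(self) -> bool:
        return bool(self.events)


new = Result(events=["event"], start_token="from", end_token="to")
assert bool(new) and legacy.tokens[1] == new.end_token
```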
@@ -592,12 +582,9 @@ class Notifier:
             before_token: StreamToken, after_token: StreamToken
         ) -> EventStreamResult:
             if after_token == before_token:
-                return EventStreamResult([], from_token, from_token)
+                return EventStreamResult([], (from_token, from_token))

-            # The events fetched from each source are a JsonDict, EventBase, or
-            # UserPresenceState, but see below for UserPresenceState being
-            # converted to JsonDict.
-            events: List[Union[JsonDict, EventBase]] = []
+            events: List[EventBase] = []
             end_token = from_token

             for name, source in self.event_sources.sources.get_sources():
@@ -636,7 +623,7 @@ class Notifier:
                 events.extend(new_events)
                 end_token = end_token.copy_and_replace(keyname, new_key)

-            return EventStreamResult(events, from_token, end_token)
+            return EventStreamResult(events, (from_token, end_token))

         user_id_for_stream = user.to_string()
         if is_peeking:
@@ -13,7 +13,6 @@
 # limitations under the License.
 from typing import Dict

-from synapse.api.constants import ReceiptTypes
 from synapse.events import EventBase
 from synapse.push.presentable_names import calculate_room_name, name_from_member_event
 from synapse.storage import Storage
@@ -24,7 +23,7 @@ async def get_badge_count(store: DataStore, user_id: str, group_by_room: bool) -
     invites = await store.get_invited_rooms_for_local_user(user_id)
     joins = await store.get_rooms_for_user(user_id)

-    my_receipts_by_room = await store.get_receipts_for_user(user_id, ReceiptTypes.READ)
+    my_receipts_by_room = await store.get_receipts_for_user(user_id, "m.read")

     badge = len(invites)
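The only change here is a named constant versus its literal value: `ReceiptTypes.READ` in the newer branch spells the same `m.read` identifier that release-v1 writes inline. A sketch of the constants-holder idea (the real class lives in synapse.api.constants; this is not its verbatim definition):

```python
class ReceiptTypes:
    """Sketch of a constants holder for Matrix receipt types."""

    READ = "m.read"


assert ReceiptTypes.READ == "m.read"  # the two spellings are equivalent
```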
@@ -27,7 +27,6 @@ from synapse.push.pusher import PusherFactory
 from synapse.replication.http.push import ReplicationRemovePusherRestServlet
 from synapse.types import JsonDict, RoomStreamToken
 from synapse.util.async_helpers import concurrently_execute
-from synapse.util.threepids import canonicalise_email

 if TYPE_CHECKING:
     from synapse.server import HomeServer
@@ -114,9 +113,7 @@ class PusherPool:
         """

         if kind == "email":
-            email_owner = await self.store.get_user_id_by_threepid(
-                "email", canonicalise_email(pushkey)
-            )
+            email_owner = await self.store.get_user_id_by_threepid("email", pushkey)
             if email_owner != user_id:
                 raise SynapseError(400, "Email not found", Codes.THREEPID_NOT_FOUND)
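In this PusherPool hunk, the newer branch canonicalises the email pushkey before the threepid lookup so that case or whitespace differences cannot cause a spurious "Email not found". The exact body of `canonicalise_email` is not shown in this diff; the following is an assumed sketch of what such a helper does, not Synapse's verbatim code:

```python
def canonicalise_email(address: str) -> str:
    """Sketch: validate the shape of an email and normalise its case.

    NOTE: assumed behaviour. Synapse's real helper lives in
    synapse.util.threepids and may differ in detail.
    """
    address = address.strip()
    parts = address.split("@")
    if len(parts) != 2 or not parts[0] or not parts[1]:
        raise ValueError("Unable to parse email address")
    return address.lower()


assert canonicalise_email(" Alice@Example.COM ") == "alice@example.com"
```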
@@ -50,7 +50,8 @@ logger = logging.getLogger(__name__)
 REQUIREMENTS = [
     # we use the TYPE_CHECKER.redefine method added in jsonschema 3.0.0
     "jsonschema>=3.0.0",
-    "frozendict>=1",
+    # frozendict 2.1.2 is broken on Debian 10: https://github.com/Marco-Sulla/python-frozendict/issues/41
+    "frozendict>=1,<2.1.2",
     "unpaddedbase64>=1.1.0",
     "canonicaljson>=1.4.0",
     # we use the type definitions added in signedjson 1.1.
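The pin added here, `frozendict>=1,<2.1.2`, is an ordinary PEP 440 specifier that excludes the release broken on Debian 10. A specifier like this can be checked programmatically with the `packaging` library:

```python
from packaging.specifiers import SpecifierSet
from packaging.version import Version

spec = SpecifierSet(">=1,<2.1.2")

assert Version("2.0.3") in spec
assert Version("2.1.2") not in spec  # the release broken on Debian 10
```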
@@ -15,7 +15,7 @@
 import logging
 from typing import TYPE_CHECKING, Optional

-from synapse.storage.database import DatabasePool, LoggingDatabaseConnection
+from synapse.storage.database import DatabasePool
 from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore
 from synapse.storage.engines import PostgresEngine
 from synapse.storage.util.id_generators import MultiWriterIdGenerator

@@ -27,12 +27,7 @@ logger = logging.getLogger(__name__)


 class BaseSlavedStore(CacheInvalidationWorkerStore):
-    def __init__(
-        self,
-        database: DatabasePool,
-        db_conn: LoggingDatabaseConnection,
-        hs: "HomeServer",
-    ):
+    def __init__(self, database: DatabasePool, db_conn, hs: "HomeServer"):
         super().__init__(database, db_conn, hs)
         if isinstance(self.database_engine, PostgresEngine):
             self._cache_id_gen: Optional[
@@ -14,7 +14,7 @@

 from typing import TYPE_CHECKING

-from synapse.storage.database import DatabasePool, LoggingDatabaseConnection
+from synapse.storage.database import DatabasePool
 from synapse.storage.databases.main.client_ips import LAST_SEEN_GRANULARITY
 from synapse.util.caches.lrucache import LruCache

@@ -25,12 +25,7 @@ if TYPE_CHECKING:


 class SlavedClientIpStore(BaseSlavedStore):
-    def __init__(
-        self,
-        database: DatabasePool,
-        db_conn: LoggingDatabaseConnection,
-        hs: "HomeServer",
-    ):
+    def __init__(self, database: DatabasePool, db_conn, hs: "HomeServer"):
         super().__init__(database, db_conn, hs)

         self.client_ip_last_seen: LruCache[tuple, int] = LruCache(
@@ -17,7 +17,7 @@ from typing import TYPE_CHECKING
 from synapse.replication.slave.storage._base import BaseSlavedStore
 from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker
 from synapse.replication.tcp.streams._base import DeviceListsStream, UserSignatureStream
-from synapse.storage.database import DatabasePool, LoggingDatabaseConnection
+from synapse.storage.database import DatabasePool
 from synapse.storage.databases.main.devices import DeviceWorkerStore
 from synapse.storage.databases.main.end_to_end_keys import EndToEndKeyWorkerStore
 from synapse.util.caches.stream_change_cache import StreamChangeCache

@@ -27,12 +27,7 @@ if TYPE_CHECKING:


 class SlavedDeviceStore(EndToEndKeyWorkerStore, DeviceWorkerStore, BaseSlavedStore):
-    def __init__(
-        self,
-        database: DatabasePool,
-        db_conn: LoggingDatabaseConnection,
-        hs: "HomeServer",
-    ):
+    def __init__(self, database: DatabasePool, db_conn, hs: "HomeServer"):
         super().__init__(database, db_conn, hs)

         self.hs = hs
@@ -15,7 +15,7 @@
 import logging
 from typing import TYPE_CHECKING

-from synapse.storage.database import DatabasePool, LoggingDatabaseConnection
+from synapse.storage.database import DatabasePool
 from synapse.storage.databases.main.event_federation import EventFederationWorkerStore
 from synapse.storage.databases.main.event_push_actions import (
     EventPushActionsWorkerStore,

@@ -58,12 +58,7 @@ class SlavedEventStore(
     RelationsWorkerStore,
     BaseSlavedStore,
 ):
-    def __init__(
-        self,
-        database: DatabasePool,
-        db_conn: LoggingDatabaseConnection,
-        hs: "HomeServer",
-    ):
+    def __init__(self, database: DatabasePool, db_conn, hs: "HomeServer"):
         super().__init__(database, db_conn, hs)

         events_max = self._stream_id_gen.get_current_token()
@@ -14,7 +14,7 @@

 from typing import TYPE_CHECKING

-from synapse.storage.database import DatabasePool, LoggingDatabaseConnection
+from synapse.storage.database import DatabasePool
 from synapse.storage.databases.main.filtering import FilteringStore

 from ._base import BaseSlavedStore

@@ -24,12 +24,7 @@ if TYPE_CHECKING:


 class SlavedFilteringStore(BaseSlavedStore):
-    def __init__(
-        self,
-        database: DatabasePool,
-        db_conn: LoggingDatabaseConnection,
-        hs: "HomeServer",
-    ):
+    def __init__(self, database: DatabasePool, db_conn, hs: "HomeServer"):
         super().__init__(database, db_conn, hs)

         # Filters are immutable so this cache doesn't need to be expired
@@ -17,7 +17,7 @@ from typing import TYPE_CHECKING
 from synapse.replication.slave.storage._base import BaseSlavedStore
 from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker
 from synapse.replication.tcp.streams import GroupServerStream
-from synapse.storage.database import DatabasePool, LoggingDatabaseConnection
+from synapse.storage.database import DatabasePool
 from synapse.storage.databases.main.group_server import GroupServerWorkerStore
 from synapse.util.caches.stream_change_cache import StreamChangeCache

@@ -26,12 +26,7 @@ if TYPE_CHECKING:


 class SlavedGroupServerStore(GroupServerWorkerStore, BaseSlavedStore):
-    def __init__(
-        self,
-        database: DatabasePool,
-        db_conn: LoggingDatabaseConnection,
-        hs: "HomeServer",
-    ):
+    def __init__(self, database: DatabasePool, db_conn, hs: "HomeServer"):
         super().__init__(database, db_conn, hs)

         self.hs = hs
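The six replication-store hunks above are one mechanical change repeated: the newer branch annotates the `db_conn` constructor argument as `LoggingDatabaseConnection` (and imports it), while release-v1 leaves that parameter untyped. Reduced to a self-contained sketch with stand-in classes, the pattern is:

```python
class LoggingDatabaseConnection:
    """Stand-in for Synapse's wrapper that logs queries on a DB connection."""


class DatabasePool:
    """Stand-in for the pool the stores are built on."""


class BaseStore:
    # Typed variant: db_conn's wrapper type is stated, so a type checker can
    # verify every store subclass that forwards it to super().__init__().
    def __init__(
        self,
        database: DatabasePool,
        db_conn: LoggingDatabaseConnection,
        hs: object,
    ):
        self.database = database
        self.db_conn = db_conn
        self.hs = hs


class SlavedStore(BaseStore):
    def __init__(
        self, database: DatabasePool, db_conn: LoggingDatabaseConnection, hs: object
    ):
        super().__init__(database, db_conn, hs)


store = SlavedStore(DatabasePool(), LoggingDatabaseConnection(), object())
```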
@@ -108,7 +108,7 @@ class VersionServlet(RestServlet):

 class PurgeHistoryRestServlet(RestServlet):
     PATTERNS = admin_patterns(
-        "/purge_history/(?P<room_id>[^/]*)(/(?P<event_id>[^/]*))?$"
+        "/purge_history/(?P<room_id>[^/]*)(/(?P<event_id>[^/]+))?"
     )

     def __init__(self, hs: "HomeServer"):

@@ -195,7 +195,7 @@ class PurgeHistoryRestServlet(RestServlet):


 class PurgeHistoryStatusRestServlet(RestServlet):
-    PATTERNS = admin_patterns("/purge_history_status/(?P<purge_id>[^/]*)$")
+    PATTERNS = admin_patterns("/purge_history_status/(?P<purge_id>[^/]+)")

     def __init__(self, hs: "HomeServer"):
         self.pagination_handler = hs.get_pagination_handler()
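Both regex changes above trade `[^/]*` plus a `$` anchor for an unanchored `[^/]+`. The user-visible difference is whether an empty identifier matches, which a few lines of `re` make concrete (assuming, as seems likely, that `admin_patterns` itself handles overall anchoring):

```python
import re

old = re.compile("/purge_history_status/(?P<purge_id>[^/]*)$")
new = re.compile("/purge_history_status/(?P<purge_id>[^/]+)")

# `*` accepts an empty purge_id; `+` rejects it.
assert old.match("/purge_history_status/") is not None
assert new.match("/purge_history_status/") is None
assert new.match("/purge_history_status/abc123").group("purge_id") == "abc123"
```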
@@ -22,7 +22,7 @@ from synapse.http.servlet import (
     parse_json_object_from_request,
 )
 from synapse.http.site import SynapseRequest
-from synapse.rest.admin._base import admin_patterns, assert_requester_is_admin
+from synapse.rest.admin._base import admin_patterns, assert_user_is_admin
 from synapse.types import JsonDict

 if TYPE_CHECKING:

@@ -41,7 +41,8 @@ class BackgroundUpdateEnabledRestServlet(RestServlet):
         self._data_stores = hs.get_datastores()

     async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
-        await assert_requester_is_admin(self._auth, request)
+        requester = await self._auth.get_user_by_req(request)
+        await assert_user_is_admin(self._auth, requester.user)

         # We need to check that all configured databases have updates enabled.
         # (They *should* all be in sync.)

@@ -50,7 +51,8 @@ class BackgroundUpdateEnabledRestServlet(RestServlet):
         return HTTPStatus.OK, {"enabled": enabled}

     async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
-        await assert_requester_is_admin(self._auth, request)
+        requester = await self._auth.get_user_by_req(request)
+        await assert_user_is_admin(self._auth, requester.user)

         body = parse_json_object_from_request(request)

@@ -82,7 +84,8 @@ class BackgroundUpdateRestServlet(RestServlet):
         self._data_stores = hs.get_datastores()

     async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
-        await assert_requester_is_admin(self._auth, request)
+        requester = await self._auth.get_user_by_req(request)
+        await assert_user_is_admin(self._auth, requester.user)

         # We need to check that all configured databases have updates enabled.
         # (They *should* all be in sync.)

@@ -108,14 +111,15 @@ class BackgroundUpdateRestServlet(RestServlet):
 class BackgroundUpdateStartJobRestServlet(RestServlet):
     """Allows to start specific background updates"""

-    PATTERNS = admin_patterns("/background_updates/start_job$")
+    PATTERNS = admin_patterns("/background_updates/start_job")

     def __init__(self, hs: "HomeServer"):
         self._auth = hs.get_auth()
         self._store = hs.get_datastore()

     async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
-        await assert_requester_is_admin(self._auth, request)
+        requester = await self._auth.get_user_by_req(request)
+        await assert_user_is_admin(self._auth, requester.user)

         body = parse_json_object_from_request(request)
         assert_params_in_dict(body, ["job_name"])
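Each of these hunks replaces the one-call `assert_requester_is_admin(auth, request)` with the two explicit steps it bundles: resolve the requester, then assert admin rights. A toy sketch of how such a helper folds the two steps together (stand-in types and logic, not Synapse's implementation):

```python
import asyncio


class AuthError(Exception):
    pass


async def get_user_by_req(request) -> str:
    return request["user"]  # stand-in: resolve the access token to a user


async def assert_user_is_admin(auth, user: str) -> None:
    if user not in auth:  # stand-in: auth is just a set of admin user IDs
        raise AuthError("You are not a server admin")


async def assert_requester_is_admin(auth, request) -> None:
    """One-call helper: the two-step idiom, folded together."""
    user = await get_user_by_req(request)
    await assert_user_is_admin(auth, user)


admins = {"@admin:example.org"}
request = {"user": "@admin:example.org"}
asyncio.run(assert_requester_is_admin(admins, request))  # passes silently
```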
@@ -42,10 +42,10 @@ class DeviceRestServlet(RestServlet):

     def __init__(self, hs: "HomeServer"):
         super().__init__()
+        self.hs = hs
         self.auth = hs.get_auth()
         self.device_handler = hs.get_device_handler()
         self.store = hs.get_datastore()
-        self.is_mine = hs.is_mine

     async def on_GET(
         self, request: SynapseRequest, user_id: str, device_id: str

@@ -53,7 +53,7 @@ class DeviceRestServlet(RestServlet):
         await assert_requester_is_admin(self.auth, request)

         target_user = UserID.from_string(user_id)
-        if not self.is_mine(target_user):
+        if not self.hs.is_mine(target_user):
             raise SynapseError(HTTPStatus.BAD_REQUEST, "Can only lookup local users")

         u = await self.store.get_user_by_id(target_user.to_string())

@@ -63,8 +63,6 @@ class DeviceRestServlet(RestServlet):
         device = await self.device_handler.get_device(
             target_user.to_string(), device_id
         )
-        if device is None:
-            raise NotFoundError("No device found")
         return HTTPStatus.OK, device

     async def on_DELETE(

@@ -73,7 +71,7 @@ class DeviceRestServlet(RestServlet):
         await assert_requester_is_admin(self.auth, request)

         target_user = UserID.from_string(user_id)
-        if not self.is_mine(target_user):
+        if not self.hs.is_mine(target_user):
             raise SynapseError(HTTPStatus.BAD_REQUEST, "Can only lookup local users")

         u = await self.store.get_user_by_id(target_user.to_string())

@@ -89,7 +87,7 @@ class DeviceRestServlet(RestServlet):
         await assert_requester_is_admin(self.auth, request)

         target_user = UserID.from_string(user_id)
-        if not self.is_mine(target_user):
+        if not self.hs.is_mine(target_user):
             raise SynapseError(HTTPStatus.BAD_REQUEST, "Can only lookup local users")

         u = await self.store.get_user_by_id(target_user.to_string())

@@ -111,10 +109,14 @@ class DevicesRestServlet(RestServlet):
     PATTERNS = admin_patterns("/users/(?P<user_id>[^/]*)/devices$", "v2")

     def __init__(self, hs: "HomeServer"):
+        """
+        Args:
+            hs: server
+        """
+        self.hs = hs
         self.auth = hs.get_auth()
         self.device_handler = hs.get_device_handler()
         self.store = hs.get_datastore()
-        self.is_mine = hs.is_mine

     async def on_GET(
         self, request: SynapseRequest, user_id: str

@@ -122,7 +124,7 @@ class DevicesRestServlet(RestServlet):
         await assert_requester_is_admin(self.auth, request)

         target_user = UserID.from_string(user_id)
-        if not self.is_mine(target_user):
+        if not self.hs.is_mine(target_user):
             raise SynapseError(HTTPStatus.BAD_REQUEST, "Can only lookup local users")

         u = await self.store.get_user_by_id(target_user.to_string())

@@ -142,10 +144,10 @@ class DeleteDevicesRestServlet(RestServlet):
     PATTERNS = admin_patterns("/users/(?P<user_id>[^/]*)/delete_devices$", "v2")

     def __init__(self, hs: "HomeServer"):
+        self.hs = hs
         self.auth = hs.get_auth()
         self.device_handler = hs.get_device_handler()
         self.store = hs.get_datastore()
-        self.is_mine = hs.is_mine

     async def on_POST(
         self, request: SynapseRequest, user_id: str

@@ -153,7 +155,7 @@ class DeleteDevicesRestServlet(RestServlet):
         await assert_requester_is_admin(self.auth, request)

         target_user = UserID.from_string(user_id)
-        if not self.is_mine(target_user):
+        if not self.hs.is_mine(target_user):
             raise SynapseError(HTTPStatus.BAD_REQUEST, "Can only lookup local users")

         u = await self.store.get_user_by_id(target_user.to_string())
Some files were not shown because too many files have changed in this diff.