Compare commits
121 Commits
| SHA1 |
|---|
| 2712a9ef8f |
| da757b7759 |
| 59bc7debf0 |
| cf68593544 |
| 9cc95fd0a5 |
| 82886e4c8f |
| 08919847c1 |
| c3acc45a87 |
| ae5bb32ad0 |
| 7ed3232b08 |
| 6e7488ce11 |
| 41585e1340 |
| 9498cd3e7b |
| c7503f8f33 |
| 9d8baa1595 |
| 4ff8486f0f |
| 2669e494e0 |
| b6d8a808a4 |
| 0cb5d34756 |
| 650761666d |
| aa2a4b4b42 |
| 022469d819 |
| 45d06c754a |
| dbd0821c43 |
| 0476852fc6 |
| 1d11d9323d |
| 261e4f2542 |
| 11728561f3 |
| 9d57abcadd |
| cb0bbde981 |
| abc97bd1de |
| ee238254a0 |
| 0125b5d002 |
| fe265fe990 |
| 7735eee41d |
| 3d0faa39fb |
| fd28d13e19 |
| d18731e252 |
| 81beae30b8 |
| 11f1bace3c |
| 1e8cfc9e77 |
| 488ed3e444 |
| c3ec84dbcd |
| 0783801659 |
| 9f2fd29c14 |
| 6372dff771 |
| b3e346f40c |
| fb47ce3e6a |
| debf04556b |
| 907a62df28 |
| 41b987cbc5 |
| 5c74ab4064 |
| 06820250c9 |
| 383c4ae59c |
| f639ac143d |
| ad0424bab0 |
| 2992125561 |
| ef56b6e27c |
| 53d6245529 |
| 25e471dac3 |
| 76fca1730e |
| 32e4420a66 |
| 79b2583f1b |
| 8a24c4eee5 |
| f93cb7410d |
| 50d5a97c1b |
| c06932a029 |
| 3a62cacfb0 |
| 4d55b16faa |
| 105709bf32 |
| d7fad867fa |
| 8fddcf703e |
| e2adb360eb |
| 47ed4a4aa7 |
| 7fafa838ae |
| de341bec1b |
| 643c89d497 |
| 6554253f48 |
| 3add16df49 |
| dde01efbcb |
| 22e416b726 |
| b4b7c80181 |
| 5fc3477fd3 |
| 8743f42b49 |
| 7285afa4be |
| b22a53e357 |
| 3c446d0a81 |
| 240e940c3f |
| 969ed2e49d |
| 1147ce7e18 |
| 0d2b7fdcec |
| 4e12b10c7c |
| e654230a51 |
| ef5193e0cb |
| 7b3959c7f3 |
| 2e4a6c5aab |
| e3eb2cfe8b |
| 5c341c99f6 |
| 739d3500fe |
| 0e2d70e101 |
| 82c4fd7226 |
| e446077478 |
| d82c89ac22 |
| 75b25b3f1f |
| 1df10d8814 |
| 8f9340d248 |
| c5034cd4b0 |
| f7f937d051 |
| e52b5d94a9 |
| d90f27a21f |
| 03cf9710e3 |
| 1dcdd8d568 |
| 4344fb1faf |
| 846577ebde |
| 3869981227 |
| fa80b492a5 |
| c776c52eed |
| b424c16f50 |
| 313a489fc9 |
| 4b090cb273 |
| 3f79378d4b |
+4
-7
@@ -23,9 +23,6 @@ branches:
    - develop
    - /^release-v/

# When running the tox environments that call Twisted Trial, we can pass the -j
# flag to run the tests concurrently. We set this to 2 for CPU bound tests
# (SQLite) and 4 for I/O bound tests (PostgreSQL).
matrix:
  fast_finish: true
  include:
@@ -36,10 +33,10 @@ matrix:
    env: TOX_ENV="pep8,check_isort"

  - python: 2.7
    env: TOX_ENV=py27 TRIAL_FLAGS="-j 2"
    env: TOX_ENV=py27

  - python: 2.7
    env: TOX_ENV=py27-old TRIAL_FLAGS="-j 2"
    env: TOX_ENV=py27-old

  - python: 2.7
    env: TOX_ENV=py27-postgres TRIAL_FLAGS="-j 4"
@@ -47,10 +44,10 @@ matrix:
    - postgresql

  - python: 3.5
    env: TOX_ENV=py35 TRIAL_FLAGS="-j 2"
    env: TOX_ENV=py35

  - python: 3.6
    env: TOX_ENV=py36 TRIAL_FLAGS="-j 2"
    env: TOX_ENV=py36

  - python: 3.6
    env: TOX_ENV=py36-postgres TRIAL_FLAGS="-j 4"
-61
@@ -1,64 +1,3 @@
Synapse 0.33.9 (2018-11-19)
===========================

No significant changes.


Synapse 0.33.9rc1 (2018-11-14)
==============================

Features
--------

- Include flags to optionally add `m.login.terms` to the registration flow when consent tracking is enabled. ([\#4004](https://github.com/matrix-org/synapse/issues/4004), [\#4133](https://github.com/matrix-org/synapse/issues/4133), [\#4142](https://github.com/matrix-org/synapse/issues/4142), [\#4184](https://github.com/matrix-org/synapse/issues/4184))
- Support for replacing rooms with new ones ([\#4091](https://github.com/matrix-org/synapse/issues/4091), [\#4099](https://github.com/matrix-org/synapse/issues/4099), [\#4100](https://github.com/matrix-org/synapse/issues/4100), [\#4101](https://github.com/matrix-org/synapse/issues/4101))


Bugfixes
--------

- Fix exceptions when using the email mailer on Python 3. ([\#4095](https://github.com/matrix-org/synapse/issues/4095))
- Fix e2e key backup with more than 9 backup versions ([\#4113](https://github.com/matrix-org/synapse/issues/4113))
- Searches that request profile info now no longer fail with a 500. ([\#4122](https://github.com/matrix-org/synapse/issues/4122))
- fix return code of empty key backups ([\#4123](https://github.com/matrix-org/synapse/issues/4123))
- If the typing stream ID goes backwards (as on a worker when the master restarts), the worker's typing handler will no longer erroneously report rooms containing new typing events. ([\#4127](https://github.com/matrix-org/synapse/issues/4127))
- Fix table lock of device_lists_remote_cache which could freeze the application ([\#4132](https://github.com/matrix-org/synapse/issues/4132))
- Fix exception when using state res v2 algorithm ([\#4135](https://github.com/matrix-org/synapse/issues/4135))
- Generating the user consent URI no longer fails on Python 3. ([\#4140](https://github.com/matrix-org/synapse/issues/4140), [\#4163](https://github.com/matrix-org/synapse/issues/4163))
- Loading URL previews from the DB cache on Postgres will no longer cause Unicode type errors when responding to the request, and URL previews will no longer fail if the remote server returns a Content-Type header with the chartype in quotes. ([\#4157](https://github.com/matrix-org/synapse/issues/4157))
- The hash_password script now works on Python 3. ([\#4161](https://github.com/matrix-org/synapse/issues/4161))
- Fix noop checks when updating device keys, reducing spurious device list update notifications. ([\#4164](https://github.com/matrix-org/synapse/issues/4164))


Deprecations and Removals
-------------------------

- The disused and un-specced identicon generator has been removed. ([\#4106](https://github.com/matrix-org/synapse/issues/4106))
- The obsolete and non-functional /pull federation endpoint has been removed. ([\#4118](https://github.com/matrix-org/synapse/issues/4118))
- The deprecated v1 key exchange endpoints have been removed. ([\#4119](https://github.com/matrix-org/synapse/issues/4119))
- Synapse will no longer fetch keys using the fallback deprecated v1 key exchange method and will now always use v2. ([\#4120](https://github.com/matrix-org/synapse/issues/4120))


Internal Changes
----------------

- Fix build of Docker image with docker-compose ([\#3778](https://github.com/matrix-org/synapse/issues/3778))
- Delete unreferenced state groups during history purge ([\#4006](https://github.com/matrix-org/synapse/issues/4006))
- The "Received rdata" log messages on workers is now logged at DEBUG, not INFO. ([\#4108](https://github.com/matrix-org/synapse/issues/4108))
- Reduce replication traffic for device lists ([\#4109](https://github.com/matrix-org/synapse/issues/4109))
- Fix `synapse_replication_tcp_protocol_*_commands` metric label to be full command name, rather than just the first character ([\#4110](https://github.com/matrix-org/synapse/issues/4110))
- Log some bits about room creation ([\#4121](https://github.com/matrix-org/synapse/issues/4121))
- Fix `tox` failure on old systems ([\#4124](https://github.com/matrix-org/synapse/issues/4124))
- Add STATE_V2_TEST room version ([\#4128](https://github.com/matrix-org/synapse/issues/4128))
- Clean up event accesses and tests ([\#4137](https://github.com/matrix-org/synapse/issues/4137))
- The default logging config will now set an explicit log file encoding of UTF-8. ([\#4138](https://github.com/matrix-org/synapse/issues/4138))
- Add helpers functions for getting prev and auth events of an event ([\#4139](https://github.com/matrix-org/synapse/issues/4139))
- Add some tests for the HTTP pusher. ([\#4149](https://github.com/matrix-org/synapse/issues/4149))
- add purge_history.sh and purge_remote_media.sh scripts to contrib/ ([\#4155](https://github.com/matrix-org/synapse/issues/4155))
- HTTP tests have been refactored to contain less boilerplate. ([\#4156](https://github.com/matrix-org/synapse/issues/4156))
- Drop incoming events from federation for unknown rooms ([\#4165](https://github.com/matrix-org/synapse/issues/4165))


Synapse 0.33.8 (2018-11-01)
===========================

@@ -6,11 +6,9 @@ version: '3'
services:

  synapse:
    build:
      context: ../..
      dockerfile: docker/Dockerfile
    build: ../..
    image: docker.io/matrixdotorg/synapse:latest
    # Since synapse does not retry to connect to the database, restart upon
    # Since snyapse does not retry to connect to the database, restart upon
    # failure
    restart: unless-stopped
    # See the readme for a full documentation of the environment settings
@@ -49,4 +47,4 @@ services:
      # You may store the database tables in a local folder..
      - ./schemas:/var/lib/postgresql/data
      # .. or store them on some high performance storage for better results
      # - /path/to/ssd/storage:/var/lib/postgresql/data
      # - /path/to/ssd/storage:/var/lib/postfesql/data

@@ -1,16 +0,0 @@
Purge history API examples
==========================

# `purge_history.sh`

A bash script that uses the [purge history API](/docs/admin_api/README.rst) to
purge all messages in a list of rooms up to a certain event. You can select a
timeframe or a number of messages that you want to keep in the room.

Just configure the variables DOMAIN, ADMIN, ROOMS_ARRAY and TIME at the top of
the script.

# `purge_remote_media.sh`

A bash script that uses the [purge history API](/docs/admin_api/README.rst) to
purge all old cached remote media.

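For readers who would rather drive the same purge API from Python than via the shell script below, here is a minimal sketch of the requests the script issues with curl. The server URL, admin access token, room ID, and event ID are placeholder assumptions; the endpoint paths are the ones used by the script.

```python
import time

import requests

# Placeholder values -- substitute your own homeserver, admin access token,
# room ID and event ID (e.g. one selected with the SQL queries in the script).
API_URL = "http://yourserver.tld:8008/_matrix/client/r0"
TOKEN = "<admin access token>"
ROOM_ID = "!QtykxKocfZaZOUrTwp:matrix.org"
EVENT_ID = "$1471814088343495zpPNI:matrix.org"

HEADERS = {"Authorization": "Bearer %s" % TOKEN}

# Start the purge; delete_local_events makes the purge remove local events too.
resp = requests.post(
    "%s/admin/purge_history/%s/%s" % (API_URL, ROOM_ID, EVENT_ID),
    headers=HEADERS,
    json={"delete_local_events": True},
)
purge_id = resp.json()["purge_id"]

# Poll the status endpoint until the purge is no longer active, as the
# script's while-loop does.
while True:
    status = requests.get(
        "%s/admin/purge_history_status/%s" % (API_URL, purge_id),
        headers=HEADERS,
    ).json()["status"]
    if status != "active":
        break
    time.sleep(2)

print("purge finished with status: %s" % status)
```
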
@@ -1,141 +0,0 @@
#!/bin/bash

# this script will use the api:
# https://github.com/matrix-org/synapse/blob/master/docs/admin_api/purge_history_api.rst
#
# It will purge all messages in a list of rooms up to a certain event

###################################################################################################
# define your domain and admin user
###################################################################################################
DOMAIN=yourserver.tld
# add this user as admin in your home server:
ADMIN="@you_admin_username:$DOMAIN"

API_URL="$DOMAIN:8008/_matrix/client/r0"

###################################################################################################
# choose the rooms to prune old messages from (add a free comment at the end)
###################################################################################################
# the room_id's you can get e.g. from your Riot client's "View Source" button on each message
ROOMS_ARRAY=(
'!DgvjtOljKujDBrxyHk:matrix.org#riot:matrix.org'
'!QtykxKocfZaZOUrTwp:matrix.org#Matrix HQ'
)

# ALTERNATIVELY:
# you can select all the rooms that are not encrypted and loop over the result:
# SELECT room_id FROM rooms WHERE room_id NOT IN (SELECT DISTINCT room_id FROM events WHERE type ='m.room.encrypted')
# or
# select all rooms with at least 100 members:
# SELECT q.room_id FROM (select count(*) as numberofusers, room_id FROM current_state_events WHERE type ='m.room.member'
# GROUP BY room_id) AS q LEFT JOIN room_aliases a ON q.room_id=a.room_id WHERE q.numberofusers > 100 ORDER BY numberofusers desc

###################################################################################################
# evaluate the EVENT_ID before which should be pruned
###################################################################################################
# choose a time before which the messages should be pruned:
TIME='12 months ago'
# ALTERNATIVELY:
# a certain time:
# TIME='2016-08-31 23:59:59'

# creates a timestamp from the given time string:
UNIX_TIMESTAMP=$(date +%s%3N --date='TZ="UTC+2" '"$TIME")

# ALTERNATIVELY:
# prune all messages that are older than 1000 messages ago:
# LAST_MESSAGES=1000
# SQL_GET_EVENT="SELECT event_id from events WHERE type='m.room.message' AND room_id ='$ROOM' ORDER BY received_ts DESC LIMIT 1 offset $(($LAST_MESSAGES - 1))"

# ALTERNATIVELY:
# select the EVENT_ID manually:
#EVENT_ID='$1471814088343495zpPNI:matrix.org' # an example event from 21st of Aug 2016 by Matthew

###################################################################################################
# make the admin user a server admin in the database with
###################################################################################################
# psql -A -t --dbname=synapse -c "UPDATE users SET admin=1 WHERE name LIKE '$ADMIN'"

###################################################################################################
# database function
###################################################################################################
sql (){
  # for sqlite3:
  #sqlite3 homeserver.db "pragma busy_timeout=20000;$1" | awk '{print $2}'
  # for postgres:
  psql -A -t --dbname=synapse -c "$1" | grep -v 'Pager'
}

###################################################################################################
# get an access token
###################################################################################################
# for example externally by watching Riot in your browser's network inspector
# or internally on the server locally, use this:
TOKEN=$(sql "SELECT token FROM access_tokens WHERE user_id='$ADMIN' ORDER BY id DESC LIMIT 1")
AUTH="Authorization: Bearer $TOKEN"

###################################################################################################
# check, if your TOKEN works. For example this works:
###################################################################################################
# $ curl --header "$AUTH" "$API_URL/rooms/$ROOM/state/m.room.power_levels"

###################################################################################################
# finally start pruning the room:
###################################################################################################
POSTDATA='{"delete_local_events":"true"}' # this will really delete local events, so the messages in the room really disappear unless they are restored by remote federation

for ROOM in "${ROOMS_ARRAY[@]}"; do
    echo "########################################### $(date) ################# "
    echo "pruning room: $ROOM ..."
    ROOM=${ROOM%#*}
    #set -x
    echo "check for alias in db..."
    # for postgres:
    sql "SELECT * FROM room_aliases WHERE room_id='$ROOM'"
    echo "get event..."
    # for postgres:
    EVENT_ID=$(sql "SELECT event_id FROM events WHERE type='m.room.message' AND received_ts<'$UNIX_TIMESTAMP' AND room_id='$ROOM' ORDER BY received_ts DESC LIMIT 1;")
    if [ "$EVENT_ID" == "" ]; then
        echo "no event $TIME"
    else
        echo "event: $EVENT_ID"
        SLEEP=2
        set -x
        # call purge
        OUT=$(curl --header "$AUTH" -s -d "$POSTDATA" -X POST "$API_URL/admin/purge_history/$ROOM/$EVENT_ID")
        PURGE_ID=$(echo "$OUT" | grep purge_id | cut -d'"' -f4)
        if [ "$PURGE_ID" == "" ]; then
            # probably the history purge is already in progress for $ROOM
            : "continuing with next room"
        else
            while : ; do
                # get status of purge and sleep longer each time if still active
                sleep $SLEEP
                STATUS=$(curl --header "$AUTH" -s "$API_URL/admin/purge_history_status/$PURGE_ID" | grep status | cut -d'"' -f4)
                : "$ROOM --> Status: $STATUS"
                [[ "$STATUS" == "active" ]] || break
                SLEEP=$((SLEEP + 1))
            done
        fi
        set +x
        sleep 1
    fi
done


###################################################################################################
# additionally
###################################################################################################
# to benefit from pruning large amounts of data, you need to call VACUUM to free the unused space.
# This can take a very long time (hours) and the client has to be stopped while you do so:
# $ synctl stop
# $ sqlite3 -line homeserver.db "vacuum;"
# $ synctl start

# This could be set, so you don't need to prune every time after deleting some rows:
# $ sqlite3 homeserver.db "PRAGMA auto_vacuum = FULL;"
# be cautious, it could make the database somewhat slow if there are a lot of deletions

exit
@@ -1,54 +0,0 @@
#!/bin/bash

DOMAIN=yourserver.tld
# add this user as admin in your home server:
ADMIN="@you_admin_username:$DOMAIN"

API_URL="$DOMAIN:8008/_matrix/client/r0"

# choose a time before which the messages should be pruned:
# TIME='2016-08-31 23:59:59'
TIME='12 months ago'

# creates a timestamp from the given time string:
UNIX_TIMESTAMP=$(date +%s%3N --date='TZ="UTC+2" '"$TIME")


###################################################################################################
# database function
###################################################################################################
sql (){
  # for sqlite3:
  #sqlite3 homeserver.db "pragma busy_timeout=20000;$1" | awk '{print $2}'
  # for postgres:
  psql -A -t --dbname=synapse -c "$1" | grep -v 'Pager'
}

###############################################################################
# make the admin user a server admin in the database with
###############################################################################
# sql "UPDATE users SET admin=1 WHERE name LIKE '$ADMIN'"

###############################################################################
# get an access token
###############################################################################
# for example externally by watching Riot in your browser's network inspector
# or internally on the server locally, use this:
TOKEN=$(sql "SELECT token FROM access_tokens WHERE user_id='$ADMIN' ORDER BY id DESC LIMIT 1")

###############################################################################
# check, if your TOKEN works. For example this works:
###############################################################################
# curl --header "Authorization: Bearer $TOKEN" "$API_URL/rooms/$ROOM/state/m.room.power_levels"

###############################################################################
# optional check size before
###############################################################################
# echo calculate used storage before ...
# du -shc ../.synapse/media_store/*

###############################################################################
# finally start pruning media:
###############################################################################
set -x # for debugging the generated string
curl --header "Authorization: Bearer $TOKEN" -v -X POST "$API_URL/admin/purge_media_cache/?before_ts=$UNIX_TIMESTAMP"
@@ -31,7 +31,7 @@ Note that the templates must be stored under a name giving the language of the
template - currently this must always be `en` (for "English");
internationalisation support is intended for the future.

The template for the policy itself should be versioned and named according to
The template for the policy itself should be versioned and named according to
the version: for example `1.0.html`. The version of the policy which the user
has agreed to is stored in the database.

@@ -85,37 +85,6 @@ Once this is complete, and the server has been restarted, try visiting
an error "Missing string query parameter 'u'". It is now possible to manually
construct URIs where users can give their consent.

### Enabling consent tracking at registration

1. Add the following to your configuration:

   ```yaml
   user_consent:
     require_at_registration: true
     policy_name: "Privacy Policy" # or whatever you'd like to call the policy
   ```

2. In your consent templates, make use of the `public_version` variable to
   see if an unauthenticated user is viewing the page. This is typically
   wrapped around the form that would be used to actually agree to the document:

   ```
   {% if not public_version %}
     <!-- The variables used here are only provided when the 'u' param is given to the homeserver -->
     <form method="post" action="consent">
       <input type="hidden" name="v" value="{{version}}"/>
       <input type="hidden" name="u" value="{{user}}"/>
       <input type="hidden" name="h" value="{{userhmac}}"/>
       <input type="submit" value="Sure thing!"/>
     </form>
   {% endif %}
   ```

3. Restart Synapse to apply the changes.

Visiting `https://<server>/_matrix/consent` should now give you a view of the privacy
document. This is what users will be able to see when registering for accounts.

### Constructing the consent URI

It may be useful to manually construct the "consent URI" for a given user - for
@@ -137,12 +106,6 @@ query parameters:
`https://<server>/_matrix/consent?u=<user>&h=68a152465a4d...`.


Note that not providing a `u` parameter will be interpreted as wanting to view
the document from an unauthenticated perspective, such as prior to registration.
Therefore, the `h` parameter is not required in this scenario. To enable this
behaviour, set `require_at_registration` to `true` in your `user_consent` config.

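As a worked illustration of the `u`/`h` pair described above, here is a minimal sketch of computing the `h` parameter outside of Synapse. It assumes `h` is the hex-encoded HMAC-SHA256 of the `u` value keyed with the homeserver's `form_secret`; the secret and user below are placeholders.

```python
import hashlib
import hmac

# Placeholder values: form_secret comes from homeserver.yaml, and the user is
# whoever the consent URI is being constructed for.
form_secret = "<form_secret from homeserver.yaml>"
user = "@someuser:example.com"

# Assumed construction: hex-encoded HMAC-SHA256 of the 'u' value, keyed with
# form_secret, used as the 'h' query parameter of the consent URI.
h = hmac.new(
    key=form_secret.encode("utf8"),
    msg=user.encode("utf8"),
    digestmod=hashlib.sha256,
).hexdigest()

print("https://<server>/_matrix/consent?u=%s&h=%s" % (user, h))
```
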
Sending users a server notice asking them to agree to the policy
----------------------------------------------------------------

@@ -12,15 +12,12 @@
  <p>
    All your base are belong to us.
  </p>
  {% if not public_version %}
    <!-- The variables used here are only provided when the 'u' param is given to the homeserver -->
    <form method="post" action="consent">
      <input type="hidden" name="v" value="{{version}}"/>
      <input type="hidden" name="u" value="{{user}}"/>
      <input type="hidden" name="h" value="{{userhmac}}"/>
      <input type="submit" value="Sure thing!"/>
    </form>
  {% endif %}
  <form method="post" action="consent">
    <input type="hidden" name="v" value="{{version}}"/>
    <input type="hidden" name="u" value="{{user}}"/>
    <input type="hidden" name="h" value="{{userhmac}}"/>
    <input type="submit" value="Sure thing!"/>
  </form>
  {% endif %}
</body>
</html>

@@ -14,3 +14,22 @@ fi

# set up the virtualenv
tox -e py27 --notest -v

TOX_BIN=$TOX_DIR/py27/bin

# cryptography 2.2 requires setuptools >= 18.5.
#
# older versions of virtualenv (?) give us a virtualenv with the same version
# of setuptools as is installed on the system python (and tox runs virtualenv
# under python3, so we get the version of setuptools that is installed on that).
#
# anyway, make sure that we have a recent enough setuptools.
$TOX_BIN/pip install 'setuptools>=18.5'

# we also need a semi-recent version of pip, because old ones fail to install
# the "enum34" dependency of cryptography.
$TOX_BIN/pip install 'pip>=10'

{ python synapse/python_dependencies.py
  echo lxml
} | xargs $TOX_BIN/pip install

@@ -0,0 +1,7 @@
.header {
  border-bottom: 4px solid #e4f7ed ! important;
}

.notif_link a, .footer a {
  color: #76CFA6 ! important;
}
@@ -0,0 +1,156 @@
body {
    margin: 0px;
}

pre, code {
    word-break: break-word;
    white-space: pre-wrap;
}

#page {
    font-family: 'Open Sans', Helvetica, Arial, Sans-Serif;
    font-color: #454545;
    font-size: 12pt;
    width: 100%;
    padding: 20px;
}

#inner {
    width: 640px;
}

.header {
    width: 100%;
    height: 87px;
    color: #454545;
    border-bottom: 4px solid #e5e5e5;
}

.logo {
    text-align: right;
    margin-left: 20px;
}

.salutation {
    padding-top: 10px;
    font-weight: bold;
}

.summarytext {
}

.room {
    width: 100%;
    color: #454545;
    border-bottom: 1px solid #e5e5e5;
}

.room_header td {
    padding-top: 38px;
    padding-bottom: 10px;
    border-bottom: 1px solid #e5e5e5;
}

.room_name {
    vertical-align: middle;
    font-size: 18px;
    font-weight: bold;
}

.room_header h2 {
    margin-top: 0px;
    margin-left: 75px;
    font-size: 20px;
}

.room_avatar {
    width: 56px;
    line-height: 0px;
    text-align: center;
    vertical-align: middle;
}

.room_avatar img {
    width: 48px;
    height: 48px;
    object-fit: cover;
    border-radius: 24px;
}

.notif {
    border-bottom: 1px solid #e5e5e5;
    margin-top: 16px;
    padding-bottom: 16px;
}

.historical_message .sender_avatar {
    opacity: 0.3;
}

/* spell out opacity and historical_message class names for Outlook aka Word */
.historical_message .sender_name {
    color: #e3e3e3;
}

.historical_message .message_time {
    color: #e3e3e3;
}

.historical_message .message_body {
    color: #c7c7c7;
}

.historical_message td,
.message td {
    padding-top: 10px;
}

.sender_avatar {
    width: 56px;
    text-align: center;
    vertical-align: top;
}

.sender_avatar img {
    margin-top: -2px;
    width: 32px;
    height: 32px;
    border-radius: 16px;
}

.sender_name {
    display: inline;
    font-size: 13px;
    color: #a2a2a2;
}

.message_time {
    text-align: right;
    width: 100px;
    font-size: 11px;
    color: #a2a2a2;
}

.message_body {
}

.notif_link td {
    padding-top: 10px;
    padding-bottom: 10px;
    font-weight: bold;
}

.notif_link a, .footer a {
    color: #454545;
    text-decoration: none;
}

.debug {
    font-size: 10px;
    color: #888;
}

.footer {
    margin-top: 20px;
    text-align: center;
}
@@ -0,0 +1,45 @@
{% for message in notif.messages %}
    <tr class="{{ "historical_message" if message.is_historical else "message" }}">
        <td class="sender_avatar">
            {% if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %}
                {% if message.sender_avatar_url %}
                    <img alt="" class="sender_avatar" src="{{ message.sender_avatar_url|mxc_to_http(32,32) }}" />
                {% else %}
                    {% if message.sender_hash % 3 == 0 %}
                        <img class="sender_avatar" src="https://vector.im/beta/img/76cfa6.png" />
                    {% elif message.sender_hash % 3 == 1 %}
                        <img class="sender_avatar" src="https://vector.im/beta/img/50e2c2.png" />
                    {% else %}
                        <img class="sender_avatar" src="https://vector.im/beta/img/f4c371.png" />
                    {% endif %}
                {% endif %}
            {% endif %}
        </td>
        <td class="message_contents">
            {% if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %}
                <div class="sender_name">{% if message.msgtype == "m.emote" %}*{% endif %} {{ message.sender_name }}</div>
            {% endif %}
            <div class="message_body">
                {% if message.msgtype == "m.text" %}
                    {{ message.body_text_html }}
                {% elif message.msgtype == "m.emote" %}
                    {{ message.body_text_html }}
                {% elif message.msgtype == "m.notice" %}
                    {{ message.body_text_html }}
                {% elif message.msgtype == "m.image" %}
                    <img src="{{ message.image_url|mxc_to_http(640, 480, scale) }}" />
                {% elif message.msgtype == "m.file" %}
                    <span class="filename">{{ message.body_text_plain }}</span>
                {% endif %}
            </div>
        </td>
        <td class="message_time">{{ message.ts|format_ts("%H:%M") }}</td>
    </tr>
{% endfor %}
<tr class="notif_link">
    <td></td>
    <td>
        <a href="{{ notif.link }}">View {{ room.title }}</a>
    </td>
    <td></td>
</tr>
@@ -0,0 +1,16 @@
{% for message in notif.messages %}
{% if message.msgtype == "m.emote" %}* {% endif %}{{ message.sender_name }} ({{ message.ts|format_ts("%H:%M") }})
{% if message.msgtype == "m.text" %}
{{ message.body_text_plain }}
{% elif message.msgtype == "m.emote" %}
{{ message.body_text_plain }}
{% elif message.msgtype == "m.notice" %}
{{ message.body_text_plain }}
{% elif message.msgtype == "m.image" %}
{{ message.body_text_plain }}
{% elif message.msgtype == "m.file" %}
{{ message.body_text_plain }}
{% endif %}
{% endfor %}

View {{ room.title }} at {{ notif.link }}
@@ -0,0 +1,55 @@
<!doctype html>
<html lang="en">
    <head>
        <style type="text/css">
            {% include 'mail.css' without context %}
            {% include "mail-%s.css" % app_name ignore missing without context %}
        </style>
    </head>
    <body>
        <table id="page">
            <tr>
                <td> </td>
                <td id="inner">
                    <table class="header">
                        <tr>
                            <td>
                                <div class="salutation">Hello {{ user_display_name }},</div>
                                <div class="summarytext">{{ summary_text }}</div>
                            </td>
                            <td class="logo">
                                {% if app_name == "Riot" %}
                                    <img src="http://matrix.org/img/riot-logo-email.png" width="83" height="83" alt="[Riot]"/>
                                {% elif app_name == "Vector" %}
                                    <img src="http://matrix.org/img/vector-logo-email.png" width="64" height="83" alt="[Vector]"/>
                                {% else %}
                                    <img src="http://matrix.org/img/matrix-120x51.png" width="120" height="51" alt="[matrix]"/>
                                {% endif %}
                            </td>
                        </tr>
                    </table>
                    {% for room in rooms %}
                        {% include 'room.html' with context %}
                    {% endfor %}
                    <div class="footer">
                        <a href="{{ unsubscribe_link }}">Unsubscribe</a>
                        <br/>
                        <br/>
                        <div class="debug">
                            Sending email at {{ reason.now|format_ts("%c") }} due to activity in room {{ reason.room_name }} because
                            an event was received at {{ reason.received_at|format_ts("%c") }}
                            which is more than {{ "%.1f"|format(reason.delay_before_mail_ms / (60*1000)) }} ({{ reason.delay_before_mail_ms }}) mins ago,
                            {% if reason.last_sent_ts %}
                                and the last time we sent a mail for this room was {{ reason.last_sent_ts|format_ts("%c") }},
                                which is more than {{ "%.1f"|format(reason.throttle_ms / (60*1000)) }} (current throttle_ms) mins ago.
                            {% else %}
                                and we don't have a last time we sent a mail for this room.
                            {% endif %}
                        </div>
                    </div>
                </td>
                <td> </td>
            </tr>
        </table>
    </body>
</html>
@@ -0,0 +1,10 @@
Hello {{ user_display_name }},

{{ summary_text }}

{% for room in rooms %}
{% include 'room.txt' with context %}
{% endfor %}

You can disable these notifications by clicking here {{ unsubscribe_link }}

@@ -0,0 +1,33 @@
<table class="room">
    <tr class="room_header">
        <td class="room_avatar">
            {% if room.avatar_url %}
                <img alt="" src="{{ room.avatar_url|mxc_to_http(48,48) }}" />
            {% else %}
                {% if room.hash % 3 == 0 %}
                    <img alt="" src="https://vector.im/beta/img/76cfa6.png" />
                {% elif room.hash % 3 == 1 %}
                    <img alt="" src="https://vector.im/beta/img/50e2c2.png" />
                {% else %}
                    <img alt="" src="https://vector.im/beta/img/f4c371.png" />
                {% endif %}
            {% endif %}
        </td>
        <td class="room_name" colspan="2">
            {{ room.title }}
        </td>
    </tr>
    {% if room.invite %}
        <tr>
            <td></td>
            <td>
                <a href="{{ room.link }}">Join the conversation.</a>
            </td>
            <td></td>
        </tr>
    {% else %}
        {% for notif in room.notifs %}
            {% include 'notif.html' with context %}
        {% endfor %}
    {% endif %}
</table>
@@ -0,0 +1,9 @@
{{ room.title }}

{% if room.invite %}
You have been invited, join the conversation by clicking on the following link {{ room.link }}
{% else %}
{% for notif in room.notifs %}
{% include 'notif.txt' with context %}
{% endfor %}
{% endif %}
@@ -154,15 +154,10 @@ def request_json(method, origin_name, origin_key, destination, path, content):
    s = requests.Session()
    s.mount("matrix://", MatrixConnectionAdapter())

    headers = {"Host": destination, "Authorization": authorization_headers[0]}

    if method == "POST":
        headers["Content-Type"] = "application/json"

    result = s.request(
        method=method,
        url=dest,
        headers=headers,
        headers={"Host": destination, "Authorization": authorization_headers[0]},
        verify=False,
        data=content,
    )
@@ -208,7 +203,7 @@ def main():
    parser.add_argument(
        "-X",
        "--method",
        help="HTTP method to use for the request. Defaults to GET if --body is"
        help="HTTP method to use for the request. Defaults to GET if --data is"
        "unspecified, POST if it is.",
    )

Executable
+39
@@ -0,0 +1,39 @@
#!/usr/bin/env perl

use strict;
use warnings;

use DBI;
use DBD::SQLite;
use JSON;
use Getopt::Long;

my $db; # = "homeserver.db";
my $server = "http://localhost:8008";
my $size = 320;

GetOptions("db|d=s", \$db,
           "server|s=s", \$server,
           "width|w=i", \$size) or usage();

usage() unless $db;

my $dbh = DBI->connect("dbi:SQLite:dbname=$db","","") || die $DBI::errstr;

my $res = $dbh->selectall_arrayref("select token, name from access_tokens, users where access_tokens.user_id = users.id group by user_id") || die $DBI::errstr;

foreach (@$res) {
    my ($token, $mxid) = ($_->[0], $_->[1]);
    my ($user_id) = ($mxid =~ m/@(.*):/);
    my ($url) = $dbh->selectrow_array("select avatar_url from profiles where user_id=?", undef, $user_id);
    if (!$url || $url =~ /#auto$/) {
        `curl -s -o tmp.png "$server/_matrix/media/v1/identicon?name=${mxid}&width=$size&height=$size"`;
        my $json = `curl -s -X POST -H "Content-Type: image/png" -T "tmp.png" $server/_matrix/media/v1/upload?access_token=$token`;
        my $content_uri = from_json($json)->{content_uri};
        `curl -X PUT -H "Content-Type: application/json" --data '{ "avatar_url": "${content_uri}#auto"}' $server/_matrix/client/api/v1/profile/${mxid}/avatar_url?access_token=$token`;
    }
}

sub usage {
    die "usage: ./make-identicons.pl\n\t-d database [e.g. homeserver.db]\n\t-s homeserver (default: http://localhost:8008)\n\t-w identicon size in pixels (default 320)";
}
+7
-34
@@ -3,15 +3,13 @@
import argparse
import getpass
import sys
import unicodedata

import bcrypt
import yaml

bcrypt_rounds = 12
bcrypt_rounds=12
password_pepper = ""


def prompt_for_pass():
    password = getpass.getpass("Password: ")

@@ -25,27 +23,19 @@ def prompt_for_pass():

    return password


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Calculate the hash of a new password, so that passwords can be reset"
        )
    )
        description="Calculate the hash of a new password, so that passwords"
                    " can be reset")
    parser.add_argument(
        "-p",
        "--password",
        "-p", "--password",
        default=None,
        help="New password for user. Will prompt if omitted.",
    )
    parser.add_argument(
        "-c",
        "--config",
        "-c", "--config",
        type=argparse.FileType('r'),
        help=(
            "Path to server config file. "
            "Used to read in bcrypt_rounds and password_pepper."
        ),
        help="Path to server config file. Used to read in bcrypt_rounds and password_pepper.",
    )

    args = parser.parse_args()
@@ -59,21 +49,4 @@ if __name__ == "__main__":
    if not password:
        password = prompt_for_pass()

    # On Python 2, make sure we decode it to Unicode before we normalise it
    if isinstance(password, bytes):
        try:
            password = password.decode(sys.stdin.encoding)
        except UnicodeDecodeError:
            print(
                "ERROR! Your password is not decodable using your terminal encoding (%s)."
                % (sys.stdin.encoding,)
            )

    pw = unicodedata.normalize("NFKC", password)

    hashed = bcrypt.hashpw(
        pw.encode('utf8') + password_pepper.encode("utf8"),
        bcrypt.gensalt(bcrypt_rounds),
    ).decode('ascii')

    print(hashed)
    print bcrypt.hashpw(password + password_pepper, bcrypt.gensalt(bcrypt_rounds))

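Since the hunk above swaps the Python 3 hashing path for the old Python 2 one, it may help to see how a hash produced by the newer code would be checked. This is a minimal sketch under the same pepper convention (pepper appended to the NFKC-normalised password); the hash and password values are placeholders.

```python
import unicodedata

import bcrypt

# Placeholder values -- in practice the pepper comes from the server config
# and the stored hash from the users table / hash_password output.
password_pepper = ""
stored_hash = "<bcrypt hash emitted by hash_password>"
candidate = "s3kr3t"

# Mirror the hashing code: NFKC-normalise the password, append the pepper,
# then let bcrypt compare against the stored hash.
pw = unicodedata.normalize("NFKC", candidate)
matches = bcrypt.checkpw(
    pw.encode("utf8") + password_pepper.encode("utf8"),
    stored_hash.encode("ascii"),
)
print("password matches" if matches else "password does not match")
```
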
+1
-1
@@ -27,4 +27,4 @@ try:
except ImportError:
    pass

__version__ = "0.33.9"
__version__ = "0.33.8"

+16
-29
@@ -189,6 +189,7 @@ class Auth(object):
        # Can optionally look elsewhere in the request (e.g. headers)
        try:
            user_id, app_service = yield self._get_appservice_user_id(request)

            if user_id:
                request.authenticated_entity = user_id
                defer.returnValue(
@@ -238,39 +239,40 @@ class Auth(object):
                errcode=Codes.MISSING_TOKEN
            )

    @defer.inlineCallbacks
    def _get_appservice_user_id(self, request):
        app_service = self.store.get_app_service_by_token(
            self.get_access_token_from_request(
                request, self.TOKEN_NOT_FOUND_HTTP_STATUS
            )
        )

        if app_service is None:
            defer.returnValue((None, None))
            return(None, None)

        if app_service.ip_range_whitelist:
            ip_address = IPAddress(self.hs.get_ip_from_request(request))
            if ip_address not in app_service.ip_range_whitelist:
                defer.returnValue((None, None))
                return(None, None)

        if b"user_id" not in request.args:
            defer.returnValue((app_service.sender, app_service))
            return(app_service.sender, app_service)

        user_id = request.args[b"user_id"][0].decode('utf8')
        if app_service.sender == user_id:
            defer.returnValue((app_service.sender, app_service))
            return(app_service.sender, app_service)

        if not app_service.is_interested_in_user(user_id):
            raise AuthError(
                403,
                "Application service cannot masquerade as this user."
            )
        if not (yield self.store.get_user_by_id(user_id)):
            raise AuthError(
                403,
                "Application service has not registered this user"
            )
        defer.returnValue((user_id, app_service))
        # Let ASes manipulate nonexistent users (e.g. to shadow-register them)
        # if not (yield self.store.get_user_by_id(user_id)):
        #     raise AuthError(
        #         403,
        #         "Application service has not registered this user"
        #     )
        return(user_id, app_service)

    @defer.inlineCallbacks
    def get_user_by_access_token(self, token, rights="access"):
@@ -514,24 +516,9 @@ class Auth(object):
        defer.returnValue(user_info)

    def get_appservice_by_req(self, request):
        try:
            token = self.get_access_token_from_request(
                request, self.TOKEN_NOT_FOUND_HTTP_STATUS
            )
            service = self.store.get_app_service_by_token(token)
            if not service:
                logger.warn("Unrecognised appservice access token.")
                raise AuthError(
                    self.TOKEN_NOT_FOUND_HTTP_STATUS,
                    "Unrecognised access token.",
                    errcode=Codes.UNKNOWN_TOKEN
                )
            request.authenticated_entity = service.sender
            return defer.succeed(service)
        except KeyError:
            raise AuthError(
                self.TOKEN_NOT_FOUND_HTTP_STATUS, "Missing access token."
            )
        (user_id, app_service) = self._get_appservice_user_id(request)
        request.authenticated_entity = app_service.sender
        return app_service

    def is_server_admin(self, user):
        """ Check if the given user is a local server admin.

@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2017 Vector Creations Ltd
# Copyright 2018 New Vector Ltd.
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -51,7 +51,6 @@ class LoginType(object):
    EMAIL_IDENTITY = u"m.login.email.identity"
    MSISDN = u"m.login.msisdn"
    RECAPTCHA = u"m.login.recaptcha"
    TERMS = u"m.login.terms"
    DUMMY = u"m.login.dummy"

    # Only for C/S API v1
@@ -62,7 +61,6 @@ class LoginType(object):
class EventTypes(object):
    Member = "m.room.member"
    Create = "m.room.create"
    Tombstone = "m.room.tombstone"
    JoinRules = "m.room.join_rules"
    PowerLevels = "m.room.power_levels"
    Aliases = "m.room.aliases"
@@ -73,6 +71,7 @@ class EventTypes(object):
    CanonicalAlias = "m.room.canonical_alias"
    RoomAvatar = "m.room.avatar"
    GuestAccess = "m.room.guest_access"
    Encryption = "m.room.encryption"

    # These are used for validation
    Message = "m.room.message"
@@ -103,7 +102,6 @@ class ThirdPartyEntityKind(object):
class RoomVersions(object):
    V1 = "1"
    VDH_TEST = "vdh-test-version"
    STATE_V2_TEST = "state-v2-test"


# the version we will give rooms which are created on this server
@@ -111,11 +109,7 @@ DEFAULT_ROOM_VERSION = RoomVersions.V1

# vdh-test-version is a placeholder to get room versioning support working and tested
# until we have a working v2.
KNOWN_ROOM_VERSIONS = {
    RoomVersions.V1,
    RoomVersions.VDH_TEST,
    RoomVersions.STATE_V2_TEST,
}
KNOWN_ROOM_VERSIONS = {RoomVersions.V1, RoomVersions.VDH_TEST}

ServerNoticeMsgType = "m.server_notice"
ServerNoticeLimitReached = "m.server_notice.usage_limit_reached"

@@ -28,6 +28,7 @@ FEDERATION_PREFIX = "/_matrix/federation/v1"
STATIC_PREFIX = "/_matrix/static"
WEB_CLIENT_PREFIX = "/_matrix/client"
CONTENT_REPO_PREFIX = "/_matrix/content"
SERVER_KEY_PREFIX = "/_matrix/key/v1"
SERVER_KEY_V2_PREFIX = "/_matrix/key/v2"
MEDIA_PREFIX = "/_matrix/media/r0"
LEGACY_MEDIA_PREFIX = "/_matrix/media/v1"

@@ -37,6 +37,7 @@ from synapse.api.urls import (
    FEDERATION_PREFIX,
    LEGACY_MEDIA_PREFIX,
    MEDIA_PREFIX,
    SERVER_KEY_PREFIX,
    SERVER_KEY_V2_PREFIX,
    STATIC_PREFIX,
    WEB_CLIENT_PREFIX,
@@ -58,6 +59,7 @@ from synapse.python_dependencies import CONDITIONAL_REQUIREMENTS, check_requirem
from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory
from synapse.rest import ClientRestResource
from synapse.rest.key.v1.server_key_resource import LocalKey
from synapse.rest.key.v2 import KeyApiV2Resource
from synapse.rest.media.v0.content_repository import ContentRepoResource
from synapse.server import HomeServer
@@ -234,7 +236,10 @@ class SynapseHomeServer(HomeServer):
            )

        if name in ["keys", "federation"]:
            resources[SERVER_KEY_V2_PREFIX] = KeyApiV2Resource(self)
            resources.update({
                SERVER_KEY_PREFIX: LocalKey(self),
                SERVER_KEY_V2_PREFIX: KeyApiV2Resource(self),
            })

        if name == "webclient":
            resources[WEB_CLIENT_PREFIX] = build_resource_for_web_client(self)

@@ -226,15 +226,7 @@ class SynchrotronPresence(object):
class SynchrotronTyping(object):
    def __init__(self, hs):
        self._latest_room_serial = 0
        self._reset()

    def _reset(self):
        """
        Reset the typing handler's data caches.
        """
        # map room IDs to serial numbers
        self._room_serials = {}
        # map room IDs to sets of users currently typing
        self._room_typing = {}

    def stream_positions(self):
@@ -244,12 +236,6 @@ class SynchrotronTyping(object):
        return {"typing": self._latest_room_serial}

    def process_replication_rows(self, token, rows):
        if self._latest_room_serial > token:
            # The master has gone backwards. To prevent inconsistent data, just
            # clear everything.
            self._reset()

        # Set the latest serial token to whatever the server gave us.
        self._latest_room_serial = token

        for row in rows:

@@ -265,7 +265,7 @@ class ApplicationService(object):
    def is_exclusive_room(self, room_id):
        return self._is_exclusive(ApplicationService.NS_ROOMS, room_id)

    def get_exlusive_user_regexes(self):
    def get_exclusive_user_regexes(self):
        """Get the list of regexes used to determine if a user is exclusively
        registered by the AS
        """

@@ -42,14 +42,6 @@ DEFAULT_CONFIG = """\
# until the user consents to the privacy policy. The value of the setting is
# used as the text of the error.
#
# 'require_at_registration', if enabled, will add a step to the registration
# process, similar to how captcha works. Users will be required to accept the
# policy before their account is created.
#
# 'policy_name' is the display name of the policy users will see when registering
# for an account. Has no effect unless `require_at_registration` is enabled.
# Defaults to "Privacy Policy".
#
# user_consent:
#   template_dir: res/templates/privacy
#   version: 1.0
@@ -62,8 +54,6 @@ DEFAULT_CONFIG = """\
#   block_events_error: >-
#     To continue using this homeserver you must review and agree to the
#     terms and conditions at %(consent_uri)s
#   require_at_registration: False
#   policy_name: Privacy Policy
#
"""

@@ -77,8 +67,6 @@ class ConsentConfig(Config):
        self.user_consent_server_notice_content = None
        self.user_consent_server_notice_to_guests = False
        self.block_events_without_consent_error = None
        self.user_consent_at_registration = False
        self.user_consent_policy_name = "Privacy Policy"

    def read_config(self, config):
        consent_config = config.get("user_consent")
@@ -95,12 +83,6 @@ class ConsentConfig(Config):
        self.user_consent_server_notice_to_guests = bool(consent_config.get(
            "send_server_notice_to_guests", False,
        ))
        self.user_consent_at_registration = bool(consent_config.get(
            "require_at_registration", False,
        ))
        self.user_consent_policy_name = consent_config.get(
            "policy_name", "Privacy Policy",
        )

    def default_config(self, **kwargs):
        return DEFAULT_CONFIG

@@ -50,7 +50,6 @@ handlers:
        maxBytes: 104857600
        backupCount: 10
        filters: [context]
        encoding: utf8
    console:
        class: logging.StreamHandler
        formatter: precise

@@ -33,7 +33,15 @@ class RegistrationConfig(Config):

        self.registrations_require_3pid = config.get("registrations_require_3pid", [])
        self.allowed_local_3pids = config.get("allowed_local_3pids", [])
        self.check_is_for_allowed_local_3pids = config.get(
            "check_is_for_allowed_local_3pids", None
        )
        self.allow_invited_3pids = config.get("allow_invited_3pids", False)

        self.disable_3pid_changes = config.get("disable_3pid_changes", False)

        self.registration_shared_secret = config.get("registration_shared_secret")
        self.register_mxid_from_3pid = config.get("register_mxid_from_3pid")

        self.bcrypt_rounds = config.get("bcrypt_rounds", 12)
        self.trusted_third_party_id_servers = config["trusted_third_party_id_servers"]
@@ -49,6 +57,17 @@ class RegistrationConfig(Config):
            raise ConfigError('Invalid auto_join_rooms entry %s' % (room_alias,))
        self.autocreate_auto_join_rooms = config.get("autocreate_auto_join_rooms", True)

        self.disable_set_displayname = config.get("disable_set_displayname", False)
        self.disable_set_avatar_url = config.get("disable_set_avatar_url", False)

        self.replicate_user_profiles_to = config.get("replicate_user_profiles_to", [])
        if not isinstance(self.replicate_user_profiles_to, list):
            self.replicate_user_profiles_to = [self.replicate_user_profiles_to, ]

        self.shadow_server = config.get("shadow_server", None)
        self.rewrite_identity_server_urls = config.get("rewrite_identity_server_urls", {})

    def default_config(self, **kwargs):
        registration_shared_secret = random_string_with_symbols(50)

@@ -64,9 +83,26 @@ class RegistrationConfig(Config):
        #  - email
        #  - msisdn

        # Derive the user's matrix ID from a type of 3PID used when registering.
        # This overrides any matrix ID the user proposes when calling /register
        # The 3PID type should be present in registrations_require_3pid to avoid
        # users failing to register if they don't specify the right kind of 3pid.
        #
        # register_mxid_from_3pid: email

        # Mandate that users are only allowed to associate certain formats of
        # 3PIDs with accounts on this server.
        #
        # Use an Identity Server to establish which 3PIDs are allowed to register?
        # Overrides allowed_local_3pids below.
        # check_is_for_allowed_local_3pids: matrix.org
        #
        # If you are using an IS you can also check whether that IS registers
        # pending invites for the given 3PID (and then allow it to sign up on
        # the platform):
        #
        # allow_invited_3pids: False
        #
        # allowed_local_3pids:
        #     - medium: email
        #       pattern: ".*@matrix\\.org"
@@ -75,6 +111,11 @@ class RegistrationConfig(Config):
        #     - medium: msisdn
        #       pattern: "\\+44"

        # If true, stop users from trying to change the 3PIDs associated with
        # their accounts.
        #
        # disable_3pid_changes: False

        # If set, allows registration by anyone who also has the shared
        # secret, even if registration is otherwise disabled.
        registration_shared_secret: "%(registration_shared_secret)s"
@@ -98,6 +139,28 @@ class RegistrationConfig(Config):
            - vector.im
            - riot.im

        # If enabled, user IDs, display names and avatar URLs will be replicated
        # to this server whenever they change.
        # This is an experimental API currently implemented by sydent to support
        # cross-homeserver user directories.
        # replicate_user_profiles_to: example.com

        # If specified, attempt to replay registrations, profile changes & 3pid
        # bindings on the given target homeserver via the AS API. The HS is authed
        # via a given AS token.
        # shadow_server:
        #   hs_url: https://shadow.example.com
        #   hs: shadow.example.com
        #   as_token: 12u394refgbdhivsia

        # If enabled, don't let users set their own display names/avatars
        # other than for the very first time (unless they are a server admin).
        # Useful when provisioning users based on the contents of a 3rd party
        # directory and to avoid ambiguities.
        #
        # disable_set_displayname: False
        # disable_set_avatar_url: False

        # Users who register on this homeserver will automatically be joined
        # to these rooms
        #auto_join_rooms:

@@ -23,11 +23,15 @@ class UserDirectoryConfig(Config):

    def read_config(self, config):
        self.user_directory_search_all_users = False
        self.user_directory_defer_to_id_server = None
        user_directory_config = config.get("user_directory", None)
        if user_directory_config:
            self.user_directory_search_all_users = (
                user_directory_config.get("search_all_users", False)
            )
            self.user_directory_defer_to_id_server = (
                user_directory_config.get("defer_to_id_server", None)
            )

    def default_config(self, config_dir_path, server_name, **kwargs):
        return """
@@ -41,4 +45,9 @@ class UserDirectoryConfig(Config):
        #
        #user_directory:
        #   search_all_users: false
        #
        #   If this is set, user search will be delegated to this ID server instead
        #   of synapse performing the search itself.
        #   This is an experimental API.
        #   defer_to_id_server: https://id.example.com
        """

@@ -15,8 +15,6 @@

import logging

from six.moves import urllib

from canonicaljson import json

from twisted.internet import defer, reactor
@@ -30,15 +28,15 @@ from synapse.util import logcontext

logger = logging.getLogger(__name__)

KEY_API_V2 = "/_matrix/key/v2/server/%s"
KEY_API_V1 = b"/_matrix/key/v1/"


@defer.inlineCallbacks
def fetch_server_key(server_name, tls_client_options_factory, key_id):
def fetch_server_key(server_name, tls_client_options_factory, path=KEY_API_V1):
    """Fetch the keys for a remote server."""

    factory = SynapseKeyClientFactory()
    factory.path = KEY_API_V2 % (urllib.parse.quote(key_id), )
    factory.path = path
    factory.host = server_name
    endpoint = matrix_federation_endpoint(
        reactor, server_name, tls_client_options_factory, timeout=30

+103 -7
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2017, 2018 New Vector Ltd.
# Copyright 2017 New Vector Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -18,6 +18,8 @@ import hashlib
import logging
from collections import namedtuple

from six.moves import urllib

from signedjson.key import (
    decode_verify_key_bytes,
    encode_verify_key_base64,
@@ -393,13 +395,32 @@ class Keyring(object):

    @defer.inlineCallbacks
    def get_keys_from_server(self, server_name_and_key_ids):
        @defer.inlineCallbacks
        def get_key(server_name, key_ids):
            keys = None
            try:
                keys = yield self.get_server_verify_key_v2_direct(
                    server_name, key_ids
                )
            except Exception as e:
                logger.info(
                    "Unable to get key %r for %r directly: %s %s",
                    key_ids, server_name,
                    type(e).__name__, str(e),
                )

            if not keys:
                keys = yield self.get_server_verify_key_v1_direct(
                    server_name, key_ids
                )

                keys = {server_name: keys}

            defer.returnValue(keys)

        results = yield logcontext.make_deferred_yieldable(defer.gatherResults(
            [
                run_in_background(
                    self.get_server_verify_key_v2_direct,
                    server_name,
                    key_ids,
                )
                run_in_background(get_key, server_name, key_ids)
                for server_name, key_ids in server_name_and_key_ids
            ],
            consumeErrors=True,
@@ -504,7 +525,10 @@ class Keyring(object):
                continue

            (response, tls_certificate) = yield fetch_server_key(
                server_name, self.hs.tls_client_options_factory, requested_key_id
                server_name, self.hs.tls_client_options_factory,
                path=("/_matrix/key/v2/server/%s" % (
                    urllib.parse.quote(requested_key_id),
                )).encode("ascii"),
            )

            if (u"signatures" not in response
@@ -633,6 +657,78 @@ class Keyring(object):

        defer.returnValue(results)

    @defer.inlineCallbacks
    def get_server_verify_key_v1_direct(self, server_name, key_ids):
        """Finds a verification key for the server with one of the key ids.
        Args:
            server_name (str): The name of the server to fetch a key for.
            keys_ids (list of str): The key_ids to check for.
        """

        # Try to fetch the key from the remote server.

        (response, tls_certificate) = yield fetch_server_key(
            server_name, self.hs.tls_client_options_factory
        )

        # Check the response.

        x509_certificate_bytes = crypto.dump_certificate(
            crypto.FILETYPE_ASN1, tls_certificate
        )

        if ("signatures" not in response
                or server_name not in response["signatures"]):
            raise KeyLookupError("Key response not signed by remote server")

        if "tls_certificate" not in response:
            raise KeyLookupError("Key response missing TLS certificate")

        tls_certificate_b64 = response["tls_certificate"]

        if encode_base64(x509_certificate_bytes) != tls_certificate_b64:
            raise KeyLookupError("TLS certificate doesn't match")

        # Cache the result in the datastore.

        time_now_ms = self.clock.time_msec()

        verify_keys = {}
        for key_id, key_base64 in response["verify_keys"].items():
            if is_signing_algorithm_supported(key_id):
                key_bytes = decode_base64(key_base64)
                verify_key = decode_verify_key_bytes(key_id, key_bytes)
                verify_key.time_added = time_now_ms
                verify_keys[key_id] = verify_key

        for key_id in response["signatures"][server_name]:
            if key_id not in response["verify_keys"]:
                raise KeyLookupError(
                    "Key response must include verification keys for all"
                    " signatures"
                )
            if key_id in verify_keys:
                verify_signed_json(
                    response,
                    server_name,
                    verify_keys[key_id]
                )

        yield self.store.store_server_certificate(
            server_name,
            server_name,
            time_now_ms,
            tls_certificate,
        )

        yield self.store_keys(
            server_name=server_name,
            from_server=server_name,
            verify_keys=verify_keys,
        )

        defer.returnValue(verify_keys)

    def store_keys(self, server_name, from_server, verify_keys):
        """Store a collection of verify keys for a given server
        Args:
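The essence of the new get_key helper above is a v2-then-v1 fallback. A self-contained sketch of that control flow, with synchronous stand-ins for the deferred calls (a simplification, not the real Keyring API):

    import logging

    logger = logging.getLogger(__name__)

    def get_key_with_fallback(fetch_v2, fetch_v1, server_name, key_ids):
        """Try the v2 key API first; log and fall back to v1 on any error."""
        keys = None
        try:
            keys = fetch_v2(server_name, key_ids)
        except Exception as e:
            # same log shape as the diff: don't propagate the v2 failure
            logger.info(
                "Unable to get key %r for %r directly: %s %s",
                key_ids, server_name, type(e).__name__, str(e),
            )
        if not keys:
            keys = {server_name: fetch_v1(server_name, key_ids)}
        return keys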
@@ -200,11 +200,11 @@ def _is_membership_change_allowed(event, auth_events):
    membership = event.content["membership"]

    # Check if this is the room creator joining:
    if len(event.prev_event_ids()) == 1 and Membership.JOIN == membership:
    if len(event.prev_events) == 1 and Membership.JOIN == membership:
        # Get room creation event:
        key = (EventTypes.Create, "", )
        create = auth_events.get(key)
        if create and event.prev_event_ids()[0] == create.event_id:
        if create and event.prev_events[0][0] == create.event_id:
            if create.content["creator"] == event.state_key:
                return
@@ -159,24 +159,6 @@ class EventBase(object):
    def keys(self):
        return six.iterkeys(self._event_dict)

    def prev_event_ids(self):
        """Returns the list of prev event IDs. The order matches the order
        specified in the event, though there is no meaning to it.

        Returns:
            list[str]: The list of event IDs of this event's prev_events
        """
        return [e for e, _ in self.prev_events]

    def auth_event_ids(self):
        """Returns the list of auth event IDs. The order matches the order
        specified in the event, though there is no meaning to it.

        Returns:
            list[str]: The list of event IDs of this event's auth_events
        """
        return [e for e, _ in self.auth_events]


class FrozenEvent(EventBase):
    def __init__(self, event_dict, internal_metadata_dict={}, rejected_reason=None):
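With those helpers removed, callers throughout the rest of this changeset go back to unpacking the raw (event_id, hashes) pairs stored in prev_events and auth_events. For example (the event ID and hash value below are illustrative):

    # prev_events holds (event_id, hashes) pairs in the event format
    # targeted by this changeset:
    prev_events = [("$abc123:example.com", {"sha256": "<base64 hash>"})]
    prev_event_ids = [e_id for e_id, _ in prev_events]
    # -> ["$abc123:example.com"]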
@@ -162,30 +162,8 @@ class FederationServer(FederationBase):
                p["age_ts"] = request_time - int(p["age"])
                del p["age"]

            # We try and pull out an event ID so that if later checks fail we
            # can log something sensible. We don't mandate an event ID here in
            # case future event formats get rid of the key.
            possible_event_id = p.get("event_id", "<Unknown>")

            # Now we get the room ID so that we can check that we know the
            # version of the room.
            room_id = p.get("room_id")
            if not room_id:
                logger.info(
                    "Ignoring PDU as does not have a room_id. Event ID: %s",
                    possible_event_id,
                )
                continue

            try:
                # In future we will actually use the room version to parse the
                # PDU into an event.
                yield self.store.get_room_version(room_id)
            except NotFoundError:
                logger.info("Ignoring PDU for unknown room_id: %s", room_id)
                continue

            event = event_from_pdu_json(p)
            room_id = event.room_id
            pdus_by_room.setdefault(room_id, []).append(event)

        pdu_results = {}
@@ -345,6 +323,11 @@ class FederationServer(FederationBase):
        else:
            defer.returnValue((404, ""))

    @defer.inlineCallbacks
    @log_function
    def on_pull_request(self, origin, versions):
        raise NotImplementedError("Pull transactions not implemented")

    @defer.inlineCallbacks
    def on_query_request(self, query_type, args):
        received_queries_counter.labels(query_type).inc()
@@ -183,7 +183,9 @@ class TransactionQueue(object):
                # banned then it won't receive the event because it won't
                # be in the room after the ban.
                destinations = yield self.state.get_current_hosts_in_room(
                    event.room_id, latest_event_ids=event.prev_event_ids(),
                    event.room_id, latest_event_ids=[
                        prev_id for prev_id, _ in event.prev_events
                    ],
                )
            except Exception:
                logger.exception(
@@ -362,6 +362,14 @@ class FederationSendServlet(BaseFederationServlet):
        defer.returnValue((code, response))


class FederationPullServlet(BaseFederationServlet):
    PATH = "/pull/"

    # This is for when someone asks us for everything since version X
    def on_GET(self, origin, content, query):
        return self.handler.on_pull_request(query["origin"][0], query["v"])


class FederationEventServlet(BaseFederationServlet):
    PATH = "/event/(?P<event_id>[^/]*)/"

@@ -1253,6 +1261,7 @@ class FederationGroupsSettingJoinPolicyServlet(BaseFederationServlet):

FEDERATION_SERVLET_CLASSES = (
    FederationSendServlet,
    FederationPullServlet,
    FederationEventServlet,
    FederationStateServlet,
    FederationStateIdsServlet,
@@ -117,6 +117,9 @@ class Transaction(JsonEncodedObject):
                "Require 'transaction_id' to construct a Transaction"
            )

        for p in pdus:
            p.transaction_id = kwargs["transaction_id"]

        kwargs["pdus"] = [p.get_pdu_json() for p in pdus]

        return Transaction(**kwargs)
@@ -59,7 +59,6 @@ class AuthHandler(BaseHandler):
            LoginType.EMAIL_IDENTITY: self._check_email_identity,
            LoginType.MSISDN: self._check_msisdn,
            LoginType.DUMMY: self._check_dummy_auth,
            LoginType.TERMS: self._check_terms_auth,
        }
        self.bcrypt_rounds = hs.config.bcrypt_rounds

@@ -432,9 +431,6 @@ class AuthHandler(BaseHandler):
    def _check_dummy_auth(self, authdict, _):
        return defer.succeed(True)

    def _check_terms_auth(self, authdict, _):
        return defer.succeed(True)

    @defer.inlineCallbacks
    def _check_threepid(self, medium, authdict):
        if 'threepid_creds' not in authdict:
@@ -466,22 +462,6 @@ class AuthHandler(BaseHandler):
    def _get_params_recaptcha(self):
        return {"public_key": self.hs.config.recaptcha_public_key}

    def _get_params_terms(self):
        return {
            "policies": {
                "privacy_policy": {
                    "version": self.hs.config.user_consent_version,
                    "en": {
                        "name": self.hs.config.user_consent_policy_name,
                        "url": "%s/_matrix/consent?v=%s" % (
                            self.hs.config.public_baseurl,
                            self.hs.config.user_consent_version,
                        ),
                    },
                },
            },
        }

    def _auth_dict_for_flows(self, flows, session):
        public_flows = []
        for f in flows:
@@ -489,7 +469,6 @@ class AuthHandler(BaseHandler):

        get_params = {
            LoginType.RECAPTCHA: self._get_params_recaptcha,
            LoginType.TERMS: self._get_params_terms,
        }

        params = {}
@@ -33,6 +33,7 @@ class DeactivateAccountHandler(BaseHandler):
        self._device_handler = hs.get_device_handler()
        self._room_member_handler = hs.get_room_member_handler()
        self._identity_handler = hs.get_handlers().identity_handler
        self._profile_handler = hs.get_profile_handler()
        self.user_directory_handler = hs.get_user_directory_handler()

        # Flag that indicates whether the process to part users from rooms is running
@@ -94,6 +95,9 @@ class DeactivateAccountHandler(BaseHandler):

        yield self.store.user_set_password_hash(user_id, None)

        user = UserID.from_string(user_id)
        yield self._profile_handler.set_active(user, False, False)

        # Add the user to a table of users pending deactivation (ie.
        # removal from all the rooms they're a member of)
        yield self.store.add_user_pending_deactivation(user_id)
@@ -138,30 +138,9 @@ class DirectoryHandler(BaseHandler):
        )

    @defer.inlineCallbacks
    def delete_association(self, requester, room_alias, send_event=True):
        """Remove an alias from the directory
    def delete_association(self, requester, room_alias):
        # association deletion for human users

        (this is only meant for human users; AS users should call
        delete_appservice_association)

        Args:
            requester (Requester):
            room_alias (RoomAlias):
            send_event (bool): Whether to send an updated m.room.aliases event.
                Note that, if we delete the canonical alias, we will always attempt
                to send an m.room.canonical_alias event

        Returns:
            Deferred[unicode]: room id that the alias used to point to

        Raises:
            NotFoundError: if the alias doesn't exist

            AuthError: if the user doesn't have perms to delete the alias (ie, the user
                is neither the creator of the alias, nor a server admin).

            SynapseError: if the alias belongs to an AS
        """
        user_id = requester.user.to_string()

        try:
@@ -189,11 +168,10 @@ class DirectoryHandler(BaseHandler):
        room_id = yield self._delete_association(room_alias)

        try:
            if send_event:
                yield self.send_room_alias_update_event(
                    requester,
                    room_id
                )
            yield self.send_room_alias_update_event(
                requester,
                room_id
            )

            yield self._update_canonical_alias(
                requester,
@@ -19,7 +19,7 @@ from six import iteritems

from twisted.internet import defer

from synapse.api.errors import NotFoundError, RoomKeysVersionError, StoreError
from synapse.api.errors import RoomKeysVersionError, StoreError, SynapseError
from synapse.util.async_helpers import Linearizer

logger = logging.getLogger(__name__)
@@ -55,8 +55,6 @@ class E2eRoomKeysHandler(object):
            room_id(string): room ID to get keys for, for None to get keys for all rooms
            session_id(string): session ID to get keys for, for None to get keys for all
                sessions
        Raises:
            NotFoundError: if the backup version does not exist
        Returns:
            A deferred list of dicts giving the session_data and message metadata for
            these room keys.
@@ -65,19 +63,13 @@ class E2eRoomKeysHandler(object):
        # we deliberately take the lock to get keys so that changing the version
        # works atomically
        with (yield self._upload_linearizer.queue(user_id)):
            # make sure the backup version exists
            try:
                yield self.store.get_e2e_room_keys_version_info(user_id, version)
            except StoreError as e:
                if e.code == 404:
                    raise NotFoundError("Unknown backup version")
                else:
                    raise

            results = yield self.store.get_e2e_room_keys(
                user_id, version, room_id, session_id
            )

            if results['rooms'] == {}:
                raise SynapseError(404, "No room_keys found")

            defer.returnValue(results)

    @defer.inlineCallbacks
@@ -128,7 +120,7 @@ class E2eRoomKeysHandler(object):
            }

        Raises:
            NotFoundError: if there are no versions defined
            SynapseError: with code 404 if there are no versions defined
            RoomKeysVersionError: if the uploaded version is not the current version
        """

@@ -142,7 +134,7 @@ class E2eRoomKeysHandler(object):
                version_info = yield self.store.get_e2e_room_keys_version_info(user_id)
            except StoreError as e:
                if e.code == 404:
                    raise NotFoundError("Version '%s' not found" % (version,))
                    raise SynapseError(404, "Version '%s' not found" % (version,))
                else:
                    raise
@@ -156,7 +148,7 @@ class E2eRoomKeysHandler(object):
                    raise RoomKeysVersionError(current_version=version_info['version'])
            except StoreError as e:
                if e.code == 404:
                    raise NotFoundError("Version '%s' not found" % (version,))
                    raise SynapseError(404, "Version '%s' not found" % (version,))
                else:
                    raise
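The swap between SynapseError(404, ...) and NotFoundError in the hunks above is behaviour-preserving on the wire, because NotFoundError is essentially a SynapseError fixed to code 404. A simplified model (not the exact classes from synapse.api.errors):

    class SynapseError(Exception):
        def __init__(self, code, msg):
            super(SynapseError, self).__init__(msg)
            self.code = code
            self.msg = msg

    class NotFoundError(SynapseError):
        """A 404 with a clearer name at the call site."""
        def __init__(self, msg="Not found"):
            super(NotFoundError, self).__init__(404, msg)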
+113 -76
@@ -202,22 +202,27 @@ class FederationHandler(BaseHandler):
            self.room_queues[room_id].append((pdu, origin))
            return

        # If we're not in the room just ditch the event entirely. This is
        # probably an old server that has come back and thinks we're still in
        # the room (or we've been rejoined to the room by a state reset).
        # If we're no longer in the room just ditch the event entirely. This
        # is probably an old server that has come back and thinks we're still
        # in the room (or we've been rejoined to the room by a state reset).
        #
        # Note that if we were never in the room then we would have already
        # dropped the event, since we wouldn't know the room version.
        # If we were never in the room then maybe our database got vaped and
        # we should check if we *are* in fact in the room. If we are then we
        # can magically rejoin the room.
        is_in_room = yield self.auth.check_host_in_room(
            room_id,
            self.server_name
        )
        if not is_in_room:
            logger.info(
                "[%s %s] Ignoring PDU from %s as we're not in the room",
                room_id, event_id, origin,
        was_in_room = yield self.store.was_host_joined(
            pdu.room_id, self.server_name,
        )
            defer.returnValue(None)
        if was_in_room:
            logger.info(
                "[%s %s] Ignoring PDU from %s as we've left the room",
                room_id, event_id, origin,
            )
            defer.returnValue(None)

        state = None
        auth_chain = []
@@ -234,7 +239,7 @@ class FederationHandler(BaseHandler):
            room_id, event_id, min_depth,
        )

        prevs = set(pdu.prev_event_ids())
        prevs = {e_id for e_id, _ in pdu.prev_events}
        seen = yield self.store.have_seen_events(prevs)

        if min_depth and pdu.depth < min_depth:
@@ -552,54 +557,86 @@ class FederationHandler(BaseHandler):
            room_id, event_id, event,
        )

        event_ids = set()
        if state:
            event_ids |= {e.event_id for e in state}
        if auth_chain:
            event_ids |= {e.event_id for e in auth_chain}

        seen_ids = yield self.store.have_seen_events(event_ids)

        if state and auth_chain is not None:
            # If we have any state or auth_chain given to us by the replication
            # layer, then we should handle them (if we haven't before.)

            event_infos = []

            for e in itertools.chain(auth_chain, state):
                if e.event_id in seen_ids:
                    continue
                e.internal_metadata.outlier = True
                auth_ids = e.auth_event_ids()
                auth = {
                    (e.type, e.state_key): e for e in auth_chain
                    if e.event_id in auth_ids or e.type == EventTypes.Create
                }
                event_infos.append({
                    "event": e,
                    "auth_events": auth,
                })
                seen_ids.add(e.event_id)
        # FIXME (erikj): Awful hack to make the case where we are not currently
        # in the room work
        # If state and auth_chain are None, then we don't need to do this check
        # as we already know we have enough state in the DB to handle this
        # event.
        if state and auth_chain and not event.internal_metadata.is_outlier():
            is_in_room = yield self.auth.check_host_in_room(
                room_id,
                self.server_name
            )
        else:
            is_in_room = True

        if not is_in_room:
            logger.info(
                "[%s %s] persisting newly-received auth/state events %s",
                room_id, event_id, [e["event"].event_id for e in event_infos]
                "[%s %s] Got event for room we're not in",
                room_id, event_id,
            )
            yield self._handle_new_events(origin, event_infos)

            try:
                context = yield self._handle_new_event(
                    origin,
                    event,
                    state=state,
                )
            except AuthError as e:
                raise FederationError(
                    "ERROR",
                    e.code,
                    e.msg,
                    affected=event.event_id,
                )
            try:
                yield self._persist_auth_tree(
                    origin, auth_chain, state, event
                )
            except AuthError as e:
                raise FederationError(
                    "ERROR",
                    e.code,
                    e.msg,
                    affected=event_id,
                )

        else:
            event_ids = set()
            if state:
                event_ids |= {e.event_id for e in state}
            if auth_chain:
                event_ids |= {e.event_id for e in auth_chain}

            seen_ids = yield self.store.have_seen_events(event_ids)

            if state and auth_chain is not None:
                # If we have any state or auth_chain given to us by the replication
                # layer, then we should handle them (if we haven't before.)

                event_infos = []

                for e in itertools.chain(auth_chain, state):
                    if e.event_id in seen_ids:
                        continue
                    e.internal_metadata.outlier = True
                    auth_ids = [e_id for e_id, _ in e.auth_events]
                    auth = {
                        (e.type, e.state_key): e for e in auth_chain
                        if e.event_id in auth_ids or e.type == EventTypes.Create
                    }
                    event_infos.append({
                        "event": e,
                        "auth_events": auth,
                    })
                    seen_ids.add(e.event_id)

                logger.info(
                    "[%s %s] persisting newly-received auth/state events %s",
                    room_id, event_id, [e["event"].event_id for e in event_infos]
                )
                yield self._handle_new_events(origin, event_infos)

            try:
                context = yield self._handle_new_event(
                    origin,
                    event,
                    state=state,
                )
            except AuthError as e:
                raise FederationError(
                    "ERROR",
                    e.code,
                    e.msg,
                    affected=event.event_id,
                )

        room = yield self.store.get_room(room_id)

@@ -689,7 +726,7 @@ class FederationHandler(BaseHandler):
        edges = [
            ev.event_id
            for ev in events
            if set(ev.prev_event_ids()) - event_ids
            if set(e_id for e_id, _ in ev.prev_events) - event_ids
        ]

        logger.info(
@@ -716,7 +753,7 @@ class FederationHandler(BaseHandler):
        required_auth = set(
            a_id
            for event in events + list(state_events.values()) + list(auth_events.values())
            for a_id in event.auth_event_ids()
            for a_id, _ in event.auth_events
        )
        auth_events.update({
            e_id: event_map[e_id] for e_id in required_auth if e_id in event_map
@@ -732,7 +769,7 @@ class FederationHandler(BaseHandler):
            auth_events.update(ret_events)

            required_auth.update(
                a_id for event in ret_events.values() for a_id in event.auth_event_ids()
                a_id for event in ret_events.values() for a_id, _ in event.auth_events
            )
            missing_auth = required_auth - set(auth_events)

@@ -759,7 +796,7 @@ class FederationHandler(BaseHandler):
                required_auth.update(
                    a_id
                    for event in results if event
                    for a_id in event.auth_event_ids()
                    for a_id, _ in event.auth_events
                )
                missing_auth = required_auth - set(auth_events)

@@ -779,7 +816,7 @@ class FederationHandler(BaseHandler):
                "auth_events": {
                    (auth_events[a_id].type, auth_events[a_id].state_key):
                        auth_events[a_id]
                    for a_id in a.auth_event_ids()
                    for a_id, _ in a.auth_events
                    if a_id in auth_events
                }
            })
@@ -791,7 +828,7 @@ class FederationHandler(BaseHandler):
                "auth_events": {
                    (auth_events[a_id].type, auth_events[a_id].state_key):
                        auth_events[a_id]
                    for a_id in event_map[e_id].auth_event_ids()
                    for a_id, _ in event_map[e_id].auth_events
                    if a_id in auth_events
                }
            })
@@ -1004,17 +1041,17 @@ class FederationHandler(BaseHandler):
        Raises:
            SynapseError if the event does not pass muster
        """
        if len(ev.prev_event_ids()) > 20:
        if len(ev.prev_events) > 20:
            logger.warn("Rejecting event %s which has %i prev_events",
                        ev.event_id, len(ev.prev_event_ids()))
                        ev.event_id, len(ev.prev_events))
            raise SynapseError(
                http_client.BAD_REQUEST,
                "Too many prev_events",
            )

        if len(ev.auth_event_ids()) > 10:
        if len(ev.auth_events) > 10:
            logger.warn("Rejecting event %s which has %i auth_events",
                        ev.event_id, len(ev.auth_event_ids()))
                        ev.event_id, len(ev.auth_events))
            raise SynapseError(
                http_client.BAD_REQUEST,
                "Too many auth_events",
@@ -1039,7 +1076,7 @@ class FederationHandler(BaseHandler):
    def on_event_auth(self, event_id):
        event = yield self.store.get_event(event_id)
        auth = yield self.store.get_auth_chain(
            [auth_id for auth_id in event.auth_event_ids()],
            [auth_id for auth_id, _ in event.auth_events],
            include_given=True
        )
        defer.returnValue([e for e in auth])
@@ -1661,7 +1698,7 @@ class FederationHandler(BaseHandler):

        missing_auth_events = set()
        for e in itertools.chain(auth_events, state, [event]):
            for e_id in e.auth_event_ids():
            for e_id, _ in e.auth_events:
                if e_id not in event_map:
                    missing_auth_events.add(e_id)

@@ -1680,7 +1717,7 @@ class FederationHandler(BaseHandler):
        for e in itertools.chain(auth_events, state, [event]):
            auth_for_e = {
                (event_map[e_id].type, event_map[e_id].state_key): event_map[e_id]
                for e_id in e.auth_event_ids()
                for e_id, _ in e.auth_events
                if e_id in event_map
            }
            if create_event:
@@ -1748,10 +1785,10 @@ class FederationHandler(BaseHandler):

        # This is a hack to fix some old rooms where the initial join event
        # didn't reference the create event in its auth events.
        if event.type == EventTypes.Member and not event.auth_event_ids():
            if len(event.prev_event_ids()) == 1 and event.depth < 5:
        if event.type == EventTypes.Member and not event.auth_events:
            if len(event.prev_events) == 1 and event.depth < 5:
                c = yield self.store.get_event(
                    event.prev_event_ids()[0],
                    event.prev_events[0][0],
                    allow_none=True,
                )
                if c and c.type == EventTypes.Create:
@@ -1798,7 +1835,7 @@ class FederationHandler(BaseHandler):

        # Now get the current auth_chain for the event.
        local_auth_chain = yield self.store.get_auth_chain(
            [auth_id for auth_id in event.auth_event_ids()],
            [auth_id for auth_id, _ in event.auth_events],
            include_given=True
        )

@@ -1854,7 +1891,7 @@ class FederationHandler(BaseHandler):
        """
        # Check if we have all the auth events.
        current_state = set(e.event_id for e in auth_events.values())
        event_auth_events = set(event.auth_event_ids())
        event_auth_events = set(e_id for e_id, _ in event.auth_events)

        if event.is_state():
            event_key = (event.type, event.state_key)
@@ -1898,7 +1935,7 @@ class FederationHandler(BaseHandler):
                    continue

                try:
                    auth_ids = e.auth_event_ids()
                    auth_ids = [e_id for e_id, _ in e.auth_events]
                    auth = {
                        (e.type, e.state_key): e for e in remote_auth_chain
                        if e.event_id in auth_ids or e.type == EventTypes.Create
@@ -1919,7 +1956,7 @@ class FederationHandler(BaseHandler):
                    pass

                have_events = yield self.store.get_seen_events_with_rejections(
                    event.auth_event_ids()
                    [e_id for e_id, _ in event.auth_events]
                )
                seen_events = set(have_events.keys())
            except Exception:
@@ -2021,7 +2058,7 @@ class FederationHandler(BaseHandler):
                        continue

                    try:
                        auth_ids = ev.auth_event_ids()
                        auth_ids = [e_id for e_id, _ in ev.auth_events]
                        auth = {
                            (e.type, e.state_key): e
                            for e in result["auth_chain"]
@@ -2213,7 +2250,7 @@ class FederationHandler(BaseHandler):
        missing_remote_ids = [e.event_id for e in missing_remotes]
        base_remote_rejected = list(missing_remotes)
        for e in missing_remotes:
            for e_id, _ in e.auth_events:
                if e_id in missing_remote_ids:
                    try:
                        base_remote_rejected.remove(e)
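Condensing the new receive path at the top of this file: a PDU is queued if a join to its room is in flight, and dropped only if this server had joined the room and has since left; if it was never joined, processing continues so membership can be re-checked. A simplified decision-table sketch (names and return values are illustrative, not from the diff):

    def pdu_disposition(join_in_flight, was_host_joined):
        """Simplified model of the checks above."""
        if join_in_flight:
            return "queue"    # replay once the join completes
        if was_host_joined:
            return "ignore"   # we left the room; ditch the event
        return "process"      # never joined: maybe the DB got vaped,
                              # so carry on and re-check membership

    assert pdu_disposition(True, False) == "queue"
    assert pdu_disposition(False, True) == "ignore"
    assert pdu_disposition(False, False) == "process"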
@@ -47,6 +47,7 @@ class IdentityHandler(BaseHandler):
        self.trust_any_id_server_just_for_testing_do_not_use = (
            hs.config.use_insecure_ssl_client_just_for_testing_do_not_use
        )
        self.rewrite_identity_server_urls = hs.config.rewrite_identity_server_urls

    def _should_trust_id_server(self, id_server):
        if id_server not in self.trusted_id_servers:
@@ -84,7 +85,10 @@ class IdentityHandler(BaseHandler):
                'credentials', id_server
            )
            defer.returnValue(None)

        # if we have a rewrite rule set for the identity server,
        # apply it now.
        if id_server in self.rewrite_identity_server_urls:
            id_server = self.rewrite_identity_server_urls[id_server]
        try:
            data = yield self.http_client.get_json(
                "https://%s%s" % (
@@ -119,7 +123,10 @@ class IdentityHandler(BaseHandler):
            client_secret = creds['clientSecret']
        else:
            raise SynapseError(400, "No client_secret in creds")

        # if we have a rewrite rule set for the identity server,
        # apply it now.
        if id_server in self.rewrite_identity_server_urls:
            id_server = self.rewrite_identity_server_urls[id_server]
        try:
            data = yield self.http_client.post_urlencoded_get_json(
                "https://%s%s" % (
@@ -162,7 +169,6 @@ class IdentityHandler(BaseHandler):
        # deletion request to.
        id_server = next(iter(self.trusted_id_servers))

        url = "https://%s/_matrix/identity/api/v1/3pid/unbind" % (id_server,)
        content = {
            "mxid": mxid,
            "threepid": threepid,
@@ -179,6 +185,15 @@ class IdentityHandler(BaseHandler):
            content=content,
            destination_is=id_server,
        )
        # if we have a rewrite rule set for the identity server,
        # apply it now.
        #
        # Note that destination_is has to be the real id_server, not
        # the server we connect to.
        if id_server in self.rewrite_identity_server_urls:
            id_server = self.rewrite_identity_server_urls[id_server]

        url = "https://%s/_matrix/identity/api/v1/3pid/unbind" % (id_server,)
        try:
            yield self.http_client.post_json_get_json(
                url,
@@ -210,7 +225,10 @@ class IdentityHandler(BaseHandler):
            'send_attempt': send_attempt,
        }
        params.update(kwargs)

        # if we have a rewrite rule set for the identity server,
        # apply it now.
        if id_server in self.rewrite_identity_server_urls:
            id_server = self.rewrite_identity_server_urls[id_server]
        try:
            data = yield self.http_client.post_json_get_json(
                "https://%s%s" % (
@@ -242,7 +260,10 @@ class IdentityHandler(BaseHandler):
            'send_attempt': send_attempt,
        }
        params.update(kwargs)

        # if we have a rewrite rule set for the identity server,
        # apply it now.
        if id_server in self.rewrite_identity_server_urls:
            id_server = self.rewrite_identity_server_urls[id_server]
        try:
            data = yield self.http_client.post_json_get_json(
                "https://%s%s" % (
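The rewrite rule applied at each call site above reduces to a single dictionary lookup; isolated for clarity (this helper is ours, not the diff's, and the hostnames below are examples):

    def rewrite_id_server(id_server, rewrite_map):
        """Return the host to connect to for an identity server; the logical
        id_server is still used for signing / destination_is checks."""
        return rewrite_map.get(id_server, id_server)

    assert rewrite_id_server(
        "id.example.com", {"id.example.com": "id.internal:8090"}
    ) == "id.internal:8090"
    assert rewrite_id_server("other.example.com", {}) == "other.example.com"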
@@ -427,9 +427,6 @@ class EventCreationHandler(object):

        if event.is_state():
            prev_state = yield self.deduplicate_state_event(event, context)
            logger.info(
                "Not bothering to persist duplicate state event %s", event.event_id,
            )
            if prev_state is not None:
                defer.returnValue(prev_state)
+144 -7
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -15,7 +16,9 @@

import logging

from twisted.internet import defer
from signedjson.sign import sign_json

from twisted.internet import defer, reactor

from synapse.api.errors import (
    AuthError,
@@ -26,6 +29,7 @@ from synapse.api.errors import (
)
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.types import UserID, get_domain_from_id
from synapse.util.logcontext import run_in_background

from ._base import BaseHandler

@@ -40,6 +44,8 @@ class BaseProfileHandler(BaseHandler):
    subclass MasterProfileHandler
    """

    PROFILE_REPLICATE_INTERVAL = 2 * 60 * 1000

    def __init__(self, hs):
        super(BaseProfileHandler, self).__init__(hs)

@@ -50,6 +56,84 @@ class BaseProfileHandler(BaseHandler):

        self.user_directory_handler = hs.get_user_directory_handler()

        self.http_client = hs.get_simple_http_client()

        if hs.config.worker_app is None:
            self.clock.looping_call(
                self._start_update_remote_profile_cache, self.PROFILE_UPDATE_MS,
            )

        if len(self.hs.config.replicate_user_profiles_to) > 0:
            reactor.callWhenRunning(self._assign_profile_replication_batches)
            reactor.callWhenRunning(self._replicate_profiles)
            # Add a looping call to replicate_profiles: this handles retries
            # if the replication is unsuccessful when the user updated their
            # profile.
            self.clock.looping_call(
                self._replicate_profiles, self.PROFILE_REPLICATE_INTERVAL
            )

    @defer.inlineCallbacks
    def _assign_profile_replication_batches(self):
        """If no profile replication has been done yet, allocate replication batch
        numbers to each profile to start the replication process.
        """
        logger.info("Assigning profile batch numbers...")
        total = 0
        while True:
            assigned = yield self.store.assign_profile_batch()
            total += assigned
            if assigned == 0:
                break
        logger.info("Assigned %d profile batch numbers", total)

    @defer.inlineCallbacks
    def _replicate_profiles(self):
        """If any profile data has been updated and not pushed to the replication targets,
        replicate it.
        """
        host_batches = yield self.store.get_replication_hosts()
        latest_batch = yield self.store.get_latest_profile_replication_batch_number()
        if latest_batch is None:
            latest_batch = -1
        for repl_host in self.hs.config.replicate_user_profiles_to:
            if repl_host not in host_batches:
                host_batches[repl_host] = -1
            try:
                for i in xrange(host_batches[repl_host] + 1, latest_batch + 1):
                    yield self._replicate_host_profile_batch(repl_host, i)
            except Exception:
                logger.exception(
                    "Exception while replicating to %s: aborting for now", repl_host,
                )

    @defer.inlineCallbacks
    def _replicate_host_profile_batch(self, host, batchnum):
        logger.info("Replicating profile batch %d to %s", batchnum, host)
        batch_rows = yield self.store.get_profile_batch(batchnum)
        batch = {
            UserID(r["user_id"], self.hs.hostname).to_string(): ({
                "display_name": r["displayname"],
                "avatar_url": r["avatar_url"],
            } if r["active"] else None) for r in batch_rows
        }

        url = "https://%s/_matrix/identity/api/v1/replicate_profiles" % (host,)
        body = {
            "batchnum": batchnum,
            "batch": batch,
            "origin_server": self.hs.hostname,
        }
        signed_body = sign_json(body, self.hs.hostname, self.hs.config.signing_key[0])
        try:
            yield self.http_client.post_json_get_json(url, signed_body)
            yield self.store.update_replication_batch_for_host(host, batchnum)
            logger.info("Successfully replicated profile batch %d to %s", batchnum, host)
        except Exception:
            # This will get retried when the looping call next comes around
            logger.exception("Failed to replicate profile batch %d to %s", batchnum, host)
            raise

    @defer.inlineCallbacks
    def get_profile(self, user_id):
        target_user = UserID.from_string(user_id)
@@ -147,19 +231,30 @@ class BaseProfileHandler(BaseHandler):

    @defer.inlineCallbacks
    def set_displayname(self, target_user, requester, new_displayname, by_admin=False):
        """target_user is the user whose displayname is to be changed;
        auth_user is the user attempting to make this change."""
        """target_user is the UserID whose displayname is to be changed;
        requester is the authenticated user attempting to make this change."""
        if not self.hs.is_mine(target_user):
            raise SynapseError(400, "User is not hosted on this Home Server")

        if not by_admin and target_user != requester.user:
        if not by_admin and requester and target_user != requester.user:
            raise AuthError(400, "Cannot set another user's displayname")

        if not by_admin and self.hs.config.disable_set_displayname:
            profile = yield self.store.get_profileinfo(target_user.localpart)
            if profile.display_name:
                raise SynapseError(400, "Changing displayname is disabled on this server")

        if new_displayname == '':
            new_displayname = None

        if len(self.hs.config.replicate_user_profiles_to) > 0:
            cur_batchnum = yield self.store.get_latest_profile_replication_batch_number()
            new_batchnum = 0 if cur_batchnum is None else cur_batchnum + 1
        else:
            new_batchnum = None

        yield self.store.set_profile_displayname(
            target_user.localpart, new_displayname
            target_user.localpart, new_displayname, new_batchnum
        )

        if self.hs.config.user_directory_search_all_users:
@@ -168,7 +263,35 @@ class BaseProfileHandler(BaseHandler):
                target_user.to_string(), profile
            )

        yield self._update_join_states(requester, target_user)
        if requester:
            yield self._update_join_states(requester, target_user)

        # start a profile replication push
        run_in_background(self._replicate_profiles)

    @defer.inlineCallbacks
    def set_active(self, target_user, active, hide):
        """
        Sets the 'active' flag on a user profile. If set to false, the user account is
        considered deactivated or hidden.
        If 'hide' is true, then we interpret active=False as a request to try to hide the
        user rather than deactivating it. This means withholding the profile from replication
        (and marking it as inactive) rather than clearing the profile from the HS DB.
        Note that unlike set_displayname and set_avatar_url, this does *not* perform
        authorization checks! This is because the only place it's used currently is
        in account deactivation where we've already done these checks anyway.
        """
        if len(self.hs.config.replicate_user_profiles_to) > 0:
            cur_batchnum = yield self.store.get_latest_profile_replication_batch_number()
            new_batchnum = 0 if cur_batchnum is None else cur_batchnum + 1
        else:
            new_batchnum = None
        yield self.store.set_profile_active(
            target_user.localpart, active, hide, new_batchnum
        )

        # start a profile replication push
        run_in_background(self._replicate_profiles)

    @defer.inlineCallbacks
    def get_avatar_url(self, target_user):
@@ -210,8 +333,19 @@ class BaseProfileHandler(BaseHandler):
        if not by_admin and target_user != requester.user:
            raise AuthError(400, "Cannot set another user's avatar_url")

        if not by_admin and self.hs.config.disable_set_avatar_url:
            profile = yield self.store.get_profileinfo(target_user.localpart)
            if profile.avatar_url:
                raise SynapseError(400, "Changing avatar url is disabled on this server")

        if len(self.hs.config.replicate_user_profiles_to) > 0:
            cur_batchnum = yield self.store.get_latest_profile_replication_batch_number()
            new_batchnum = 0 if cur_batchnum is None else cur_batchnum + 1
        else:
            new_batchnum = None

        yield self.store.set_profile_avatar_url(
            target_user.localpart, new_avatar_url
            target_user.localpart, new_avatar_url, new_batchnum,
        )

        if self.hs.config.user_directory_search_all_users:
@@ -222,6 +356,9 @@ class BaseProfileHandler(BaseHandler):

        yield self._update_join_states(requester, target_user)

        # start a profile replication push
        run_in_background(self._replicate_profiles)

    @defer.inlineCallbacks
    def on_profile_query(self, args):
        user = UserID.from_string(args["user_id"])
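The batch bookkeeping above reduces to two small rules: every profile change may open a new batch, and each replication target resumes from the first batch it has not yet acknowledged. In isolation (an assumed simplification of the diff's logic):

    def next_batchnum(cur_batchnum):
        # matches "0 if cur_batchnum is None else cur_batchnum + 1" above
        return 0 if cur_batchnum is None else cur_batchnum + 1

    def batches_to_send(host_progress, latest_batch):
        # host_progress: last batch successfully replicated (-1 if none yet)
        return list(range(host_progress + 1, latest_batch + 1))

    assert next_batchnum(None) == 0
    assert batches_to_send(-1, 2) == [0, 1, 2]
    assert batches_to_send(2, 2) == []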
@@ -50,7 +50,9 @@ class RegistrationHandler(BaseHandler):
        self._auth_handler = hs.get_auth_handler()
        self.profile_handler = hs.get_profile_handler()
        self.user_directory_handler = hs.get_user_directory_handler()
        self.room_creation_handler = self.hs.get_room_creation_handler()
        self.captcha_client = CaptchaServerHttpClient(hs)
        self.http_client = hs.get_simple_http_client()

        self._next_generated_user_id = None

@@ -124,6 +126,7 @@ class RegistrationHandler(BaseHandler):
            generate_token=True,
            guest_access_token=None,
            make_guest=False,
            display_name=None,
            admin=False,
            threepid=None,
        ):
@@ -140,6 +143,7 @@ class RegistrationHandler(BaseHandler):
                since it offers no means of associating a device_id with the
                access_token. Instead you should call auth_handler.issue_access_token
                after registration.
            display_name (str): The displayname to set for this user, if any
        Returns:
            A tuple of (user_id, access_token).
        Raises:
@@ -178,13 +182,20 @@ class RegistrationHandler(BaseHandler):
                password_hash=password_hash,
                was_guest=was_guest,
                make_guest=make_guest,
                create_profile_with_localpart=(
                    # If the user was a guest then they already have a profile
                    None if was_guest else user.localpart
                ),
                admin=admin,
            )

            if display_name is None:
                display_name = (
                    # If the user was a guest then they already have a profile
                    None if was_guest else user.localpart
                )

            if display_name:
                yield self.profile_handler.set_displayname(
                    user, None, display_name, by_admin=True,
                )

            if self.hs.config.user_directory_search_all_users:
                profile = yield self.store.get_profileinfo(localpart)
                yield self.user_directory_handler.handle_local_profile_change(
@@ -209,8 +220,12 @@ class RegistrationHandler(BaseHandler):
                    token=token,
                    password_hash=password_hash,
                    make_guest=make_guest,
                    create_profile_with_localpart=user.localpart,
                )

                yield self.profile_handler.set_displayname(
                    user, None, user.localpart, by_admin=True,
                )

            except SynapseError:
                # if user id is taken, just generate another
                user = None
@@ -240,10 +255,7 @@ class RegistrationHandler(BaseHandler):
                else:
                    # create room expects the localpart of the room alias
                    room_alias_localpart = room_alias.localpart

                    # getting the RoomCreationHandler during init gives a dependency
                    # loop
                    yield self.hs.get_room_creation_handler().create_room(
                    yield self.room_creation_handler.create_room(
                        fake_requester,
                        config={
                            "preset": "public_chat",
@@ -256,10 +268,15 @@ class RegistrationHandler(BaseHandler):
            except Exception as e:
                logger.error("Failed to join new user to %r: %r", r, e)

        # We used to generate default identicons here, but nowadays
        # we want clients to generate their own as part of their branding
        # rather than there being consistent matrix-wide ones, so we don't.
        defer.returnValue((user_id, token))

    @defer.inlineCallbacks
    def appservice_register(self, user_localpart, as_token):
    def appservice_register(self, user_localpart, as_token, password, display_name):
        # FIXME: this should be factored out and merged with normal register()

        user = UserID(user_localpart, self.hs.hostname)
        user_id = user.to_string()
        service = self.store.get_app_service_by_token(as_token)
@@ -277,12 +294,26 @@ class RegistrationHandler(BaseHandler):
            user_id, allowed_appservice=service
        )

        password_hash = ""
        if password:
            password_hash = yield self.auth_handler().hash(password)

        yield self.store.register(
            user_id=user_id,
            password_hash="",
            password_hash=password_hash,
            appservice_id=service_id,
            create_profile_with_localpart=user.localpart,
        )

        yield self.profile_handler.set_displayname(
            user, None, display_name or user.localpart, by_admin=True,
        )

        if self.hs.config.user_directory_search_all_users:
            profile = yield self.store.get_profileinfo(user_localpart)
            yield self.user_directory_handler.handle_local_profile_change(
                user_id, profile
            )

        defer.returnValue(user_id)

    @defer.inlineCallbacks
@@ -329,7 +360,10 @@ class RegistrationHandler(BaseHandler):
                user_id=user_id,
                token=token,
                password_hash=None,
                create_profile_with_localpart=user.localpart,
            )

            yield self.profile_handler.set_displayname(
                user, None, user.localpart, by_admin=True,
            )
        except Exception as e:
            yield self.store.add_access_token_to_user(user_id, token)
@@ -360,7 +394,9 @@ class RegistrationHandler(BaseHandler):
        logger.info("got threepid with medium '%s' and address '%s'",
                    threepid['medium'], threepid['address'])

        if not check_3pid_allowed(self.hs, threepid['medium'], threepid['address']):
        if not (
            yield check_3pid_allowed(self.hs, threepid['medium'], threepid['address'])
        ):
            raise RegistrationError(
                403, "Third party identifier is not allowed"
            )
@@ -402,6 +438,39 @@ class RegistrationHandler(BaseHandler):
                errcode=Codes.EXCLUSIVE
            )

    @defer.inlineCallbacks
    def shadow_register(self, localpart, display_name, auth_result, params):
        """Invokes the current registration on another server, using
        shared secret registration, passing in any auth_results from
        other registration UI auth flows (e.g. validated 3pids)
        Useful for setting up shadow/backup accounts on a parallel deployment.
        """

        # TODO: retries
        shadow_hs_url = self.hs.config.shadow_server.get("hs_url")
        as_token = self.hs.config.shadow_server.get("as_token")

        yield self.http_client.post_json_get_json(
            "%s/_matrix/client/r0/register?access_token=%s" % (
                shadow_hs_url, as_token,
            ),
            {
                # XXX: auth_result is an unspecified extension for shadow registration
                'auth_result': auth_result,
                # XXX: another unspecified extension for shadow registration to ensure
                # that the displayname is correctly set by the master server
                'display_name': display_name,
                'username': localpart,
                'password': params.get("password"),
                'bind_email': params.get("bind_email"),
                'bind_msisdn': params.get("bind_msisdn"),
                'device_id': params.get("device_id"),
                'initial_device_display_name': params.get("initial_device_display_name"),
                'inhibit_login': False,
                'access_token': as_token,
            }
        )

    @defer.inlineCallbacks
    def _generate_user_id(self, reseed=False):
        if reseed or self._next_generated_user_id is None:
@@ -488,18 +557,15 @@ class RegistrationHandler(BaseHandler):
                user_id=user_id,
                token=token,
                password_hash=password_hash,
                create_profile_with_localpart=user.localpart,
            )
            if displayname is not None:
                yield self.profile_handler.set_displayname(
                    user, None, displayname, by_admin=True,
                )
        else:
            yield self._auth_handler.delete_access_tokens_for_user(user_id)
            yield self.store.add_access_token_to_user(user_id=user_id, token=token)

        if displayname is not None:
            logger.info("setting user display name: %s -> %s", user_id, displayname)
            yield self.profile_handler.set_displayname(
                user, requester, displayname, by_admin=True,
            )

        defer.returnValue((user_id, token))

    def auth_handler(self):
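The registration changes above move displayname handling out of store.register and into explicit set_displayname calls; the defaulting rule they implement boils down to the following (an assumed simplification, with example values):

    def default_display_name(was_guest, localpart, display_name):
        if display_name is not None:
            return display_name
        # a guest already has a profile, so leave it untouched
        return None if was_guest else localpart

    assert default_display_name(False, "alice", None) == "alice"
    assert default_display_name(True, "alice", None) is None
    assert default_display_name(False, "alice", "Alice") == "Alice"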
+45 -365
@@ -21,7 +21,7 @@ import math
import string
from collections import OrderedDict

from six import iteritems, string_types
from six import string_types

from twisted.internet import defer

@@ -32,11 +32,10 @@ from synapse.api.constants import (
    JoinRules,
    RoomCreationPreset,
)
from synapse.api.errors import AuthError, Codes, NotFoundError, StoreError, SynapseError
from synapse.api.errors import AuthError, Codes, StoreError, SynapseError
from synapse.storage.state import StateFilter
from synapse.types import RoomAlias, RoomID, RoomStreamToken, StreamToken, UserID
from synapse.util import stringutils
from synapse.util.async_helpers import Linearizer
from synapse.visibility import filter_events_for_client

from ._base import BaseHandler
@@ -54,12 +53,14 @@ class RoomCreationHandler(BaseHandler):
            "history_visibility": "shared",
            "original_invitees_have_ops": False,
            "guest_can_join": True,
            "encryption_alg": "m.megolm.v1.aes-sha2",
        },
        RoomCreationPreset.TRUSTED_PRIVATE_CHAT: {
            "join_rules": JoinRules.INVITE,
            "history_visibility": "shared",
            "original_invitees_have_ops": True,
            "guest_can_join": True,
            "encryption_alg": "m.megolm.v1.aes-sha2",
        },
        RoomCreationPreset.PUBLIC_CHAT: {
            "join_rules": JoinRules.PUBLIC,
@@ -74,334 +75,6 @@ class RoomCreationHandler(BaseHandler):

        self.spam_checker = hs.get_spam_checker()
        self.event_creation_handler = hs.get_event_creation_handler()
        self.room_member_handler = hs.get_room_member_handler()

        # linearizer to stop two upgrades happening at once
        self._upgrade_linearizer = Linearizer("room_upgrade_linearizer")

    @defer.inlineCallbacks
    def upgrade_room(self, requester, old_room_id, new_version):
        """Replace a room with a new room with a different version

        Args:
            requester (synapse.types.Requester): the user requesting the upgrade
            old_room_id (unicode): the id of the room to be replaced
            new_version (unicode): the new room version to use

        Returns:
            Deferred[unicode]: the new room id
        """
        yield self.ratelimit(requester)

        user_id = requester.user.to_string()

        with (yield self._upgrade_linearizer.queue(old_room_id)):
            # start by allocating a new room id
            r = yield self.store.get_room(old_room_id)
            if r is None:
                raise NotFoundError("Unknown room id %s" % (old_room_id,))
            new_room_id = yield self._generate_room_id(
                creator_id=user_id, is_public=r["is_public"],
            )

            logger.info("Creating new room %s to replace %s", new_room_id, old_room_id)

            # we create and auth the tombstone event before properly creating the new
            # room, to check our user has perms in the old room.
            tombstone_event, tombstone_context = (
                yield self.event_creation_handler.create_event(
                    requester, {
                        "type": EventTypes.Tombstone,
                        "state_key": "",
                        "room_id": old_room_id,
                        "sender": user_id,
                        "content": {
                            "body": "This room has been replaced",
                            "replacement_room": new_room_id,
                        }
                    },
                    token_id=requester.access_token_id,
                )
            )
            yield self.auth.check_from_context(tombstone_event, tombstone_context)

            yield self.clone_exiting_room(
                requester,
                old_room_id=old_room_id,
                new_room_id=new_room_id,
                new_room_version=new_version,
                tombstone_event_id=tombstone_event.event_id,
            )

            # now send the tombstone
            yield self.event_creation_handler.send_nonmember_event(
                requester, tombstone_event, tombstone_context,
            )

            old_room_state = yield tombstone_context.get_current_state_ids(self.store)

            # update any aliases
            yield self._move_aliases_to_new_room(
                requester, old_room_id, new_room_id, old_room_state,
            )

            # and finally, shut down the PLs in the old room, and update them in the new
            # room.
            yield self._update_upgraded_room_pls(
                requester, old_room_id, new_room_id, old_room_state,
            )

            defer.returnValue(new_room_id)

    @defer.inlineCallbacks
    def _update_upgraded_room_pls(
            self, requester, old_room_id, new_room_id, old_room_state,
    ):
        """Send updated power levels in both rooms after an upgrade

        Args:
            requester (synapse.types.Requester): the user requesting the upgrade
            old_room_id (unicode): the id of the room to be replaced
            new_room_id (unicode): the id of the replacement room
            old_room_state (dict[tuple[str, str], str]): the state map for the old room

        Returns:
            Deferred
        """
        old_room_pl_event_id = old_room_state.get((EventTypes.PowerLevels, ""))

        if old_room_pl_event_id is None:
            logger.warning(
                "Not supported: upgrading a room with no PL event. Not setting PLs "
                "in old room.",
            )
            return

        old_room_pl_state = yield self.store.get_event(old_room_pl_event_id)

        # we try to stop regular users from speaking by setting the PL required
        # to send regular events and invites to 'Moderator' level. That's normally
        # 50, but if the default PL in a room is 50 or more, then we set the
        # required PL above that.

        pl_content = dict(old_room_pl_state.content)
        users_default = int(pl_content.get("users_default", 0))
        restricted_level = max(users_default + 1, 50)

        updated = False
        for v in ("invite", "events_default"):
            current = int(pl_content.get(v, 0))
            if current < restricted_level:
                logger.info(
                    "Setting level for %s in %s to %i (was %i)",
                    v, old_room_id, restricted_level, current,
                )
                pl_content[v] = restricted_level
                updated = True
            else:
                logger.info(
                    "Not setting level for %s (already %i)",
                    v, current,
                )

        if updated:
            try:
                yield self.event_creation_handler.create_and_send_nonmember_event(
                    requester, {
                        "type": EventTypes.PowerLevels,
                        "state_key": '',
                        "room_id": old_room_id,
                        "sender": requester.user.to_string(),
                        "content": pl_content,
                    }, ratelimit=False,
                )
            except AuthError as e:
                logger.warning("Unable to update PLs in old room: %s", e)

        logger.info("Setting correct PLs in new room")
        yield self.event_creation_handler.create_and_send_nonmember_event(
            requester, {
                "type": EventTypes.PowerLevels,
                "state_key": '',
                "room_id": new_room_id,
                "sender": requester.user.to_string(),
                "content": old_room_pl_state.content,
            }, ratelimit=False,
        )
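The clamp described in the comments above ("normally 50, but above the room's users_default if that is 50 or more") in isolation, with example inputs:

    def restricted_level(pl_content):
        users_default = int(pl_content.get("users_default", 0))
        return max(users_default + 1, 50)

    assert restricted_level({}) == 50
    assert restricted_level({"users_default": 50}) == 51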
@defer.inlineCallbacks
|
||||
def clone_exiting_room(
|
||||
self, requester, old_room_id, new_room_id, new_room_version,
|
||||
tombstone_event_id,
|
||||
):
|
||||
"""Populate a new room based on an old room
|
||||
|
||||
Args:
|
||||
requester (synapse.types.Requester): the user requesting the upgrade
|
||||
old_room_id (unicode): the id of the room to be replaced
|
||||
new_room_id (unicode): the id to give the new room (should already have been
|
||||
created with _gemerate_room_id())
|
||||
new_room_version (unicode): the new room version to use
|
||||
tombstone_event_id (unicode|str): the ID of the tombstone event in the old
|
||||
room.
|
||||
Returns:
|
||||
Deferred[None]
|
||||
"""
|
||||
user_id = requester.user.to_string()
|
||||
|
||||
if not self.spam_checker.user_may_create_room(user_id):
|
||||
raise SynapseError(403, "You are not permitted to create rooms")
|
||||
|
||||
creation_content = {
|
||||
"room_version": new_room_version,
|
||||
"predecessor": {
|
||||
"room_id": old_room_id,
|
||||
"event_id": tombstone_event_id,
|
||||
}
|
||||
}
|
||||
|
||||
initial_state = dict()
|
||||
|
||||
types_to_copy = (
|
||||
(EventTypes.JoinRules, ""),
|
||||
(EventTypes.Name, ""),
|
||||
(EventTypes.Topic, ""),
|
||||
(EventTypes.RoomHistoryVisibility, ""),
|
||||
(EventTypes.GuestAccess, ""),
|
||||
(EventTypes.RoomAvatar, ""),
|
||||
)
|
||||
|
||||
old_room_state_ids = yield self.store.get_filtered_current_state_ids(
|
||||
old_room_id, StateFilter.from_types(types_to_copy),
|
||||
)
|
||||
# map from event_id to BaseEvent
|
||||
old_room_state_events = yield self.store.get_events(old_room_state_ids.values())
|
||||
|
||||
for k, old_event_id in iteritems(old_room_state_ids):
|
||||
old_event = old_room_state_events.get(old_event_id)
|
||||
if old_event:
|
||||
initial_state[k] = old_event.content
|
||||
|
||||
yield self._send_events_for_new_room(
|
||||
requester,
|
||||
new_room_id,
|
||||
|
||||
# we expect to override all the presets with initial_state, so this is
|
||||
# somewhat arbitrary.
|
||||
preset_config=RoomCreationPreset.PRIVATE_CHAT,
|
||||
|
||||
invite_list=[],
|
||||
initial_state=initial_state,
|
||||
creation_content=creation_content,
|
||||
)
|
||||
|
||||
# XXX invites/joins
|
||||
# XXX 3pid invites
|
||||
|
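For reference, the predecessor block built above is what ties the replacement room back to the room it supersedes: it ends up in the new room's m.room.create event content. A sketch of the resulting creation content, with placeholder IDs:

    # Hypothetical IDs, for illustration only.
    creation_content = {
        "room_version": "2",
        "predecessor": {
            "room_id": "!oldroom:example.com",
            "event_id": "$tombstone-event-id",
        },
    }
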
    @defer.inlineCallbacks
    def _move_aliases_to_new_room(
        self, requester, old_room_id, new_room_id, old_room_state,
    ):
        directory_handler = self.hs.get_handlers().directory_handler

        aliases = yield self.store.get_aliases_for_room(old_room_id)

        # check to see if we have a canonical alias.
        canonical_alias = None
        canonical_alias_event_id = old_room_state.get((EventTypes.CanonicalAlias, ""))
        if canonical_alias_event_id:
            canonical_alias_event = yield self.store.get_event(canonical_alias_event_id)
            if canonical_alias_event:
                canonical_alias = canonical_alias_event.content.get("alias", "")

        # first we try to remove the aliases from the old room (we suppress sending
        # the room_aliases event until the end).
        #
        # Note that we'll only be able to remove aliases that (a) aren't owned by an AS,
        # and (b) were created by this user (unless the user is a server admin).
        #
        # This is probably correct - given we don't allow such aliases to be deleted
        # normally, it would be odd to allow it in the case of doing a room upgrade -
        # but it makes the upgrade less effective, and you have to wonder why a room
        # admin can't remove aliases that point to that room anyway.
        # (cf https://github.com/matrix-org/synapse/issues/2360)
        #
        removed_aliases = []
        for alias_str in aliases:
            alias = RoomAlias.from_string(alias_str)
            try:
                yield directory_handler.delete_association(
                    requester, alias, send_event=False,
                )
                removed_aliases.append(alias_str)
            except SynapseError as e:
                logger.warning(
                    "Unable to remove alias %s from old room: %s",
                    alias, e,
                )

        # if we didn't find any aliases, or couldn't remove any, we can skip the rest
        # of this.
        if not removed_aliases:
            return

        try:
            # this can fail if, for some reason, our user doesn't have perms to send
            # m.room.aliases events in the old room (note that we've already checked that
            # they have perms to send a tombstone event, so that's not terribly likely).
            #
            # If that happens, it's regrettable, but we should carry on: it's the same
            # as when you remove an alias from the directory normally - it just means that
            # the aliases event gets out of sync with the directory
            # (cf https://github.com/vector-im/riot-web/issues/2369)
            yield directory_handler.send_room_alias_update_event(
                requester, old_room_id,
            )
        except AuthError as e:
            logger.warning(
                "Failed to send updated alias event on old room: %s", e,
            )

        # we can now add any aliases we successfully removed to the new room.
        for alias in removed_aliases:
            try:
                yield directory_handler.create_association(
                    requester, RoomAlias.from_string(alias),
                    new_room_id, servers=(self.hs.hostname, ),
                    send_event=False,
                )
                logger.info("Moved alias %s to new room", alias)
            except SynapseError as e:
                # I'm not really expecting this to happen, but it could if the spam
                # checking module decides it shouldn't, or similar.
                logger.error(
                    "Error adding alias %s to new room: %s",
                    alias, e,
                )

        try:
            if canonical_alias and (canonical_alias in removed_aliases):
                yield self.event_creation_handler.create_and_send_nonmember_event(
                    requester,
                    {
                        "type": EventTypes.CanonicalAlias,
                        "state_key": "",
                        "room_id": new_room_id,
                        "sender": requester.user.to_string(),
                        "content": {"alias": canonical_alias, },
                    },
                    ratelimit=False
                )

            yield directory_handler.send_room_alias_update_event(
                requester, new_room_id,
            )
        except SynapseError as e:
            # again I'm not really expecting this to fail, but if it does, I'd rather
            # we returned the new room to the client at this point.
            logger.error(
                "Unable to send updated alias events in new room: %s", e,
            )

    @defer.inlineCallbacks
    def create_room(self, requester, config, ratelimit=True,
@@ -494,7 +167,28 @@ class RoomCreationHandler(BaseHandler):
        visibility = config.get("visibility", None)
        is_public = visibility == "public"

        room_id = yield self._generate_room_id(creator_id=user_id, is_public=is_public)
        # autogen room IDs and try to create it. We may clash, so just
        # try a few times till one goes through, giving up eventually.
        attempts = 0
        room_id = None
        while attempts < 5:
            try:
                random_string = stringutils.random_string(18)
                gen_room_id = RoomID(
                    random_string,
                    self.hs.hostname,
                )
                yield self.store.store_room(
                    room_id=gen_room_id.to_string(),
                    room_creator_user_id=user_id,
                    is_public=is_public
                )
                room_id = gen_room_id.to_string()
                break
            except StoreError:
                attempts += 1
        if not room_id:
            raise StoreError(500, "Couldn't generate a room ID.")

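The ID-generation loop above is a straightforward retry-on-collision pattern: generate a random ID, attempt the insert, and retry on a store error up to a fixed number of attempts. A standalone sketch of the same pattern, with an in-memory stand-in for the room store:

    import random
    import string

    class StoreError(Exception):
        pass

    _taken = set()

    def store_room(room_id):
        # Stand-in for self.store.store_room(): fail if the ID is already taken.
        if room_id in _taken:
            raise StoreError("room ID clash")
        _taken.add(room_id)

    def generate_room_id(hostname, attempts=5):
        for _ in range(attempts):
            localpart = "".join(random.choice(string.ascii_letters) for _ in range(18))
            room_id = "!%s:%s" % (localpart, hostname)
            try:
                store_room(room_id)
                return room_id
            except StoreError:
                continue  # clash: try a fresh random ID
        raise StoreError("Couldn't generate a room ID.")

    print(generate_room_id("example.com"))
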
        if room_alias:
            directory_handler = self.hs.get_handlers().directory_handler
@@ -524,15 +218,18 @@ class RoomCreationHandler(BaseHandler):
        # override any attempt to set room versions via the creation_content
        creation_content["room_version"] = room_version

        room_member_handler = self.hs.get_room_member_handler()

        yield self._send_events_for_new_room(
            requester,
            room_id,
            room_member_handler,
            preset_config=preset_config,
            invite_list=invite_list,
            initial_state=initial_state,
            creation_content=creation_content,
            room_alias=room_alias,
            power_level_content_override=config.get("power_level_content_override"),
            power_level_content_override=config.get("power_level_content_override", {}),
            creator_join_profile=creator_join_profile,
        )

@@ -568,7 +265,7 @@ class RoomCreationHandler(BaseHandler):
            if is_direct:
                content["is_direct"] = is_direct

            yield self.room_member_handler.update_membership(
            yield room_member_handler.update_membership(
                requester,
                UserID.from_string(invitee),
                room_id,
@@ -606,13 +303,14 @@ class RoomCreationHandler(BaseHandler):
        self,
        creator,  # A Requester object.
        room_id,
        room_member_handler,
        preset_config,
        invite_list,
        initial_state,
        creation_content,
        room_alias=None,
        power_level_content_override=None,
        creator_join_profile=None,
        room_alias,
        power_level_content_override,
        creator_join_profile,
    ):
        def create(etype, content, **kwargs):
            e = {
@@ -628,7 +326,6 @@ class RoomCreationHandler(BaseHandler):
        @defer.inlineCallbacks
        def send(etype, content, **kwargs):
            event = create(etype, content, **kwargs)
            logger.info("Sending %s in new room", etype)
            yield self.event_creation_handler.create_and_send_nonmember_event(
                creator,
                event,
@@ -651,8 +348,7 @@ class RoomCreationHandler(BaseHandler):
            content=creation_content,
        )

        logger.info("Sending %s in new room", EventTypes.Member)
        yield self.room_member_handler.update_membership(
        yield room_member_handler.update_membership(
            creator,
            creator.user,
            room_id,
@@ -694,8 +390,7 @@ class RoomCreationHandler(BaseHandler):
            for invitee in invite_list:
                power_level_content["users"][invitee] = 100

            if power_level_content_override:
                power_level_content.update(power_level_content_override)
            power_level_content.update(power_level_content_override)

            yield send(
                etype=EventTypes.PowerLevels,
@@ -734,29 +429,14 @@ class RoomCreationHandler(BaseHandler):
                content=content,
            )

    @defer.inlineCallbacks
    def _generate_room_id(self, creator_id, is_public):
        # autogen room IDs and try to create it. We may clash, so just
        # try a few times till one goes through, giving up eventually.
        attempts = 0
        while attempts < 5:
            try:
                random_string = stringutils.random_string(18)
                gen_room_id = RoomID(
                    random_string,
                    self.hs.hostname,
                ).to_string()
                if isinstance(gen_room_id, bytes):
                    gen_room_id = gen_room_id.decode('utf-8')
                yield self.store.store_room(
                    room_id=gen_room_id,
                    room_creator_user_id=creator_id,
                    is_public=is_public,
                )
                defer.returnValue(gen_room_id)
            except StoreError:
                attempts += 1
        raise StoreError(500, "Couldn't generate a room ID.")
        if "encryption_alg" in config:
            send(
                etype=EventTypes.Encryption,
                state_key="",
                content={
                    'algorithm': config["encryption_alg"],
                }
            )


class RoomContextHandler(object):

+25
-19
@@ -928,7 +928,7 @@ class SyncHandler(object):
        res = yield self._generate_sync_entry_for_rooms(
            sync_result_builder, account_data_by_room
        )
        newly_joined_rooms, newly_joined_users, _, _ = res
        newly_joined_rooms, newly_joined_or_invited_users, _, _ = res
        _, _, newly_left_rooms, newly_left_users = res

        block_all_presence_data = (
@@ -937,7 +937,7 @@ class SyncHandler(object):
        )
        if self.hs_config.use_presence and not block_all_presence_data:
            yield self._generate_sync_entry_for_presence(
                sync_result_builder, newly_joined_rooms, newly_joined_users
                sync_result_builder, newly_joined_rooms, newly_joined_or_invited_users
            )

        yield self._generate_sync_entry_for_to_device(sync_result_builder)
@@ -945,7 +945,7 @@ class SyncHandler(object):
        device_lists = yield self._generate_sync_entry_for_device_list(
            sync_result_builder,
            newly_joined_rooms=newly_joined_rooms,
            newly_joined_users=newly_joined_users,
            newly_joined_or_invited_users=newly_joined_or_invited_users,
            newly_left_rooms=newly_left_rooms,
            newly_left_users=newly_left_users,
        )
@@ -1021,7 +1021,8 @@ class SyncHandler(object):
    @measure_func("_generate_sync_entry_for_device_list")
    @defer.inlineCallbacks
    def _generate_sync_entry_for_device_list(self, sync_result_builder,
                                             newly_joined_rooms, newly_joined_users,
                                             newly_joined_rooms,
                                             newly_joined_or_invited_users,
                                             newly_left_rooms, newly_left_users):
        user_id = sync_result_builder.sync_config.user.to_string()
        since_token = sync_result_builder.since_token
@@ -1035,7 +1036,7 @@ class SyncHandler(object):
            # share a room with?
            for room_id in newly_joined_rooms:
                joined_users = yield self.state.get_current_user_in_room(room_id)
                newly_joined_users.update(joined_users)
                newly_joined_or_invited_users.update(joined_users)

            for room_id in newly_left_rooms:
                left_users = yield self.state.get_current_user_in_room(room_id)
@@ -1043,7 +1044,7 @@ class SyncHandler(object):

            # TODO: Check that these users are actually new, i.e. either they
            # weren't in the previous sync *or* they left and rejoined.
            changed.update(newly_joined_users)
            changed.update(newly_joined_or_invited_users)

            if not changed and not newly_left_users:
                defer.returnValue(DeviceLists(
@@ -1161,7 +1162,7 @@ class SyncHandler(object):

    @defer.inlineCallbacks
    def _generate_sync_entry_for_presence(self, sync_result_builder, newly_joined_rooms,
                                          newly_joined_users):
                                          newly_joined_or_invited_users):
        """Generates the presence portion of the sync response. Populates the
        `sync_result_builder` with the result.
@@ -1169,8 +1170,9 @@ class SyncHandler(object):
            sync_result_builder(SyncResultBuilder)
            newly_joined_rooms(list): List of rooms that the user has joined
                since the last sync (or empty if an initial sync)
            newly_joined_users(list): List of users that have joined rooms
                since the last sync (or empty if an initial sync)
            newly_joined_or_invited_users(list): List of users that have joined
                or been invited to rooms since the last sync (or empty if an initial
                sync)
        """
        now_token = sync_result_builder.now_token
        sync_config = sync_result_builder.sync_config
@@ -1196,7 +1198,7 @@ class SyncHandler(object):
            "presence_key", presence_key
        )

        extra_users_ids = set(newly_joined_users)
        extra_users_ids = set(newly_joined_or_invited_users)
        for room_id in newly_joined_rooms:
            users = yield self.state.get_current_user_in_room(room_id)
            extra_users_ids.update(users)
@@ -1228,7 +1230,8 @@ class SyncHandler(object):

        Returns:
            Deferred(tuple): Returns a 4-tuple of
            `(newly_joined_rooms, newly_joined_users, newly_left_rooms, newly_left_users)`
            `(newly_joined_rooms, newly_joined_or_invited_users,
            newly_left_rooms, newly_left_users)`
        """
        user_id = sync_result_builder.sync_config.user.to_string()
        block_all_room_ephemeral = (
@@ -1299,8 +1302,8 @@ class SyncHandler(object):

        sync_result_builder.invited.extend(invited)

        # Now we want to get any newly joined users
        newly_joined_users = set()
        # Now we want to get any newly joined or invited users
        newly_joined_or_invited_users = set()
        newly_left_users = set()
        if since_token:
            for joined_sync in sync_result_builder.joined:
@@ -1309,19 +1312,22 @@ class SyncHandler(object):
                )
                for event in it:
                    if event.type == EventTypes.Member:
                        if event.membership == Membership.JOIN:
                            newly_joined_users.add(event.state_key)
                        if (
                            event.membership == Membership.JOIN or
                            event.membership == Membership.INVITE
                        ):
                            newly_joined_or_invited_users.add(event.state_key)
                        else:
                            prev_content = event.unsigned.get("prev_content", {})
                            prev_membership = prev_content.get("membership", None)
                            if prev_membership == Membership.JOIN:
                                newly_left_users.add(event.state_key)

        newly_left_users -= newly_joined_users
        newly_left_users -= newly_joined_or_invited_users

        defer.returnValue((
            newly_joined_rooms,
            newly_joined_users,
            newly_joined_or_invited_users,
            newly_left_rooms,
            newly_left_users,
        ))
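The membership check above now treats joins and invites together when collecting users whose device lists may need resyncing. A small sketch of the classification logic, using plain dicts in place of real member events (hypothetical data):

    JOIN, INVITE = "join", "invite"

    def classify(events):
        newly_joined_or_invited, newly_left = set(), set()
        for ev in events:
            if ev["membership"] in (JOIN, INVITE):
                newly_joined_or_invited.add(ev["state_key"])
            elif ev.get("prev_membership") == JOIN:
                newly_left.add(ev["state_key"])
        # a user who left and then rejoined (or was re-invited) is not "newly left"
        newly_left -= newly_joined_or_invited
        return newly_joined_or_invited, newly_left

    joined, left = classify([
        {"state_key": "@a:hs", "membership": "invite"},
        {"state_key": "@b:hs", "membership": "leave", "prev_membership": "join"},
    ])
    assert joined == {"@a:hs"} and left == {"@b:hs"}
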
@@ -1366,7 +1372,7 @@ class SyncHandler(object):
            where:
                room_entries is a list [RoomSyncResultBuilder]
                invited_rooms is a list [InvitedSyncResult]
                newly_joined rooms is a list[str] of room ids
                newly_joined_rooms is a list[str] of room ids
                newly_left_rooms is a list[str] of room ids
        """
        user_id = sync_result_builder.sync_config.user.to_string()
@@ -1401,7 +1407,7 @@ class SyncHandler(object):
            if room_id in sync_result_builder.joined_room_ids and non_joins:
                # Always include if the user (re)joined the room, especially
                # important so that device list changes are calculated correctly.
                # If there are non join member events, but we are still in the room,
                # If there are non-join member events, but we are still in the room,
                # then the user must have left and joined
                newly_joined_rooms.append(room_id)


@@ -63,8 +63,11 @@ class TypingHandler(object):
        self._member_typing_until = {}  # clock time we expect to stop
        self._member_last_federation_poke = {}

        # map room IDs to serial numbers
        self._room_serials = {}
        self._latest_room_serial = 0
        self._reset()
        # map room IDs to sets of users currently typing
        self._room_typing = {}

        # caches which room_ids changed at which serials
        self._typing_stream_change_cache = StreamChangeCache(
@@ -76,15 +79,6 @@ class TypingHandler(object):
            5000,
        )

    def _reset(self):
        """
        Reset the typing handler's data caches.
        """
        # map room IDs to serial numbers
        self._room_serials = {}
        # map room IDs to sets of users currently typing
        self._room_typing = {}

    def _handle_timeouts(self):
        logger.info("Checking for typing timeouts")


@@ -157,8 +157,9 @@ class SimpleHttpClient(object):
            data=query_bytes
        )

        body = yield make_deferred_yieldable(treq.json_content(response))

        if 200 <= response.code < 300:
            body = yield make_deferred_yieldable(treq.json_content(response))
            defer.returnValue(body)
        else:
            raise HttpResponseException(response.code, response.phrase, body)

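The client change above reads the JSON body before checking the status code, so that a non-2xx response can carry its parsed body into the raised exception rather than discarding it. The control flow in isolation (a minimal sketch; the exception class here is a simplified stand-in):

    class HttpResponseException(Exception):
        def __init__(self, code, phrase, body):
            super(HttpResponseException, self).__init__(phrase)
            self.code, self.body = code, body

    def check_response(code, body):
        # The body has been read unconditionally, so error responses can
        # expose their payload to the caller via the exception.
        if 200 <= code < 300:
            return body
        raise HttpResponseException(code, "HTTP error", body)

    try:
        check_response(403, {"errcode": "M_FORBIDDEN"})
    except HttpResponseException as e:
        assert e.body["errcode"] == "M_FORBIDDEN"
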
@@ -468,13 +468,13 @@ def set_cors_headers(request):
    Args:
        request (twisted.web.http.Request): The http request to add CORs to.
    """
    request.setHeader(b"Access-Control-Allow-Origin", b"*")
    request.setHeader("Access-Control-Allow-Origin", "*")
    request.setHeader(
        b"Access-Control-Allow-Methods", b"GET, POST, PUT, DELETE, OPTIONS"
        "Access-Control-Allow-Methods", "GET, POST, PUT, DELETE, OPTIONS"
    )
    request.setHeader(
        b"Access-Control-Allow-Headers",
        b"Origin, X-Requested-With, Content-Type, Accept, Authorization"
        "Access-Control-Allow-Headers",
        "Origin, X-Requested-With, Content-Type, Accept, Authorization"
    )


@@ -121,15 +121,16 @@ def parse_string(request, name, default=None, required=False,

    Args:
        request: the twisted HTTP request.
        name (bytes|unicode): the name of the query parameter.
        default (bytes|unicode|None): value to use if the parameter is absent,
        name (bytes/unicode): the name of the query parameter.
        default (bytes/unicode|None): value to use if the parameter is absent,
            defaults to None. Must be bytes if encoding is None.
        required (bool): whether to raise a 400 SynapseError if the
            parameter is absent, defaults to False.
        allowed_values (list[bytes|unicode]): List of allowed values for the
        allowed_values (list[bytes/unicode]): List of allowed values for the
            string, or None if any value is allowed, defaults to None. Must be
            the same type as name, if given.
        encoding (str|None): The encoding to decode the string content with.
        encoding: The encoding to decode the name to, and decode the string
            content with.

    Returns:
        bytes/unicode|None: A string value or the default. Unicode if encoding

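Going by the docstring above, a caller that wants a decoded, validated query parameter might look like the following illustrative servlet fragment (the parameter name and allowed values are made up for the example):

    # Hypothetical usage inside a servlet's on_GET, for a request like
    # GET /...?dir=b
    from synapse.http.servlet import parse_string

    def get_direction(request):
        # returns "f" or "b" as unicode, raising a 400 SynapseError
        # if anything else is supplied
        return parse_string(
            request, "dir", default="f", allowed_values=("f", "b"),
        )
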
@@ -85,10 +85,7 @@ class EmailPusher(object):
        self.timed_call = None

    def on_new_notifications(self, min_stream_ordering, max_stream_ordering):
        if self.max_stream_ordering:
            self.max_stream_ordering = max(max_stream_ordering, self.max_stream_ordering)
        else:
            self.max_stream_ordering = max_stream_ordering
        self.max_stream_ordering = max(max_stream_ordering, self.max_stream_ordering)
        self._start_processing()

    def on_new_receipts(self, min_stream_id, max_stream_id):

@@ -311,10 +311,10 @@ class HttpPusher(object):
                ]
            }
        }
        if event.type == 'm.room.member' and event.is_state():
        if event.type == 'm.room.member':
            d['notification']['membership'] = event.content['membership']
            d['notification']['user_is_target'] = event.state_key == self.user_id
        if self.hs.config.push_include_content and event.content:
        if self.hs.config.push_include_content and 'content' in event:
            d['notification']['content'] = event.content

        # We no longer send aliases separately, instead, we send the human

@@ -26,6 +26,7 @@ import bleach
import jinja2

from twisted.internet import defer
from twisted.mail.smtp import sendmail

from synapse.api.constants import EventTypes
from synapse.api.errors import StoreError
@@ -84,7 +85,6 @@ class Mailer(object):
        self.notif_template_html = notif_template_html
        self.notif_template_text = notif_template_text

        self.sendmail = self.hs.get_sendmail()
        self.store = self.hs.get_datastore()
        self.macaroon_gen = self.hs.get_macaroon_generator()
        self.state_handler = self.hs.get_state_handler()
@@ -191,11 +191,11 @@ class Mailer(object):
        multipart_msg.attach(html_part)

        logger.info("Sending email push notification to %s" % email_address)
        # logger.debug(html_text)

        yield self.sendmail(
        yield sendmail(
            self.hs.config.email_smtp_host,
            raw_from, raw_to, multipart_msg.as_string().encode('utf8'),
            reactor=self.hs.get_reactor(),
            raw_from, raw_to, multipart_msg.as_string(),
            port=self.hs.config.email_smtp_port,
            requireAuthentication=self.hs.config.email_smtp_user is not None,
            username=self.hs.config.email_smtp_user,
@@ -333,7 +333,7 @@ class Mailer(object):
                            notif_events, user_id, reason):
        if len(notifs_by_room) == 1:
            # Only one room has new stuff
            room_id = list(notifs_by_room.keys())[0]
            room_id = notifs_by_room.keys()[0]

            # If the room has some kind of name, use it, but we don't
            # want the generated-from-names one here otherwise we'll

@@ -124,7 +124,7 @@ class PushRuleEvaluatorForEvent(object):

        # XXX: optimisation: cache our pattern regexps
        if condition['key'] == 'content.body':
            body = self._event.content.get("body", None)
            body = self._event["content"].get("body", None)
            if not body:
                return False

@@ -140,7 +140,7 @@ class PushRuleEvaluatorForEvent(object):
        if not display_name:
            return False

        body = self._event.content.get("body", None)
        body = self._event["content"].get("body", None)
        if not body:
            return False


@@ -51,6 +51,7 @@ REQUIREMENTS = {
    "daemonize>=2.3.1": ["daemonize"],
    "bcrypt>=3.1.0": ["bcrypt>=3.1.0"],
    "pillow>=3.1.2": ["PIL"],
    "pydenticon>=0.2": ["pydenticon"],
    "sortedcontainers>=1.4.4": ["sortedcontainers"],
    "psutil>=2.0.0": ["psutil>=2.0.0"],
    "pysaml2>=3.0.0": ["saml2"],

@@ -106,7 +106,7 @@ class ReplicationClientHandler(object):

        Can be overridden in subclasses to handle more.
        """
        logger.debug("Received rdata %s -> %s", stream_name, token)
        logger.info("Received rdata %s -> %s", stream_name, token)
        return self.store.process_replication_rows(stream_name, token, rows)

    def on_position(self, stream_name, token):

@@ -656,7 +656,7 @@ tcp_inbound_commands = LaterGauge(
    "",
    ["command", "name"],
    lambda: {
        (k, p.name,): count
        (k[0], p.name,): count
        for p in connected_connections
        for k, count in iteritems(p.inbound_commands_counter)
    },
@@ -667,7 +667,7 @@ tcp_outbound_commands = LaterGauge(
    "",
    ["command", "name"],
    lambda: {
        (k, p.name,): count
        (k[0], p.name,): count
        for p in connected_connections
        for k, count in iteritems(p.outbound_commands_counter)
    },

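The gauge fix above (k -> k[0]) suggests the per-connection command counters are keyed by tuples, so the first element has to be unpacked to produce the command-name label for the metric. The shape of that transformation, with made-up counter data:

    from collections import Counter

    # Hypothetical per-connection counter keyed by 1-tuples of command name.
    inbound_commands_counter = Counter({("RDATA",): 3, ("PING",): 7})

    labelled = {
        (k[0], "replication-conn-1"): count
        for k, count in inbound_commands_counter.items()
    }
    assert labelled[("PING", "replication-conn-1")] == 7
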
@@ -47,7 +47,6 @@ from synapse.rest.client.v2_alpha import (
    register,
    report_event,
    room_keys,
    room_upgrade_rest_servlet,
    sendtodevice,
    sync,
    tags,
@@ -117,4 +116,3 @@ class ClientRestResource(JsonResource):
        sendtodevice.register_servlets(hs, client_resource)
        user_directory.register_servlets(hs, client_resource)
        groups.register_servlets(hs, client_resource)
        room_upgrade_rest_servlet.register_servlets(hs, client_resource)

@@ -14,6 +14,8 @@
# limitations under the License.

""" This module contains REST servlets to do with profile: /profile/<paths> """
import logging

from twisted.internet import defer

from synapse.http.servlet import parse_json_object_from_request
@@ -21,6 +23,8 @@ from synapse.types import UserID

from .base import ClientV1RestServlet, client_path_patterns

logger = logging.getLogger(__name__)


class ProfileDisplaynameRestServlet(ClientV1RestServlet):
    PATTERNS = client_path_patterns("/profile/(?P<user_id>[^/]*)/displayname")
@@ -28,6 +32,7 @@ class ProfileDisplaynameRestServlet(ClientV1RestServlet):
    def __init__(self, hs):
        super(ProfileDisplaynameRestServlet, self).__init__(hs)
        self.profile_handler = hs.get_profile_handler()
        self.http_client = hs.get_simple_http_client()

    @defer.inlineCallbacks
    def on_GET(self, request, user_id):
@@ -59,11 +64,30 @@ class ProfileDisplaynameRestServlet(ClientV1RestServlet):
        yield self.profile_handler.set_displayname(
            user, requester, new_name, is_admin)

        if self.hs.config.shadow_server:
            shadow_user = UserID(
                user.localpart, self.hs.config.shadow_server.get("hs")
            )
            self.shadow_displayname(shadow_user.to_string(), content)

        defer.returnValue((200, {}))

    def on_OPTIONS(self, request, user_id):
        return (200, {})

    @defer.inlineCallbacks
    def shadow_displayname(self, user_id, body):
        # TODO: retries
        shadow_hs_url = self.hs.config.shadow_server.get("hs_url")
        as_token = self.hs.config.shadow_server.get("as_token")

        yield self.http_client.put_json(
            "%s/_matrix/client/r0/profile/%s/displayname?access_token=%s&user_id=%s" % (
                shadow_hs_url, user_id, as_token, user_id
            ),
            body
        )


class ProfileAvatarURLRestServlet(ClientV1RestServlet):
    PATTERNS = client_path_patterns("/profile/(?P<user_id>[^/]*)/avatar_url")
@@ -71,6 +95,7 @@ class ProfileAvatarURLRestServlet(ClientV1RestServlet):
    def __init__(self, hs):
        super(ProfileAvatarURLRestServlet, self).__init__(hs)
        self.profile_handler = hs.get_profile_handler()
        self.http_client = hs.get_simple_http_client()

    @defer.inlineCallbacks
    def on_GET(self, request, user_id):
@@ -101,11 +126,30 @@ class ProfileAvatarURLRestServlet(ClientV1RestServlet):
        yield self.profile_handler.set_avatar_url(
            user, requester, new_name, is_admin)

        if self.hs.config.shadow_server:
            shadow_user = UserID(
                user.localpart, self.hs.config.shadow_server.get("hs")
            )
            self.shadow_avatar_url(shadow_user.to_string(), content)

        defer.returnValue((200, {}))

    def on_OPTIONS(self, request, user_id):
        return (200, {})

    @defer.inlineCallbacks
    def shadow_avatar_url(self, user_id, body):
        # TODO: retries
        shadow_hs_url = self.hs.config.shadow_server.get("hs_url")
        as_token = self.hs.config.shadow_server.get("as_token")

        yield self.http_client.put_json(
            "%s/_matrix/client/r0/profile/%s/avatar_url?access_token=%s&user_id=%s" % (
                shadow_hs_url, user_id, as_token, user_id
            ),
            body
        )


class ProfileRestServlet(ClientV1RestServlet):
    PATTERNS = client_path_patterns("/profile/(?P<user_id>[^/]*)")

@@ -29,6 +29,7 @@ from synapse.http.servlet import (
)
from synapse.util.msisdn import phone_number_to_msisdn
from synapse.util.threepids import check_3pid_allowed
from synapse.types import UserID

from ._base import client_v2_patterns, interactive_auth_handler

@@ -51,7 +52,7 @@ class EmailPasswordRequestTokenRestServlet(RestServlet):
            'id_server', 'client_secret', 'email', 'send_attempt'
        ])

        if not check_3pid_allowed(self.hs, "email", body['email']):
        if not (yield check_3pid_allowed(self.hs, "email", body['email'])):
            raise SynapseError(
                403,
                "Your email domain is not authorized on this server",
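check_3pid_allowed evidently now returns a Deferred rather than a plain bool, so every call site gains a yield, and the extra parentheses make the not apply to the resolved boolean rather than to the Deferred object (not yield x would be a syntax error anyway). The pattern in miniature, with a stub in place of the real check:

    from twisted.internet import defer

    @defer.inlineCallbacks
    def check_3pid_allowed_stub(hs, medium, address):
        # stand-in for the real check; always allows, purely for illustration
        yield defer.succeed(None)
        defer.returnValue(True)

    @defer.inlineCallbacks
    def handle_request(hs, email):
        if not (yield check_3pid_allowed_stub(hs, "email", email)):
            raise Exception("denied")
        defer.returnValue("ok")
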
@@ -89,7 +90,7 @@ class MsisdnPasswordRequestTokenRestServlet(RestServlet):

        msisdn = phone_number_to_msisdn(body['country'], body['phone_number'])

        if not check_3pid_allowed(self.hs, "msisdn", msisdn):
        if not (yield check_3pid_allowed(self.hs, "msisdn", msisdn)):
            raise SynapseError(
                403,
                "Account phone numbers are not authorized on this server",
@@ -117,6 +118,7 @@ class PasswordRestServlet(RestServlet):
        self.auth_handler = hs.get_auth_handler()
        self.datastore = self.hs.get_datastore()
        self._set_password_handler = hs.get_set_password_handler()
        self.http_client = hs.get_simple_http_client()

    @interactive_auth_handler
    @defer.inlineCallbacks
@@ -135,9 +137,13 @@ class PasswordRestServlet(RestServlet):

        if self.auth.has_access_token(request):
            requester = yield self.auth.get_user_by_req(request)
            params = yield self.auth_handler.validate_user_via_ui_auth(
                requester, body, self.hs.get_ip_from_request(request),
            )
            # blindly trust ASes without UI-authing them
            if requester.app_service:
                params = body
            else:
                params = yield self.auth_handler.validate_user_via_ui_auth(
                    requester, body, self.hs.get_ip_from_request(request),
                )
            user_id = requester.user.to_string()
        else:
            requester = None
@@ -173,11 +179,30 @@ class PasswordRestServlet(RestServlet):
                user_id, new_password, requester
            )

        if self.hs.config.shadow_server:
            shadow_user = UserID(
                requester.user.localpart, self.hs.config.shadow_server.get("hs")
            )
            self.shadow_password(params, shadow_user.to_string())

        defer.returnValue((200, {}))

    def on_OPTIONS(self, _):
        return 200, {}

    @defer.inlineCallbacks
    def shadow_password(self, body, user_id):
        # TODO: retries
        shadow_hs_url = self.hs.config.shadow_server.get("hs_url")
        as_token = self.hs.config.shadow_server.get("as_token")

        yield self.http_client.post_json_get_json(
            "%s/_matrix/client/r0/account/password?access_token=%s&user_id=%s" % (
                shadow_hs_url, as_token, user_id,
            ),
            body
        )


class DeactivateAccountRestServlet(RestServlet):
    PATTERNS = client_v2_patterns("/account/deactivate$")
@@ -243,7 +268,7 @@ class EmailThreepidRequestTokenRestServlet(RestServlet):
            ['id_server', 'client_secret', 'email', 'send_attempt'],
        )

        if not check_3pid_allowed(self.hs, "email", body['email']):
        if not (yield check_3pid_allowed(self.hs, "email", body['email'])):
            raise SynapseError(
                403,
                "Your email domain is not authorized on this server",
@@ -280,7 +305,7 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet):

        msisdn = phone_number_to_msisdn(body['country'], body['phone_number'])

        if not check_3pid_allowed(self.hs, "msisdn", msisdn):
        if not (yield check_3pid_allowed(self.hs, "msisdn", msisdn)):
            raise SynapseError(
                403,
                "Account phone numbers are not authorized on this server",
@@ -307,7 +332,8 @@ class ThreepidRestServlet(RestServlet):
        self.identity_handler = hs.get_handlers().identity_handler
        self.auth = hs.get_auth()
        self.auth_handler = hs.get_auth_handler()
        self.datastore = self.hs.get_datastore()
        self.datastore = hs.get_datastore()
        self.http_client = hs.get_simple_http_client()

    @defer.inlineCallbacks
    def on_GET(self, request):
@@ -321,27 +347,38 @@ class ThreepidRestServlet(RestServlet):

    @defer.inlineCallbacks
    def on_POST(self, request):
        body = parse_json_object_from_request(request)
        if self.hs.config.disable_3pid_changes:
            raise SynapseError(400, "3PID changes disabled on this server")

        threePidCreds = body.get('threePidCreds')
        threePidCreds = body.get('three_pid_creds', threePidCreds)
        if threePidCreds is None:
            raise SynapseError(400, "Missing param", Codes.MISSING_PARAM)
        body = parse_json_object_from_request(request)

        requester = yield self.auth.get_user_by_req(request)
        user_id = requester.user.to_string()

        threepid = yield self.identity_handler.threepid_from_creds(threePidCreds)
        # skip validation if this is a shadow 3PID from an AS
        if not requester.app_service:
            threePidCreds = body.get('threePidCreds')
            threePidCreds = body.get('three_pid_creds', threePidCreds)
            if threePidCreds is None:
                raise SynapseError(400, "Missing param", Codes.MISSING_PARAM)

        if not threepid:
            raise SynapseError(
                400, "Failed to auth 3pid", Codes.THREEPID_AUTH_FAILED
            )
            threepid = yield self.identity_handler.threepid_from_creds(threePidCreds)

        for reqd in ['medium', 'address', 'validated_at']:
            if reqd not in threepid:
                logger.warn("Couldn't add 3pid: invalid response from ID server")
                raise SynapseError(500, "Invalid response from ID Server")
            if not threepid:
                raise SynapseError(
                    400, "Failed to auth 3pid", Codes.THREEPID_AUTH_FAILED
                )

            for reqd in ['medium', 'address', 'validated_at']:
                if reqd not in threepid:
                    logger.warn("Couldn't add 3pid: invalid response from ID server")
                    raise SynapseError(500, "Invalid response from ID Server")
        else:
            # XXX: ASes pass in a validated threepid directly to bypass the IS.
            # This makes the API entirely change shape when we have an AS token;
            # it really should be an entirely separate API - perhaps
            # /account/3pid/replicate or something.
            threepid = body.get('threepid')

        yield self.auth_handler.add_threepid(
            user_id,
@@ -350,7 +387,7 @@ class ThreepidRestServlet(RestServlet):
            threepid['validated_at'],
        )

        if 'bind' in body and body['bind']:
        if not requester.app_service and ('bind' in body and body['bind']):
            logger.debug(
                "Binding threepid %s to %s",
                threepid, user_id
@@ -359,19 +396,43 @@ class ThreepidRestServlet(RestServlet):
                threePidCreds, user_id
            )

        if self.hs.config.shadow_server:
            shadow_user = UserID(
                requester.user.localpart, self.hs.config.shadow_server.get("hs")
            )
            self.shadow_3pid({'threepid': threepid}, shadow_user.to_string())

        defer.returnValue((200, {}))

    @defer.inlineCallbacks
    def shadow_3pid(self, body, user_id):
        # TODO: retries
        shadow_hs_url = self.hs.config.shadow_server.get("hs_url")
        as_token = self.hs.config.shadow_server.get("as_token")

        yield self.http_client.post_json_get_json(
            "%s/_matrix/client/r0/account/3pid?access_token=%s&user_id=%s" % (
                shadow_hs_url, as_token, user_id,
            ),
            body
        )


class ThreepidDeleteRestServlet(RestServlet):
    PATTERNS = client_v2_patterns("/account/3pid/delete$", releases=())

    def __init__(self, hs):
        super(ThreepidDeleteRestServlet, self).__init__()
        self.hs = hs
        self.auth = hs.get_auth()
        self.auth_handler = hs.get_auth_handler()
        self.http_client = hs.get_simple_http_client()

    @defer.inlineCallbacks
    def on_POST(self, request):
        if self.hs.config.disable_3pid_changes:
            raise SynapseError(400, "3PID changes disabled on this server")

        body = parse_json_object_from_request(request)
        assert_params_in_dict(body, ['medium', 'address'])

@@ -389,6 +450,12 @@ class ThreepidDeleteRestServlet(RestServlet):
            logger.exception("Failed to remove threepid")
            raise SynapseError(500, "Failed to remove threepid")

        if self.hs.config.shadow_server:
            shadow_user = UserID(
                requester.user.localpart, self.hs.config.shadow_server.get("hs")
            )
            self.shadow_3pid_delete(body, shadow_user.to_string())

        if ret:
            id_server_unbind_result = "success"
        else:
@@ -398,6 +465,19 @@ class ThreepidDeleteRestServlet(RestServlet):
            "id_server_unbind_result": id_server_unbind_result,
        }))

    @defer.inlineCallbacks
    def shadow_3pid_delete(self, body, user_id):
        # TODO: retries
        shadow_hs_url = self.hs.config.shadow_server.get("hs_url")
        as_token = self.hs.config.shadow_server.get("as_token")

        yield self.http_client.post_json_get_json(
            "%s/_matrix/client/r0/account/3pid/delete?access_token=%s&user_id=%s" % (
                shadow_hs_url, as_token, user_id
            ),
            body
        )

class WhoamiRestServlet(RestServlet):
|
||||
PATTERNS = client_v2_patterns("/account/whoami$")
|
||||
|
||||
@@ -19,6 +19,7 @@ from twisted.internet import defer
|
||||
|
||||
from synapse.api.errors import AuthError, SynapseError
|
||||
from synapse.http.servlet import RestServlet, parse_json_object_from_request
|
||||
from synapse.types import UserID
|
||||
|
||||
from ._base import client_v2_patterns
|
||||
|
||||
@@ -38,6 +39,7 @@ class AccountDataServlet(RestServlet):
|
||||
self.auth = hs.get_auth()
|
||||
self.store = hs.get_datastore()
|
||||
self.notifier = hs.get_notifier()
|
||||
self._profile_handler = hs.get_profile_handler()
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_PUT(self, request, user_id, account_data_type):
|
||||
@@ -47,6 +49,11 @@ class AccountDataServlet(RestServlet):
|
||||
|
||||
body = parse_json_object_from_request(request)
|
||||
|
||||
if account_data_type == "im.vector.hide_profile":
|
||||
user = UserID.from_string(user_id)
|
||||
hide_profile = body.get('hide_profile')
|
||||
yield self._profile_handler.set_active(user, not hide_profile, True)
|
||||
|
||||
max_id = yield self.store.add_account_data_for_user(
|
||||
user_id, account_data_type, body
|
||||
)
|
||||
|
||||
@@ -68,29 +68,6 @@ function captchaDone() {
|
||||
</html>
|
||||
"""
|
||||
|
||||
TERMS_TEMPLATE = """
|
||||
<html>
|
||||
<head>
|
||||
<title>Authentication</title>
|
||||
<meta name='viewport' content='width=device-width, initial-scale=1,
|
||||
user-scalable=no, minimum-scale=1.0, maximum-scale=1.0'>
|
||||
<link rel="stylesheet" href="/_matrix/static/client/register/style.css">
|
||||
</head>
|
||||
<body>
|
||||
<form id="registrationForm" method="post" action="%(myurl)s">
|
||||
<div>
|
||||
<p>
|
||||
Please click the button below if you agree to the
|
||||
<a href="%(terms_url)s">privacy policy of this homeserver.</a>
|
||||
</p>
|
||||
<input type="hidden" name="session" value="%(session)s" />
|
||||
<input type="submit" value="Agree" />
|
||||
</div>
|
||||
</form>
|
||||
</body>
|
||||
</html>
|
||||
"""
|
||||
|
||||
SUCCESS_TEMPLATE = """
|
||||
<html>
|
||||
<head>
|
||||
@@ -153,27 +130,6 @@ class AuthRestServlet(RestServlet):
|
||||
request.setHeader(b"Content-Type", b"text/html; charset=utf-8")
|
||||
request.setHeader(b"Content-Length", b"%d" % (len(html_bytes),))
|
||||
|
||||
request.write(html_bytes)
|
||||
finish_request(request)
|
||||
defer.returnValue(None)
|
||||
elif stagetype == LoginType.TERMS:
|
||||
session = request.args['session'][0]
|
||||
|
||||
html = TERMS_TEMPLATE % {
|
||||
'session': session,
|
||||
'terms_url': "%s/_matrix/consent?v=%s" % (
|
||||
self.hs.config.public_baseurl,
|
||||
self.hs.config.user_consent_version,
|
||||
),
|
||||
'myurl': "%s/auth/%s/fallback/web" % (
|
||||
CLIENT_V2_ALPHA_PREFIX, LoginType.TERMS
|
||||
),
|
||||
}
|
||||
html_bytes = html.encode("utf8")
|
||||
request.setResponseCode(200)
|
||||
request.setHeader(b"Content-Type", b"text/html; charset=utf-8")
|
||||
request.setHeader(b"Content-Length", b"%d" % (len(html_bytes),))
|
||||
|
||||
request.write(html_bytes)
|
||||
finish_request(request)
|
||||
defer.returnValue(None)
|
||||
@@ -183,7 +139,7 @@ class AuthRestServlet(RestServlet):
|
||||
@defer.inlineCallbacks
|
||||
def on_POST(self, request, stagetype):
|
||||
yield
|
||||
if stagetype == LoginType.RECAPTCHA:
|
||||
if stagetype == "m.login.recaptcha":
|
||||
if ('g-recaptcha-response' not in request.args or
|
||||
len(request.args['g-recaptcha-response'])) == 0:
|
||||
raise SynapseError(400, "No captcha response supplied")
|
||||
@@ -222,41 +178,6 @@ class AuthRestServlet(RestServlet):
|
||||
request.write(html_bytes)
|
||||
finish_request(request)
|
||||
|
||||
defer.returnValue(None)
|
||||
elif stagetype == LoginType.TERMS:
|
||||
if ('session' not in request.args or
|
||||
len(request.args['session'])) == 0:
|
||||
raise SynapseError(400, "No session supplied")
|
||||
|
||||
session = request.args['session'][0]
|
||||
authdict = {'session': session}
|
||||
|
||||
success = yield self.auth_handler.add_oob_auth(
|
||||
LoginType.TERMS,
|
||||
authdict,
|
||||
self.hs.get_ip_from_request(request)
|
||||
)
|
||||
|
||||
if success:
|
||||
html = SUCCESS_TEMPLATE
|
||||
else:
|
||||
html = TERMS_TEMPLATE % {
|
||||
'session': session,
|
||||
'terms_url': "%s/_matrix/consent?v=%s" % (
|
||||
self.hs.config.public_baseurl,
|
||||
self.hs.config.user_consent_version,
|
||||
),
|
||||
'myurl': "%s/auth/%s/fallback/web" % (
|
||||
CLIENT_V2_ALPHA_PREFIX, LoginType.TERMS
|
||||
),
|
||||
}
|
||||
html_bytes = html.encode("utf8")
|
||||
request.setResponseCode(200)
|
||||
request.setHeader(b"Content-Type", b"text/html; charset=utf-8")
|
||||
request.setHeader(b"Content-Length", b"%d" % (len(html_bytes),))
|
||||
|
||||
request.write(html_bytes)
|
||||
finish_request(request)
|
||||
defer.returnValue(None)
|
||||
else:
|
||||
raise SynapseError(404, "Unknown auth stage type")
|
||||
|
||||
@@ -16,7 +16,9 @@
|
||||
|
||||
import hmac
|
||||
import logging
|
||||
import re
|
||||
from hashlib import sha1
|
||||
from string import capwords
|
||||
|
||||
from six import string_types
|
||||
|
||||
@@ -73,7 +75,7 @@ class EmailRegisterRequestTokenRestServlet(RestServlet):
|
||||
'id_server', 'client_secret', 'email', 'send_attempt'
|
||||
])
|
||||
|
||||
if not check_3pid_allowed(self.hs, "email", body['email']):
|
||||
if not (yield check_3pid_allowed(self.hs, "email", body['email'])):
|
||||
raise SynapseError(
|
||||
403,
|
||||
"Your email domain is not authorized to register on this server",
|
||||
@@ -115,7 +117,7 @@ class MsisdnRegisterRequestTokenRestServlet(RestServlet):
|
||||
|
||||
msisdn = phone_number_to_msisdn(body['country'], body['phone_number'])
|
||||
|
||||
if not check_3pid_allowed(self.hs, "msisdn", msisdn):
|
||||
if not (yield check_3pid_allowed(self.hs, "msisdn", msisdn)):
|
||||
raise SynapseError(
|
||||
403,
|
||||
"Phone numbers are not authorized to register on this server",
|
||||
@@ -227,6 +229,8 @@ class RegisterRestServlet(RestServlet):
|
||||
raise SynapseError(400, "Invalid username")
|
||||
desired_username = body['username']
|
||||
|
||||
desired_display_name = body.get('display_name')
|
||||
|
||||
appservice = None
|
||||
if self.auth.has_access_token(request):
|
||||
appservice = yield self.auth.get_appservice_by_req(request)
|
||||
@@ -250,7 +254,8 @@ class RegisterRestServlet(RestServlet):
|
||||
|
||||
if isinstance(desired_username, string_types):
|
||||
result = yield self._do_appservice_registration(
|
||||
desired_username, access_token, body
|
||||
desired_username, desired_password, desired_display_name,
|
||||
access_token, body
|
||||
)
|
||||
defer.returnValue((200, result)) # we throw for non 200 responses
|
||||
return
|
||||
@@ -302,13 +307,6 @@ class RegisterRestServlet(RestServlet):
|
||||
session_id, "registered_user_id", None
|
||||
)
|
||||
|
||||
if desired_username is not None:
|
||||
yield self.registration_handler.check_username(
|
||||
desired_username,
|
||||
guest_access_token=guest_access_token,
|
||||
assigned_user_id=registered_user_id,
|
||||
)
|
||||
|
||||
# Only give msisdn flows if the x_show_msisdn flag is given:
|
||||
# this is a hack to work around the fact that clients were shipped
|
||||
# that use fallback registration if they see any flows that they don't
|
||||
@@ -359,13 +357,6 @@ class RegisterRestServlet(RestServlet):
|
||||
[LoginType.MSISDN, LoginType.EMAIL_IDENTITY]
|
||||
])
|
||||
|
||||
# Append m.login.terms to all flows if we're requiring consent
|
||||
if self.hs.config.user_consent_at_registration:
|
||||
new_flows = []
|
||||
for flow in flows:
|
||||
flow.append(LoginType.TERMS)
|
||||
flows.extend(new_flows)
|
||||
|
||||
auth_result, params, session_id = yield self.auth_handler.check_auth(
|
||||
flows, body, self.hs.get_ip_from_request(request)
|
||||
)
|
||||
@@ -382,7 +373,7 @@ class RegisterRestServlet(RestServlet):
|
||||
medium = auth_result[login_type]['medium']
|
||||
address = auth_result[login_type]['address']
|
||||
|
||||
if not check_3pid_allowed(self.hs, medium, address):
|
||||
if not (yield check_3pid_allowed(self.hs, medium, address)):
|
||||
raise SynapseError(
|
||||
403,
|
||||
"Third party identifiers (email/phone numbers)" +
|
||||
@@ -390,6 +381,81 @@ class RegisterRestServlet(RestServlet):
|
||||
Codes.THREEPID_DENIED,
|
||||
)
|
||||
|
||||
if self.hs.config.register_mxid_from_3pid:
|
||||
# override the desired_username based on the 3PID if any.
|
||||
# reset it first to avoid folks picking their own username.
|
||||
desired_username = None
|
||||
|
||||
# we should have an auth_result at this point if we're going to progress
|
||||
# to register the user (i.e. we haven't picked up a registered_user_id
|
||||
# from our session store), in which case get ready and gen the
|
||||
# desired_username
|
||||
if auth_result:
|
||||
if (
|
||||
self.hs.config.register_mxid_from_3pid == 'email' and
|
||||
LoginType.EMAIL_IDENTITY in auth_result
|
||||
):
|
||||
address = auth_result[LoginType.EMAIL_IDENTITY]['address']
|
||||
desired_username = synapse.types.strip_invalid_mxid_characters(
|
||||
address.replace('@', '-').lower()
|
||||
)
|
||||
|
||||
# find a unique mxid for the account, suffixing numbers
|
||||
# if needed
|
||||
while True:
|
||||
try:
|
||||
yield self.registration_handler.check_username(
|
||||
desired_username,
|
||||
guest_access_token=guest_access_token,
|
||||
assigned_user_id=registered_user_id,
|
||||
)
|
||||
# if we got this far we passed the check.
|
||||
break
|
||||
except SynapseError as e:
|
||||
if e.errcode == Codes.USER_IN_USE:
|
||||
m = re.match(r'^(.*?)(\d+)$', desired_username)
|
||||
if m:
|
||||
desired_username = m.group(1) + str(
|
||||
int(m.group(2)) + 1
|
||||
)
|
||||
else:
|
||||
desired_username += "1"
|
||||
else:
|
||||
# something else went wrong.
|
||||
break
|
||||
|
||||
# XXX: a nasty heuristic to turn an email address into
|
||||
# a displayname, as part of register_mxid_from_3pid
|
||||
parts = address.replace('.', ' ').split('@')
|
||||
org_parts = parts[1].split(' ')
|
||||
|
||||
if org_parts[-2] == "matrix" and org_parts[-1] == "org":
|
||||
org = "Tchap Admin"
|
||||
elif org_parts[-2] == "gouv" and org_parts[-1] == "fr":
|
||||
org = org_parts[-3] if len(org_parts) > 2 else org_parts[-2]
|
||||
else:
|
||||
org = org_parts[-2]
|
||||
|
||||
desired_display_name = (
|
||||
capwords(parts[0]) + " [" + capwords(org) + "]"
|
||||
)
|
||||
elif (
|
||||
self.hs.config.register_mxid_from_3pid == 'msisdn' and
|
||||
LoginType.MSISDN in auth_result
|
||||
):
|
||||
desired_username = auth_result[LoginType.MSISDN]['address']
|
||||
else:
|
||||
raise SynapseError(
|
||||
400, "Cannot derive mxid from 3pid; no recognised 3pid"
|
||||
)
|
||||
|
||||
if desired_username is not None:
|
||||
yield self.registration_handler.check_username(
|
||||
desired_username,
|
||||
guest_access_token=guest_access_token,
|
||||
assigned_user_id=registered_user_id,
|
||||
)
|
||||
|
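The derivation loop above turns an email address into a candidate localpart and then bumps a numeric suffix until the name is free. An illustrative standalone re-implementation (note the real code also runs the candidate through strip_invalid_mxid_characters, which this sketch skips):

    import re

    def derive_localpart(email, taken):
        # replace '@' with '-', lower-case, then bump a numeric suffix until unique
        candidate = email.replace('@', '-').lower()
        while candidate in taken:
            m = re.match(r'^(.*?)(\d+)$', candidate)
            if m:
                candidate = m.group(1) + str(int(m.group(2)) + 1)
            else:
                candidate += "1"
        return candidate

    taken = {"jane.doe-example.com", "jane.doe-example.com1"}
    assert derive_localpart("Jane.Doe@example.com", taken) == "jane.doe-example.com2"
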
        if registered_user_id is not None:
            logger.info(
                "Already registered user ID %r for this session",
@@ -402,9 +468,16 @@ class RegisterRestServlet(RestServlet):
            # NB: This may be from the auth handler and NOT from the POST
            assert_params_in_dict(params, ["password"])

            desired_username = params.get("username", None)
            if not self.hs.config.register_mxid_from_3pid:
                desired_username = params.get("username", None)
            else:
                # we keep the original desired_username derived from the 3pid above
                pass

            guest_access_token = params.get("guest_access_token", None)
            new_password = params.get("password", None)

            # XXX: don't we need to validate these for length etc like we did on
            # the ones from the JSON body earlier on in the method?

            if desired_username is not None:
                desired_username = desired_username.lower()
@@ -415,9 +488,10 @@ class RegisterRestServlet(RestServlet):

            (registered_user_id, _) = yield self.registration_handler.register(
                localpart=desired_username,
                password=new_password,
                password=params.get("password", None),
                guest_access_token=guest_access_token,
                generate_token=False,
                display_name=desired_display_name,
                threepid=threepid,
            )
            # Necessary due to auth checks prior to the threepid being
@@ -425,6 +499,14 @@ class RegisterRestServlet(RestServlet):
            if is_threepid_reserved(self.hs.config, threepid):
                yield self.store.upsert_monthly_active_user(registered_user_id)

            if self.hs.config.shadow_server:
                yield self.registration_handler.shadow_register(
                    localpart=desired_username,
                    display_name=desired_display_name,
                    auth_result=auth_result,
                    params=params,
                )

            # remember that we've now registered that user account, and with
            # what user ID (since the user may not have specified)
            self.auth_handler.set_session_data(
@@ -452,23 +534,39 @@ class RegisterRestServlet(RestServlet):
                params.get("bind_msisdn")
            )

        if auth_result and LoginType.TERMS in auth_result:
            logger.info("%s has consented to the privacy policy" % registered_user_id)
            yield self.store.user_set_consent_version(
                registered_user_id, self.hs.config.user_consent_version,
            )

        defer.returnValue((200, return_dict))

    def on_OPTIONS(self, _):
        return 200, {}

    @defer.inlineCallbacks
    def _do_appservice_registration(self, username, as_token, body):
    def _do_appservice_registration(
        self, username, password, display_name, as_token, body
    ):

        # FIXME: appservice_register() is horribly duplicated with register()
        # and they should probably just be combined together with a config flag.
        user_id = yield self.registration_handler.appservice_register(
            username, as_token
            username, as_token, password, display_name
        )
        defer.returnValue((yield self._create_registration_details(user_id, body)))
        result = yield self._create_registration_details(user_id, body)

        auth_result = body.get('auth_result')
        if auth_result and LoginType.EMAIL_IDENTITY in auth_result:
            threepid = auth_result[LoginType.EMAIL_IDENTITY]
            yield self._register_email_threepid(
                user_id, threepid, result["access_token"],
                body.get("bind_email")
            )

        if auth_result and LoginType.MSISDN in auth_result:
            threepid = auth_result[LoginType.MSISDN]
            yield self._register_msisdn_threepid(
                user_id, threepid, result["access_token"],
                body.get("bind_msisdn")
            )

        defer.returnValue(result)

    @defer.inlineCallbacks
    def _do_shared_secret_registration(self, username, password, body):

@@ -17,7 +17,7 @@ import logging

from twisted.internet import defer

from synapse.api.errors import Codes, NotFoundError, SynapseError
from synapse.api.errors import Codes, SynapseError
from synapse.http.servlet import (
    RestServlet,
    parse_json_object_from_request,
@@ -208,25 +208,10 @@ class RoomKeysServlet(RestServlet):
            user_id, version, room_id, session_id
        )

        # Convert room_keys to the right format to return.
        if session_id:
            # If the client requests a specific session, but that session was
            # not backed up, then return an M_NOT_FOUND.
            if room_keys['rooms'] == {}:
                raise NotFoundError("No room_keys found")
            else:
                room_keys = room_keys['rooms'][room_id]['sessions'][session_id]
            room_keys = room_keys['rooms'][room_id]['sessions'][session_id]
        elif room_id:
            # If the client requests all sessions from a room, but no sessions
            # are found, then return an empty result rather than an error, so
            # that clients don't have to handle an error condition, and an
            # empty result is valid. (Similarly if the client requests all
            # sessions from the backup, but in that case, room_keys is already
            # in the right format, so we don't need to do anything about it.)
            if room_keys['rooms'] == {}:
                room_keys = {'sessions': {}}
            else:
                room_keys = room_keys['rooms'][room_id]
            room_keys = room_keys['rooms'][room_id]

        defer.returnValue((200, room_keys))

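The comments above distinguish three response shapes for a key-backup query, depending on whether the client asked for one session, one room, or the whole backup. Roughly, with illustrative payloads:

    # Whole backup: the rooms map, possibly empty.
    whole_backup = {"rooms": {}}

    # One room: just that room's sessions; an empty map rather than a 404.
    one_room = {"sessions": {}}

    # One session: the session data itself; a missing session is an
    # M_NOT_FOUND error rather than an empty object.
    one_session = {
        "first_message_index": 0,
        "forwarded_count": 0,
        "is_verified": False,
        "session_data": "...",
    }
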
||||
@@ -1,89 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

from twisted.internet import defer

from synapse.api.constants import KNOWN_ROOM_VERSIONS
from synapse.api.errors import Codes, SynapseError
from synapse.http.servlet import (
    RestServlet,
    assert_params_in_dict,
    parse_json_object_from_request,
)

from ._base import client_v2_patterns

logger = logging.getLogger(__name__)


class RoomUpgradeRestServlet(RestServlet):
"""Handler for room uprade requests.
|
||||

    Handles requests of the form:

        POST /_matrix/client/r0/rooms/$roomid/upgrade HTTP/1.1
        Content-Type: application/json

        {
            "new_version": "2",
        }

    Creates a new room and shuts down the old one. Returns the ID of the new room.

    Args:
        hs (synapse.server.HomeServer):
    """
    PATTERNS = client_v2_patterns(
        # /rooms/$roomid/upgrade
        "/rooms/(?P<room_id>[^/]*)/upgrade$",
        v2_alpha=False,
    )

    def __init__(self, hs):
        super(RoomUpgradeRestServlet, self).__init__()
        self._hs = hs
        self._room_creation_handler = hs.get_room_creation_handler()
        self._auth = hs.get_auth()

    @defer.inlineCallbacks
    def on_POST(self, request, room_id):
        requester = yield self._auth.get_user_by_req(request)

        content = parse_json_object_from_request(request)
        assert_params_in_dict(content, ("new_version", ))
        new_version = content["new_version"]

        if new_version not in KNOWN_ROOM_VERSIONS:
            raise SynapseError(
                400,
                "Your homeserver does not support this room version",
                Codes.UNSUPPORTED_ROOM_VERSION,
            )

        new_room_id = yield self._room_creation_handler.upgrade_room(
            requester, room_id, new_version
        )

        ret = {
            "replacement_room": new_room_id,
        }

        defer.returnValue((200, ret))


def register_servlets(hs, http_server):
    RoomUpgradeRestServlet(hs).register(http_server)
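A hypothetical client call against this endpoint; the homeserver URL, room ID and access token are placeholders:

import requests

resp = requests.post(
    "https://homeserver.example.com/_matrix/client/r0/rooms/"
    "!oldroom:example.com/upgrade",
    params={"access_token": "MDAx..."},   # placeholder token
    json={"new_version": "2"},
)
print(resp.json())  # e.g. {"replacement_room": "!newroom:example.com"}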
@@ -15,6 +15,8 @@

import logging

from signedjson.sign import sign_json

from twisted.internet import defer

from synapse.api.errors import SynapseError
@@ -37,6 +39,7 @@ class UserDirectorySearchRestServlet(RestServlet):
        self.hs = hs
        self.auth = hs.get_auth()
        self.user_directory_handler = hs.get_user_directory_handler()
        self.http_client = hs.get_simple_http_client()

    @defer.inlineCallbacks
    def on_POST(self, request):
@@ -61,6 +64,14 @@ class UserDirectorySearchRestServlet(RestServlet):

        body = parse_json_object_from_request(request)

        if self.hs.config.user_directory_defer_to_id_server:
            signed_body = sign_json(body, self.hs.hostname, self.hs.config.signing_key[0])
            url = "%s/_matrix/identity/api/v1/user_directory/search" % (
                self.hs.config.user_directory_defer_to_id_server,
            )
            resp = yield self.http_client.post_json_get_json(url, signed_body)
            defer.returnValue((200, resp))

        limit = body.get("limit", 10)
        limit = min(limit, 50)

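A minimal sketch of the signing step above; the key is freshly generated here purely for illustration:

from signedjson.key import generate_signing_key
from signedjson.sign import sign_json

signing_key = generate_signing_key("a_key_version")  # illustrative key
body = {"search_term": "alice", "limit": 10}
signed_body = sign_json(body, "example.com", signing_key)
# signed_body now carries a "signatures" section keyed by server name,
# which the identity server can verify against example.com's public key.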
@@ -137,36 +137,27 @@ class ConsentResource(Resource):
            request (twisted.web.http.Request):
        """
        version = parse_string(request, "v", default=self._default_consent_version)
        username = parse_string(request, "u", required=False, default="")
        userhmac = None
        has_consented = False
        public_version = username == ""
        if not public_version:
            userhmac_bytes = parse_string(request, "h", required=True, encoding=None)
        version = parse_string(request, "v",
                               default=self._default_consent_version)
        username = parse_string(request, "u", required=True)
        userhmac = parse_string(request, "h", required=True, encoding=None)

            self._check_hash(username, userhmac_bytes)
        self._check_hash(username, userhmac)

            if username.startswith('@'):
                qualified_user_id = username
            else:
                qualified_user_id = UserID(username, self.hs.hostname).to_string()
        if username.startswith('@'):
            qualified_user_id = username
        else:
            qualified_user_id = UserID(username, self.hs.hostname).to_string()

            u = yield self.store.get_user_by_id(qualified_user_id)
            if u is None:
                raise NotFoundError("Unknown user")

            has_consented = u["consent_version"] == version
            userhmac = userhmac_bytes.decode("ascii")
        u = yield self.store.get_user_by_id(qualified_user_id)
        if u is None:
            raise NotFoundError("Unknown user")

        try:
            self._render_template(
                request, "%s.html" % (version,),
                user=username,
                userhmac=userhmac,
                version=version,
                has_consented=has_consented,
                public_version=public_version,
                user=username, userhmac=userhmac, version=version,
                has_consented=(u["consent_version"] == version),
            )
        except TemplateNotFound:
            raise NotFoundError("Unknown policy version")
@@ -232,7 +223,7 @@ class ConsentResource(Resource):
            key=self._hmac_secret,
            msg=userid.encode('utf-8'),
            digestmod=sha256,
        ).hexdigest().encode('ascii')
        ).hexdigest()

        if not compare_digest(want_mac, userhmac):
            raise SynapseError(http_client.FORBIDDEN, "HMAC incorrect")

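A sketch of the HMAC check above: the consent URI carries an HMAC-SHA256 of the username, keyed with the server's secret, and _check_hash recomputes and compares it. The secret and username here are illustrative:

import hmac
from hashlib import sha256
from hmac import compare_digest

hmac_secret = b"form_secret_from_config"   # illustrative secret
username = "@alice:example.com"

want_mac = hmac.new(
    key=hmac_secret,
    msg=username.encode('utf-8'),
    digestmod=sha256,
).hexdigest()

presented = want_mac  # in practice: the "h" query parameter
if not compare_digest(want_mac, presented):
    raise ValueError("HMAC incorrect")  # the servlet returns 403 here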
@@ -0,0 +1,14 @@
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
@@ -0,0 +1,92 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import logging

from canonicaljson import encode_canonical_json
from signedjson.sign import sign_json
from unpaddedbase64 import encode_base64

from OpenSSL import crypto
from twisted.web.resource import Resource

from synapse.http.server import respond_with_json_bytes

logger = logging.getLogger(__name__)


class LocalKey(Resource):
"""HTTP resource containing encoding the TLS X.509 certificate and NACL
|
||||
signature verification keys for this server::
|
||||

        GET /key HTTP/1.1

        HTTP/1.1 200 OK
        Content-Type: application/json
        {
            "server_name": "this.server.example.com"
            "verify_keys": {
                "algorithm:version": # base64 encoded NACL verification key.
            },
            "tls_certificate": # base64 ASN.1 DER encoded X.509 tls cert.
            "signatures": {
                "this.server.example.com": {
                    "algorithm:version": # NACL signature for this server.
                }
            }
        }
    """

    def __init__(self, hs):
        self.response_body = encode_canonical_json(
            self.response_json_object(hs.config)
        )
        Resource.__init__(self)

    @staticmethod
    def response_json_object(server_config):
        verify_keys = {}
        for key in server_config.signing_key:
            verify_key_bytes = key.verify_key.encode()
            key_id = "%s:%s" % (key.alg, key.version)
            verify_keys[key_id] = encode_base64(verify_key_bytes)

        x509_certificate_bytes = crypto.dump_certificate(
            crypto.FILETYPE_ASN1,
            server_config.tls_certificate
        )
        json_object = {
            u"server_name": server_config.server_name,
            u"verify_keys": verify_keys,
            u"tls_certificate": encode_base64(x509_certificate_bytes)
        }
        for key in server_config.signing_key:
            json_object = sign_json(
                json_object,
                server_config.server_name,
                key,
            )

        return json_object

    def render_GET(self, request):
        return respond_with_json_bytes(
            request, 200, self.response_body,
        )

    def getChild(self, name, request):
        if name == b'':
            return self
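A small sketch of how a verify key becomes an "algorithm:version" entry in the response above; the key is generated here only for illustration:

from signedjson.key import generate_signing_key, get_verify_key
from unpaddedbase64 import encode_base64

signing_key = generate_signing_key("v1")          # illustrative key
verify_key = get_verify_key(signing_key)

key_id = "%s:%s" % (verify_key.alg, verify_key.version)  # e.g. "ed25519:v1"
verify_keys = {key_id: encode_base64(verify_key.encode())}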
@@ -0,0 +1,68 @@
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from pydenticon import Generator

from twisted.web.resource import Resource

from synapse.http.servlet import parse_integer

FOREGROUND = [
    "rgb(45,79,255)",
    "rgb(254,180,44)",
    "rgb(226,121,234)",
    "rgb(30,179,253)",
    "rgb(232,77,65)",
    "rgb(49,203,115)",
    "rgb(141,69,170)"
]

BACKGROUND = "rgb(224,224,224)"
SIZE = 5


class IdenticonResource(Resource):
    isLeaf = True

    def __init__(self):
        Resource.__init__(self)
        self.generator = Generator(
            SIZE, SIZE, foreground=FOREGROUND, background=BACKGROUND,
        )

    def generate_identicon(self, name, width, height):
        v_padding = width % SIZE
        h_padding = height % SIZE
        top_padding = v_padding // 2
        left_padding = h_padding // 2
        bottom_padding = v_padding - top_padding
        right_padding = h_padding - left_padding
        width -= v_padding
        height -= h_padding
        padding = (top_padding, bottom_padding, left_padding, right_padding)
        identicon = self.generator.generate(
            name, width, height, padding=padding
        )
        return identicon

    def render_GET(self, request):
        name = "/".join(request.postpath)
        width = parse_integer(request, "width", default=96)
        height = parse_integer(request, "height", default=96)
        identicon_bytes = self.generate_identicon(name, width, height)
        request.setHeader(b"Content-Type", b"image/png")
        request.setHeader(
            b"Cache-Control", b"public,max-age=86400,s-maxage=86400"
        )
        return identicon_bytes
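A worked example of the padding arithmetic above, for a hypothetical 97x98 request with SIZE = 5: the image shrinks to a multiple of the grid size and the remainder is distributed as padding.

SIZE = 5
width, height = 97, 98

v_padding = width % SIZE        # 2
h_padding = height % SIZE       # 3
top_padding = v_padding // 2    # 1
left_padding = h_padding // 2   # 1
bottom_padding = v_padding - top_padding   # 1
right_padding = h_padding - left_padding   # 2
width -= v_padding              # 95
height -= h_padding             # 95

assert (top_padding, bottom_padding, left_padding, right_padding) == (1, 1, 1, 2)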
@@ -45,6 +45,7 @@ from ._base import FileInfo, respond_404, respond_with_responder
from .config_resource import MediaConfigResource
from .download_resource import DownloadResource
from .filepath import MediaFilePaths
from .identicon_resource import IdenticonResource
from .media_storage import MediaStorage
from .preview_url_resource import PreviewUrlResource
from .storage_provider import StorageProviderWrapper
@@ -768,6 +769,7 @@ class MediaRepositoryResource(Resource):
        self.putChild(b"thumbnail", ThumbnailResource(
            hs, media_repo, media_repo.media_storage,
        ))
        self.putChild(b"identicon", IdenticonResource())
        if hs.config.url_preview_enabled:
            self.putChild(b"preview_url", PreviewUrlResource(
                hs, media_repo, media_repo.media_storage,

@@ -12,7 +12,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import cgi
import datetime
import errno
@@ -25,7 +24,6 @@ import shutil
import sys
import traceback

import six
from six import string_types
from six.moves import urllib_parse as urlparse

@@ -100,7 +98,7 @@ class PreviewUrlResource(Resource):
        # XXX: if get_user_by_req fails, what should we do in an async render?
        requester = yield self.auth.get_user_by_req(request)
        url = parse_string(request, "url")
        if b"ts" in request.args:
        if "ts" in request.args:
            ts = parse_integer(request, "ts")
        else:
            ts = self.clock.time_msec()
@@ -182,12 +180,7 @@ class PreviewUrlResource(Resource):
            cache_result["expires_ts"] > ts and
            cache_result["response_code"] / 100 == 2
        ):
            # It may be stored as text in the database, not as bytes (such as
            # PostgreSQL). If so, encode it back before handing it on.
            og = cache_result["og"]
            if isinstance(og, six.text_type):
                og = og.encode('utf8')
            defer.returnValue(og)
            defer.returnValue(cache_result["og"])
            return

        media_info = yield self._download_url(url, user)
@@ -220,17 +213,14 @@ class PreviewUrlResource(Resource):
        elif _is_html(media_info['media_type']):
            # TODO: somehow stop a big HTML tree from exploding synapse's RAM

            with open(media_info['filename'], 'rb') as file:
                body = file.read()
            file = open(media_info['filename'])
            body = file.read()
            file.close()

            # clobber the encoding from the content-type, or default to utf-8
            # XXX: this overrides any <meta/> or XML charset headers in the body
            # which may pose problems, but so far seems to work okay.
            match = re.match(
                r'.*; *charset="?(.*?)"?(;|$)',
                media_info['media_type'],
                re.I
            )
            match = re.match(r'.*; *charset=(.*?)(;|$)', media_info['media_type'], re.I)
            encoding = match.group(1) if match else "utf-8"

            og = decode_and_calc_og(body, media_info['uri'], encoding)

@@ -0,0 +1,100 @@
# -*- coding: utf-8 -*-
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

from synapse.config._base import ConfigError

logger = logging.getLogger(__name__)


class DomainRuleChecker(object):
    """
    A re-implementation of the SpamChecker that prevents users in one domain from
    inviting users in other domains to rooms, based on a configuration.

    Takes a config in the format:

    spam_checker:
        module: "rulecheck.DomainRuleChecker"
        config:
          domain_mapping:
            "inviter_domain": [ "invitee_domain_permitted", "other_domain_permitted" ]
            "other_inviter_domain": [ "invitee_domain_permitted" ]
          default: False

    Don't forget to consider if you can invite users from your own domain.
    """

    def __init__(self, config):
        self.domain_mapping = config["domain_mapping"] or {}
        self.default = config["default"]

    def check_event_for_spam(self, event):
        """Implements synapse.events.SpamChecker.check_event_for_spam
        """
        return False

    def user_may_invite(self, inviter_userid, invitee_userid, room_id):
        """Implements synapse.events.SpamChecker.user_may_invite
        """
        inviter_domain = self._get_domain_from_id(inviter_userid)
        invitee_domain = self._get_domain_from_id(invitee_userid)

        if inviter_domain not in self.domain_mapping:
            return self.default

        return invitee_domain in self.domain_mapping[inviter_domain]

    def user_may_create_room(self, userid):
        """Implements synapse.events.SpamChecker.user_may_create_room
        """
        return True

    def user_may_create_room_alias(self, userid, room_alias):
        """Implements synapse.events.SpamChecker.user_may_create_room_alias
        """
        return True

    def user_may_publish_room(self, userid, room_id):
        """Implements synapse.events.SpamChecker.user_may_publish_room
        """
        return True

    @staticmethod
    def parse_config(config):
        """Implements synapse.events.SpamChecker.parse_config
        """
        if "default" in config:
            return config
        else:
            raise ConfigError("No default set for spam_config DomainRuleChecker")

    @staticmethod
    def _get_domain_from_id(mxid):
        """Parses a string and returns the domain part of the mxid.

        Args:
            mxid (str): a valid mxid

        Returns:
            str: the domain part of the mxid

        """
        idx = mxid.find(":")
        if idx == -1:
            raise Exception("Invalid ID: %r" % (mxid,))
        return mxid[idx + 1:]
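A usage sketch for the checker above, with illustrative domains:

config = DomainRuleChecker.parse_config({
    "domain_mapping": {
        "example.com": ["example.com", "partner.org"],
    },
    "default": False,
})
checker = DomainRuleChecker(config)

# example.com users may invite partner.org users...
assert checker.user_may_invite("@a:example.com", "@b:partner.org", "!r:x")
# ...but unmapped inviter domains fall back to the default (False here).
assert not checker.user_may_invite("@c:other.net", "@b:partner.org", "!r:x")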
@@ -23,7 +23,6 @@ import abc
import logging

from twisted.enterprise import adbapi
from twisted.mail.smtp import sendmail
from twisted.web.client import BrowserLikePolicyForHTTPS

from synapse.api.auth import Auth
@@ -175,7 +174,6 @@ class HomeServer(object):
        'message_handler',
        'pagination_handler',
        'room_context_handler',
        'sendmail',
    ]

    # This is overridden in derived application classes
@@ -271,9 +269,6 @@ class HomeServer(object):
    def build_room_creation_handler(self):
        return RoomCreationHandler(self)

    def build_sendmail(self):
        return sendmail

    def build_state_handler(self):
        return StateHandler(self)


@@ -7,9 +7,6 @@ import synapse.handlers.auth
import synapse.handlers.deactivate_account
import synapse.handlers.device
import synapse.handlers.e2e_keys
import synapse.handlers.room
import synapse.handlers.room_member
import synapse.handlers.message
import synapse.handlers.set_password
import synapse.rest.media.v1.media_repository
import synapse.server_notices.server_notices_manager
@@ -53,9 +50,6 @@ class HomeServer(object):
    def get_room_creation_handler(self) -> synapse.handlers.room.RoomCreationHandler:
        pass

    def get_room_member_handler(self) -> synapse.handlers.room_member.RoomMemberHandler:
        pass

    def get_event_creation_handler(self) -> synapse.handlers.message.EventCreationHandler:
        pass


@@ -261,7 +261,7 @@ class StateHandler(object):
        logger.debug("calling resolve_state_groups from compute_event_context")

        entry = yield self.resolve_state_groups_for_events(
            event.room_id, event.prev_event_ids(),
            event.room_id, [e for e, _ in event.prev_events],
        )

        prev_state_ids = entry.state
@@ -607,7 +607,7 @@ def resolve_events_with_store(room_version, state_sets, event_map, state_res_sto
        return v1.resolve_events_with_store(
            state_sets, event_map, state_res_store.get_events,
        )
    elif room_version in (RoomVersions.VDH_TEST, RoomVersions.STATE_V2_TEST):
    elif room_version == RoomVersions.VDH_TEST:
        return v2.resolve_events_with_store(
            state_sets, event_map, state_res_store,
        )

+8
-12
@@ -53,10 +53,6 @@ def resolve_events_with_store(state_sets, event_map, state_res_store):

    logger.debug("Computing conflicted state")

    # We use event_map as a cache, so if it's None we need to initialize it
    if event_map is None:
        event_map = {}

    # First split up the un/conflicted state
    unconflicted_state, conflicted_state = _seperate(state_sets)

@@ -159,7 +155,7 @@ def _get_power_level_for_sender(event_id, event_map, state_res_store):
    event = yield _get_event(event_id, event_map, state_res_store)

    pl = None
    for aid in event.auth_event_ids():
    for aid, _ in event.auth_events:
        aev = yield _get_event(aid, event_map, state_res_store)
        if (aev.type, aev.state_key) == (EventTypes.PowerLevels, ""):
            pl = aev
@@ -167,7 +163,7 @@ def _get_power_level_for_sender(event_id, event_map, state_res_store):

    if pl is None:
        # Couldn't find power level. Check if they're the creator of the room
        for aid in event.auth_event_ids():
        for aid, _ in event.auth_events:
            aev = yield _get_event(aid, event_map, state_res_store)
            if (aev.type, aev.state_key) == (EventTypes.Create, ""):
                if aev.content.get("creator") == event.sender:
@@ -299,7 +295,7 @@ def _add_event_and_auth_chain_to_graph(graph, event_id, event_map,
        graph.setdefault(eid, set())

        event = yield _get_event(eid, event_map, state_res_store)
        for aid in event.auth_event_ids():
        for aid, _ in event.auth_events:
            if aid in auth_diff:
                if aid not in graph:
                    state.append(aid)
@@ -369,7 +365,7 @@ def _iterative_auth_checks(event_ids, base_state, event_map, state_res_store):
        event = event_map[event_id]

        auth_events = {}
        for aid in event.auth_event_ids():
        for aid, _ in event.auth_events:
            ev = yield _get_event(aid, event_map, state_res_store)

            if ev.rejected_reason is None:
@@ -417,9 +413,9 @@ def _mainline_sort(event_ids, resolved_power_event_id, event_map,
    while pl:
        mainline.append(pl)
        pl_ev = yield _get_event(pl, event_map, state_res_store)
        auth_events = pl_ev.auth_event_ids()
        auth_events = pl_ev.auth_events
        pl = None
        for aid in auth_events:
        for aid, _ in auth_events:
            ev = yield _get_event(aid, event_map, state_res_store)
            if (ev.type, ev.state_key) == (EventTypes.PowerLevels, ""):
                pl = aid
@@ -464,10 +460,10 @@ def _get_mainline_depth_for_event(event, mainline_map, event_map, state_res_stor
    if depth is not None:
        defer.returnValue(depth)

    auth_events = event.auth_event_ids()
    auth_events = event.auth_events
    event = None

    for aid in auth_events:
    for aid, _ in auth_events:
        aev = yield _get_event(aid, event_map, state_res_store)
        if (aev.type, aev.state_key) == (EventTypes.PowerLevels, ""):
            event = aev

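These hunks swap between two shapes of auth-event references: pairs of (event_id, hash) via event.auth_events, and plain ID lists via event.auth_event_ids(). A hypothetical shim that tolerates either shape:

def iter_auth_event_ids(event):
    if hasattr(event, "auth_event_ids"):
        # newer accessor: already plain event IDs
        return list(event.auth_event_ids())
    # older shape: list of (event_id, hash) tuples
    return [aid for aid, _ in event.auth_events]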
@@ -35,7 +35,7 @@ def _make_exclusive_regex(services_cache):
    exclusive_user_regexes = [
        regex.pattern
        for service in services_cache
        for regex in service.get_exlusive_user_regexes()
        for regex in service.get_exclusive_user_regexes()
    ]
    if exclusive_user_regexes:
        exclusive_user_regex = "|".join("(" + r + ")" for r in exclusive_user_regexes)

@@ -22,19 +22,14 @@ from twisted.internet import defer

from synapse.api.errors import StoreError
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.background_updates import BackgroundUpdateStore
from synapse.util.caches.descriptors import cached, cachedInlineCallbacks, cachedList

from ._base import Cache, db_to_json
from ._base import Cache, SQLBaseStore, db_to_json

logger = logging.getLogger(__name__)

DROP_DEVICE_LIST_STREAMS_NON_UNIQUE_INDEXES = (
    "drop_device_list_streams_non_unique_indexes"
)


class DeviceStore(BackgroundUpdateStore):
class DeviceStore(SQLBaseStore):
    def __init__(self, db_conn, hs):
        super(DeviceStore, self).__init__(db_conn, hs)

@@ -57,30 +52,6 @@ class DeviceStore(BackgroundUpdateStore):
            columns=["user_id", "device_id"],
        )

        # create a unique index on device_lists_remote_cache
        self.register_background_index_update(
            "device_lists_remote_cache_unique_idx",
            index_name="device_lists_remote_cache_unique_id",
            table="device_lists_remote_cache",
            columns=["user_id", "device_id"],
            unique=True,
        )

        # And one on device_lists_remote_extremeties
        self.register_background_index_update(
            "device_lists_remote_extremeties_unique_idx",
            index_name="device_lists_remote_extremeties_unique_idx",
            table="device_lists_remote_extremeties",
            columns=["user_id"],
            unique=True,
        )

        # once they complete, we can remove the old non-unique indexes.
        self.register_background_update_handler(
            DROP_DEVICE_LIST_STREAMS_NON_UNIQUE_INDEXES,
            self._drop_device_list_streams_non_unique_indexes,
        )

    @defer.inlineCallbacks
    def store_device(self, user_id, device_id,
                     initial_device_display_name):
@@ -268,19 +239,7 @@ class DeviceStore(BackgroundUpdateStore):

    def update_remote_device_list_cache_entry(self, user_id, device_id, content,
                                              stream_id):
"""Updates a single device in the cache of a remote user's devicelist.
|
||||

        Note: assumes that we are the only thread that can be updating this user's
        device list.

        Args:
            user_id (str): User to update device list for
            device_id (str): ID of device being updated
            content (dict): new data on this device
            stream_id (int): the version of the device list

        Returns:
            Deferred[None]
        """Updates a single user's device in the cache.
        """
        return self.runInteraction(
            "update_remote_device_list_cache_entry",
@@ -313,11 +272,7 @@ class DeviceStore(BackgroundUpdateStore):
            },
            values={
                "content": json.dumps(content),
            },

            # we don't need to lock, because we assume we are the only thread
            # updating this user's devices.
            lock=False,
            }
        )

        txn.call_after(self._get_cached_user_device.invalidate, (user_id, device_id,))
@@ -334,26 +289,11 @@ class DeviceStore(BackgroundUpdateStore):
            },
            values={
                "stream_id": stream_id,
            },

            # again, we can assume we are the only thread updating this user's
            # extremity.
            lock=False,
            }
        )

    def update_remote_device_list_cache(self, user_id, devices, stream_id):
        """Replace the entire cache of the remote user's devices.

        Note: assumes that we are the only thread that can be updating this user's
        device list.

        Args:
            user_id (str): User to update device list for
            devices (list[dict]): list of device objects supplied over federation
            stream_id (int): the version of the device list

        Returns:
            Deferred[None]
        """Replace the cache of the remote user's devices.
        """
        return self.runInteraction(
            "update_remote_device_list_cache",
@@ -398,11 +338,7 @@ class DeviceStore(BackgroundUpdateStore):
            },
            values={
                "stream_id": stream_id,
            },

            # we don't need to lock, because we can assume we are the only thread
            # updating this user's extremity.
            lock=False,
            }
        )

    def get_devices_by_remote(self, destination, from_stream_id):
@@ -653,14 +589,10 @@ class DeviceStore(BackgroundUpdateStore):
        combined list of changes to devices, and which destinations need to be
        poked. `destination` may be None if no destinations need to be poked.
        """
        # We do a group by here as there can be a large number of duplicate
        # entries, since we throw away device IDs.
        sql = """
            SELECT MAX(stream_id) AS stream_id, user_id, destination
            FROM device_lists_stream
            SELECT stream_id, user_id, destination FROM device_lists_stream
            LEFT JOIN device_lists_outbound_pokes USING (stream_id, user_id, device_id)
            WHERE ? < stream_id AND stream_id <= ?
            GROUP BY user_id, destination
        """
        return self._execute(
            "get_all_device_list_changes_for_remotes", None,
@@ -786,19 +718,3 @@ class DeviceStore(BackgroundUpdateStore):
            "_prune_old_outbound_device_pokes",
            _prune_txn,
        )

    @defer.inlineCallbacks
    def _drop_device_list_streams_non_unique_indexes(self, progress, batch_size):
        def f(conn):
            txn = conn.cursor()
            txn.execute(
                "DROP INDEX IF EXISTS device_lists_remote_cache_id"
            )
            txn.execute(
                "DROP INDEX IF EXISTS device_lists_remote_extremeties_id"
            )
            txn.close()

        yield self.runWithConnection(f)
        yield self._end_background_update(DROP_DEVICE_LIST_STREAMS_NON_UNIQUE_INDEXES)
        defer.returnValue(1)

@@ -118,11 +118,6 @@ class EndToEndRoomKeyStore(SQLBaseStore):
            these room keys.
        """

        try:
            version = int(version)
        except ValueError:
            defer.returnValue({'rooms': {}})

        keyvalues = {
            "user_id": user_id,
            "version": version,
@@ -217,23 +212,14 @@ class EndToEndRoomKeyStore(SQLBaseStore):
        Raises:
            StoreError: with code 404 if there are no e2e_room_keys_versions present
        Returns:
            A deferred dict giving the info metadata for this backup version, with
            fields including:
                version(str)
                algorithm(str)
                auth_data(object): opaque dict supplied by the client
            A deferred dict giving the info metadata for this backup version
        """

        def _get_e2e_room_keys_version_info_txn(txn):
            if version is None:
                this_version = self._get_current_version(txn, user_id)
            else:
                try:
                    this_version = int(version)
                except ValueError:
                    # Our versions are all ints so if we can't convert it to an integer,
                    # it isn't there.
                    raise StoreError(404, "No row found")
                this_version = version

            result = self._simple_select_one_txn(
                txn,
@@ -250,7 +236,6 @@ class EndToEndRoomKeyStore(SQLBaseStore):
                ),
            )
            result["auth_data"] = json.loads(result["auth_data"])
            result["version"] = str(result["version"])
            return result

        return self.runInteraction(

@@ -40,10 +40,7 @@ class EndToEndKeyStore(SQLBaseStore):
            allow_none=True,
        )

        # In py3 we need old_key_json to match new_key_json type. The DB
        # returns unicode while encode_canonical_json returns bytes.
        new_key_json = encode_canonical_json(device_keys).decode("utf-8")

        new_key_json = encode_canonical_json(device_keys)
        if old_key_json == new_key_json:
            return False


@@ -477,7 +477,7 @@ class EventFederationStore(EventFederationWorkerStore):
                    "is_state": False,
                }
                for ev in events
                for e_id in ev.prev_event_ids()
                for e_id, _ in ev.prev_events
            ],
        )

@@ -510,7 +510,7 @@ class EventFederationStore(EventFederationWorkerStore):

        txn.executemany(query, [
            (e_id, ev.room_id, e_id, ev.room_id, e_id, ev.room_id, False)
            for ev in events for e_id in ev.prev_event_ids()
            for ev in events for e_id, _ in ev.prev_events
            if not ev.internal_metadata.is_outlier()
        ])


+49
-112
@@ -38,7 +38,6 @@ from synapse.state import StateResolutionStore
from synapse.storage.background_updates import BackgroundUpdateStore
from synapse.storage.event_federation import EventFederationStore
from synapse.storage.events_worker import EventsWorkerStore
from synapse.storage.state import StateGroupWorkerStore
from synapse.types import RoomStreamToken, get_domain_from_id
from synapse.util import batch_iter
from synapse.util.async_helpers import ObservableDeferred
@@ -206,8 +205,7 @@ def _retry_on_integrity_error(func):

# inherits from EventFederationStore so that we can call _update_backward_extremities
# and _handle_mult_prev_events (though arguably those could both be moved in here)
class EventsStore(StateGroupWorkerStore, EventFederationStore, EventsWorkerStore,
                  BackgroundUpdateStore):
class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore):
    EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts"
    EVENT_FIELDS_SENDER_URL_UPDATE_NAME = "event_fields_sender_url"

@@ -416,7 +414,7 @@ class EventsStore(StateGroupWorkerStore, EventFederationStore, EventsWorkerStore
                )
                if len_1:
                    all_single_prev_not_state = all(
                        len(event.prev_event_ids()) == 1
                        len(event.prev_events) == 1
                        and not event.is_state()
                        for event, ctx in ev_ctx_rm
                    )
@@ -440,7 +438,7 @@ class EventsStore(StateGroupWorkerStore, EventFederationStore, EventsWorkerStore
                        # guess this by looking at the prev_events and checking
                        # if they match the current forward extremities.
                        for ev, _ in ev_ctx_rm:
                            prev_event_ids = set(ev.prev_event_ids())
                            prev_event_ids = set(e for e, _ in ev.prev_events)
                            if latest_event_ids == prev_event_ids:
                                state_delta_reuse_delta_counter.inc()
                                break
@@ -551,7 +549,7 @@ class EventsStore(StateGroupWorkerStore, EventFederationStore, EventsWorkerStore
        result.difference_update(
            e_id
            for event in new_events
            for e_id in event.prev_event_ids()
            for e_id, _ in event.prev_events
        )

        # Finally, remove any events which are prev_events of any existing events.
@@ -869,7 +867,7 @@ class EventsStore(StateGroupWorkerStore, EventFederationStore, EventsWorkerStore
                    "auth_id": auth_id,
                }
                for event, _ in events_and_contexts
                for auth_id in event.auth_event_ids()
                for auth_id, _ in event.auth_events
                if event.is_state()
            ],
        )
@@ -2036,37 +2034,55 @@ class EventsStore(StateGroupWorkerStore, EventFederationStore, EventsWorkerStore

        logger.info("[purge] finding redundant state groups")

        # Get all state groups that are referenced by events that are to be
        # deleted. We then go and check if they are referenced by other events
        # or state groups, and if not we delete them.
        # Get all state groups that are only referenced by events that are
        # to be deleted.
        # This works by first getting state groups that we may want to delete,
        # joining against event_to_state_groups to get events that use that
        # state group, then left joining against events_to_purge again. Any
        # state group where the left join produces *no nulls* is referenced
        # only by events that are going to be purged.
        txn.execute("""
            SELECT DISTINCT state_group FROM events_to_purge
            INNER JOIN event_to_state_groups USING (event_id)
            SELECT state_group FROM
            (
                SELECT DISTINCT state_group FROM events_to_purge
                INNER JOIN event_to_state_groups USING (event_id)
            ) AS sp
            INNER JOIN event_to_state_groups USING (state_group)
            LEFT JOIN events_to_purge AS ep USING (event_id)
            GROUP BY state_group
            HAVING SUM(CASE WHEN ep.event_id IS NULL THEN 1 ELSE 0 END) = 0
        """)

        referenced_state_groups = set(sg for sg, in txn)
        logger.info(
            "[purge] found %i referenced state groups",
            len(referenced_state_groups),
        )
        state_rows = txn.fetchall()
        logger.info("[purge] found %i redundant state groups", len(state_rows))

        logger.info("[purge] finding state groups that can be deleted")
        # make a set of the redundant state groups, so that we can look them up
        # efficiently
        state_groups_to_delete = set([sg for sg, in state_rows])

        state_groups_to_delete, remaining_state_groups = (
            self._find_unreferenced_groups_during_purge(
                txn, referenced_state_groups,
        # Now we get all the state groups that rely on these state groups
        logger.info("[purge] finding state groups which depend on redundant"
                    " state groups")
        remaining_state_groups = []
        for i in range(0, len(state_rows), 100):
            chunk = [sg for sg, in state_rows[i:i + 100]]
            # look for state groups whose prev_state_group is one we are about
            # to delete
            rows = self._simple_select_many_txn(
                txn,
                table="state_group_edges",
                column="prev_state_group",
                iterable=chunk,
                retcols=["state_group"],
                keyvalues={},
            )
        )
        remaining_state_groups.extend(
            row["state_group"] for row in rows

        logger.info(
            "[purge] found %i state groups to delete",
            len(state_groups_to_delete),
        )

        logger.info(
            "[purge] de-delta-ing %i remaining state groups",
            len(remaining_state_groups),
        )
            # exclude state groups we are about to delete: no point in
            # updating them
            if row["state_group"] not in state_groups_to_delete
        )

        # Now we turn the state groups that reference to-be-deleted state
        # groups to non delta versions.
@@ -2111,11 +2127,11 @@ class EventsStore(StateGroupWorkerStore, EventFederationStore, EventsWorkerStore
        logger.info("[purge] removing redundant state groups")
        txn.executemany(
            "DELETE FROM state_groups_state WHERE state_group = ?",
            ((sg,) for sg in state_groups_to_delete),
            state_rows
        )
        txn.executemany(
            "DELETE FROM state_groups WHERE id = ?",
            ((sg,) for sg in state_groups_to_delete),
            state_rows
        )

        logger.info("[purge] removing events from event_to_state_groups")
@@ -2211,85 +2227,6 @@ class EventsStore(StateGroupWorkerStore, EventFederationStore, EventsWorkerStore

        logger.info("[purge] done")

    def _find_unreferenced_groups_during_purge(self, txn, state_groups):
        """Used when purging history to figure out which state groups can be
        deleted and which need to be de-delta'ed (due to one of its prev groups
        being scheduled for deletion).

        Args:
            txn
            state_groups (set[int]): Set of state groups referenced by events
                that are going to be deleted.

        Returns:
            tuple[set[int], set[int]]: The set of state groups that can be
            deleted and the set of state groups that need to be de-delta'ed
        """
        # Graph of state group -> previous group
        graph = {}

        # Set of events that we have found to be referenced by events
        referenced_groups = set()

        # Set of state groups we've already seen
        state_groups_seen = set(state_groups)

        # Set of state groups to handle next.
        next_to_search = set(state_groups)
        while next_to_search:
            # We bound size of groups we're looking up at once, to stop the
            # SQL query getting too big
            if len(next_to_search) < 100:
                current_search = next_to_search
                next_to_search = set()
            else:
                current_search = set(itertools.islice(next_to_search, 100))
                next_to_search -= current_search

            # Check if state groups are referenced
            sql = """
                SELECT DISTINCT state_group FROM event_to_state_groups
                LEFT JOIN events_to_purge AS ep USING (event_id)
                WHERE state_group IN (%s) AND ep.event_id IS NULL
            """ % (",".join("?" for _ in current_search),)
            txn.execute(sql, list(current_search))

            referenced = set(sg for sg, in txn)
            referenced_groups |= referenced

            # We don't continue iterating up the state group graphs for state
            # groups that are referenced.
            current_search -= referenced

            rows = self._simple_select_many_txn(
                txn,
                table="state_group_edges",
                column="prev_state_group",
                iterable=current_search,
                keyvalues={},
                retcols=("prev_state_group", "state_group",),
            )

            prevs = set(row["state_group"] for row in rows)
            # We don't bother re-handling groups we've already seen
            prevs -= state_groups_seen
            next_to_search |= prevs
            state_groups_seen |= prevs

            for row in rows:
                # Note: Each state group can have at most one prev group
                graph[row["state_group"]] = row["prev_state_group"]

        to_delete = state_groups_seen - referenced_groups

        to_dedelta = set()
        for sg in referenced_groups:
            prev_sg = graph.get(sg)
            if prev_sg and prev_sg in to_delete:
                to_dedelta.add(sg)

        return to_delete, to_dedelta

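A toy walk-through of the delete/de-delta decision above, with made-up state-group IDs:

# Group 3 deltas off 2, and 2 off 1; only group 3 is still referenced.
graph = {3: 2, 2: 1}            # state group -> previous group
state_groups_seen = {1, 2, 3}
referenced_groups = {3}

to_delete = state_groups_seen - referenced_groups   # {1, 2}

to_dedelta = set()
for sg in referenced_groups:
    prev_sg = graph.get(sg)
    if prev_sg and prev_sg in to_delete:
        to_dedelta.add(sg)       # 3 must be de-delta'ed before 2 goes away

assert to_delete == {1, 2} and to_dedelta == {3}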
    @defer.inlineCallbacks
    def is_event_after(self, event_id1, event_id2):
        """Returns True if event_id1 is after event_id2 in the stream

@@ -25,7 +25,7 @@ logger = logging.getLogger(__name__)

# Remember to update this number every time a change is made to database
# schema files, so the users will be informed on server restarts.
SCHEMA_VERSION = 52
SCHEMA_VERSION = 51

dir_path = os.path.abspath(os.path.dirname(__file__))


+84
-13
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -20,6 +21,8 @@ from synapse.storage.roommember import ProfileInfo

from ._base import SQLBaseStore

BATCH_SIZE = 100


class ProfileWorkerStore(SQLBaseStore):
    @defer.inlineCallbacks
@@ -62,6 +65,55 @@ class ProfileWorkerStore(SQLBaseStore):
            desc="get_profile_avatar_url",
        )

    def get_latest_profile_replication_batch_number(self):
        def f(txn):
            txn.execute("SELECT MAX(batch) as maxbatch FROM profiles")
            rows = self.cursor_to_dict(txn)
            return rows[0]['maxbatch']
        return self.runInteraction(
            "get_latest_profile_replication_batch_number", f,
        )

    def get_profile_batch(self, batchnum):
        return self._simple_select_list(
            table="profiles",
            keyvalues={
                "batch": batchnum,
            },
            retcols=("user_id", "displayname", "avatar_url", "active"),
            desc="get_profile_batch",
        )

    def assign_profile_batch(self):
        def f(txn):
            sql = (
                "UPDATE profiles SET batch = "
                "(SELECT COALESCE(MAX(batch), -1) + 1 FROM profiles) "
                "WHERE user_id in ("
                " SELECT user_id FROM profiles WHERE batch is NULL limit ?"
                ")"
            )
            txn.execute(sql, (BATCH_SIZE,))
            return txn.rowcount
        return self.runInteraction("assign_profile_batch", f)

    def get_replication_hosts(self):
        def f(txn):
            txn.execute("SELECT host, last_synced_batch FROM profile_replication_status")
            rows = self.cursor_to_dict(txn)
            return {r['host']: r['last_synced_batch'] for r in rows}
        return self.runInteraction("get_replication_hosts", f)

    def update_replication_batch_for_host(self, host, last_synced_batch):
        return self._simple_upsert(
            table="profile_replication_status",
            keyvalues={"host": host},
            values={
                "last_synced_batch": last_synced_batch,
            },
            desc="update_replication_batch_for_host",
        )

    def get_from_remote_profile_cache(self, user_id):
        return self._simple_select_one(
            table="remote_profile_cache",
@@ -71,27 +123,46 @@ class ProfileWorkerStore(SQLBaseStore):
            desc="get_from_remote_profile_cache",
        )

    def create_profile(self, user_localpart):
        return self._simple_insert(
            table="profiles",
            values={"user_id": user_localpart},
            desc="create_profile",
        )

    def set_profile_displayname(self, user_localpart, new_displayname):
        return self._simple_update_one(
    def set_profile_displayname(self, user_localpart, new_displayname, batchnum):
        return self._simple_upsert(
            table="profiles",
            keyvalues={"user_id": user_localpart},
            updatevalues={"displayname": new_displayname},
            values={
                "displayname": new_displayname,
                "batch": batchnum,
            },
            desc="set_profile_displayname",
            lock=False  # we can do this because user_id has a unique index
        )

    def set_profile_avatar_url(self, user_localpart, new_avatar_url):
        return self._simple_update_one(
    def set_profile_avatar_url(self, user_localpart, new_avatar_url, batchnum):
        return self._simple_upsert(
            table="profiles",
            keyvalues={"user_id": user_localpart},
            updatevalues={"avatar_url": new_avatar_url},
            values={
                "avatar_url": new_avatar_url,
                "batch": batchnum,
            },
            desc="set_profile_avatar_url",
            lock=False  # we can do this because user_id has a unique index
        )

    def set_profile_active(self, user_localpart, active, hide, batchnum):
        values = {
            "active": int(active),
            "batch": batchnum,
        }
        if not active and not hide:
            # we are deactivating for real (not in hide mode)
            # so clear the profile.
            values["avatar_url"] = None
            values["displayname"] = None
        return self._simple_upsert(
            table="profiles",
            keyvalues={"user_id": user_localpart},
            values=values,
            desc="set_profile_active",
            lock=False  # we can do this because user_id has a unique index
        )

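A hypothetical driver loop for the batch-replication methods above; push_batch_to_host is a placeholder for whatever transport actually ships a batch to a target host:

from twisted.internet import defer

@defer.inlineCallbacks
def replicate_profiles(store, push_batch_to_host):
    # Stamp any not-yet-batched profile rows with the next batch number.
    yield store.assign_profile_batch()
    latest = yield store.get_latest_profile_replication_batch_number()
    if latest is None:
        return  # nothing has ever been batched
    hosts = yield store.get_replication_hosts()
    for host, last_synced in hosts.items():
        while last_synced < latest:
            last_synced += 1
            batch = yield store.get_profile_batch(last_synced)
            yield push_batch_to_host(host, batch)  # placeholder transport
            yield store.update_replication_batch_for_host(host, last_synced)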
@@ -167,7 +167,7 @@ class RegistrationStore(RegistrationWorkerStore,

    def register(self, user_id, token=None, password_hash=None,
                 was_guest=False, make_guest=False, appservice_id=None,
                 create_profile_with_localpart=None, admin=False):
                 admin=False):
        """Attempts to register an account.

        Args:
@@ -181,8 +181,6 @@ class RegistrationStore(RegistrationWorkerStore,
            make_guest (boolean): True if the new user should be guest,
                false to add a regular user account.
            appservice_id (str): The ID of the appservice registering the user.
            create_profile_with_localpart (str): Optionally create a profile for
                the given localpart.
        Raises:
            StoreError if the user_id could not be registered.
        """
@@ -195,7 +193,6 @@ class RegistrationStore(RegistrationWorkerStore,
            was_guest,
            make_guest,
            appservice_id,
            create_profile_with_localpart,
            admin
        )

@@ -208,7 +205,6 @@ class RegistrationStore(RegistrationWorkerStore,
        was_guest,
        make_guest,
        appservice_id,
        create_profile_with_localpart,
        admin,
    ):
        now = int(self.clock.time())
@@ -273,14 +269,6 @@ class RegistrationStore(RegistrationWorkerStore,
                (next_id, user_id, token,)
            )

        if create_profile_with_localpart:
            # set a default displayname serverside to avoid ugly race
            # between auto-joins and clients trying to set displaynames
            txn.execute(
                "INSERT INTO profiles(user_id, displayname) VALUES (?,?)",
                (create_profile_with_localpart, create_profile_with_localpart)
            )

        self._invalidate_cache_and_stream(
            txn, self.get_user_by_id, (user_id,)
        )

@@ -47,7 +47,7 @@ class RoomWorkerStore(SQLBaseStore):
        Args:
            room_id (str): The ID of the room to retrieve.
        Returns:
            A dict containing the room information, or None if the room is unknown.
            A namedtuple containing the room information, or an empty list.
        """
        return self._simple_select_one(
            table="rooms",

@@ -20,6 +20,9 @@ CREATE TABLE device_lists_remote_cache (
    content TEXT NOT NULL
);

CREATE INDEX device_lists_remote_cache_id ON device_lists_remote_cache(user_id, device_id);


-- The last update we got for a user. Empty if we're not receiving updates for
-- that user.
CREATE TABLE device_lists_remote_extremeties (
@@ -27,11 +30,7 @@ CREATE TABLE device_lists_remote_extremeties (
    stream_id TEXT NOT NULL
);

-- we used to create non-unique indexes on these tables, but as of update 52 we create
-- unique indexes concurrently:
--
-- CREATE INDEX device_lists_remote_cache_id ON device_lists_remote_cache(user_id, device_id);
-- CREATE INDEX device_lists_remote_extremeties_id ON device_lists_remote_extremeties(user_id, stream_id);
CREATE INDEX device_lists_remote_extremeties_id ON device_lists_remote_extremeties(user_id, stream_id);


-- Stream of device lists updates. Includes both local and remotes

@@ -0,0 +1,36 @@
/* Copyright 2018 New Vector Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Add a batch number to track changes to profiles and the
 * order they're made in so we can replicate user profiles
 * to other hosts as they change
 */
ALTER TABLE profiles ADD COLUMN batch BIGINT DEFAULT NULL;

/*
 * Index on the batch number so we can get profiles
 * by their batch
 */
CREATE INDEX profiles_batch_idx ON profiles(batch);

/*
 * A table to track what batch of user profiles has been
 * synced to what profile replication target.
 */
CREATE TABLE profile_replication_status (
    host TEXT NOT NULL,
    last_synced_batch BIGINT NOT NULL
);
+8
-4
@@ -13,7 +13,11 @@
 * limitations under the License.
 */

-- This is needed to efficiently check for unreferenced state groups during
-- purge. Added events_to_state_group(state_group) index
INSERT into background_updates (update_name, progress_json)
    VALUES ('event_to_state_groups_sg_index', '{}');
/*
 * A flag saying whether the user owning the profile has been deactivated
 * This really belongs on the users table, not here, but the users table
 * stores users by their full user_id and profiles stores them by localpart,
 * so we can't easily join between the two tables. Plus, the batch number
 * really ought to represent data in this table that has changed.
 */
ALTER TABLE profiles ADD COLUMN active SMALLINT DEFAULT 1 NOT NULL;
@@ -1,36 +0,0 @@
/* Copyright 2018 New Vector Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

-- register a background update which will create a unique index on
-- device_lists_remote_cache
INSERT into background_updates (update_name, progress_json)
    VALUES ('device_lists_remote_cache_unique_idx', '{}');

-- and one on device_lists_remote_extremeties
INSERT into background_updates (update_name, progress_json, depends_on)
    VALUES (
        'device_lists_remote_extremeties_unique_idx', '{}',

        -- doesn't really depend on this, but we need to make sure both happen
        -- before we drop the old indexes.
        'device_lists_remote_cache_unique_idx'
    );

-- once they complete, we can drop the old indexes.
INSERT into background_updates (update_name, progress_json, depends_on)
    VALUES (
        'drop_device_list_streams_non_unique_indexes', '{}',
        'device_lists_remote_extremeties_unique_idx'
    );
@@ -1,53 +0,0 @@
/* Copyright 2018 New Vector Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* Change version column to an integer so we can do MAX() sensibly
 */
CREATE TABLE e2e_room_keys_versions_new (
    user_id TEXT NOT NULL,
    version BIGINT NOT NULL,
    algorithm TEXT NOT NULL,
    auth_data TEXT NOT NULL,
    deleted SMALLINT DEFAULT 0 NOT NULL
);

INSERT INTO e2e_room_keys_versions_new
    SELECT user_id, CAST(version as BIGINT), algorithm, auth_data, deleted FROM e2e_room_keys_versions;

DROP TABLE e2e_room_keys_versions;
ALTER TABLE e2e_room_keys_versions_new RENAME TO e2e_room_keys_versions;

CREATE UNIQUE INDEX e2e_room_keys_versions_idx ON e2e_room_keys_versions(user_id, version);

/* Change e2e_rooms_keys to match
 */
CREATE TABLE e2e_room_keys_new (
    user_id TEXT NOT NULL,
    room_id TEXT NOT NULL,
    session_id TEXT NOT NULL,
    version BIGINT NOT NULL,
    first_message_index INT,
    forwarded_count INT,
    is_verified BOOLEAN,
    session_data TEXT NOT NULL
);

INSERT INTO e2e_room_keys_new
    SELECT user_id, room_id, session_id, CAST(version as BIGINT), first_message_index, forwarded_count, is_verified, session_data FROM e2e_room_keys;

DROP TABLE e2e_room_keys;
ALTER TABLE e2e_room_keys_new RENAME TO e2e_room_keys;

CREATE UNIQUE INDEX e2e_room_keys_idx ON e2e_room_keys(user_id, room_id, session_id);
@@ -1257,7 +1257,6 @@ class StateStore(StateGroupWorkerStore, BackgroundUpdateStore):
    STATE_GROUP_DEDUPLICATION_UPDATE_NAME = "state_group_state_deduplication"
    STATE_GROUP_INDEX_UPDATE_NAME = "state_group_state_type_index"
    CURRENT_STATE_INDEX_UPDATE_NAME = "current_state_members_idx"
    EVENT_STATE_GROUP_INDEX_UPDATE_NAME = "event_to_state_groups_sg_index"

    def __init__(self, db_conn, hs):
        super(StateStore, self).__init__(db_conn, hs)
@@ -1276,12 +1275,6 @@ class StateStore(StateGroupWorkerStore, BackgroundUpdateStore):
            columns=["state_key"],
            where_clause="type='m.room.member'",
        )
        self.register_background_index_update(
            self.EVENT_STATE_GROUP_INDEX_UPDATE_NAME,
            index_name="event_to_state_groups_sg_index",
            table="event_to_state_groups",
            columns=["state_group"],
        )

    def _store_event_state_mappings_txn(self, txn, events_and_contexts):
        state_groups = {}

@@ -228,6 +228,18 @@ def contains_invalid_mxid_characters(localpart):
    return any(c not in mxid_localpart_allowed_characters for c in localpart)


def strip_invalid_mxid_characters(localpart):
    """Removes any invalid characters from an mxid

    Args:
        localpart (basestring): the localpart to be stripped

    Returns:
        localpart (basestring): the localpart having been stripped
    """
    return filter(lambda c: c in mxid_localpart_allowed_characters, localpart)


class StreamToken(
    namedtuple("Token", (
        "room_key",

@@ -16,9 +16,12 @@
import logging
import re

from twisted.internet import defer

logger = logging.getLogger(__name__)


@defer.inlineCallbacks
def check_3pid_allowed(hs, medium, address):
    """Checks whether a given format of 3PID is allowed to be used on this HS

@@ -28,9 +31,22 @@ def check_3pid_allowed(hs, medium, address):
        address (str): address within that medium (e.g. "wotan@matrix.org")
            msisdns need to first have been canonicalised
    Returns:
        bool: whether the 3PID medium/address is allowed to be added to this HS
        deferred bool: whether the 3PID medium/address is allowed to be added to this HS
"""
|
||||
|
||||
if hs.config.check_is_for_allowed_local_3pids:
|
||||
data = yield hs.get_simple_http_client().get_json(
|
||||
"https://%s%s" % (
|
||||
hs.config.check_is_for_allowed_local_3pids,
|
||||
"/_matrix/identity/api/v1/info"
|
||||
),
|
||||
{'medium': medium, 'address': address}
|
||||
)
|
||||
if hs.config.allow_invited_3pids and data.get('invited'):
|
||||
defer.returnValue(True)
|
||||
else:
|
||||
defer.returnValue(data['hs'] == hs.config.server_name)
|
||||
|
||||
if hs.config.allowed_local_3pids:
|
||||
for constraint in hs.config.allowed_local_3pids:
|
||||
logger.debug(
|
||||
@@ -41,8 +57,8 @@ def check_3pid_allowed(hs, medium, address):
|
||||
medium == constraint['medium'] and
|
||||
re.match(constraint['pattern'], address)
|
||||
):
|
||||
return True
|
||||
defer.returnValue(True)
|
||||
else:
|
||||
return True
|
||||
defer.returnValue(True)
|
||||
|
||||
return False
|
||||
defer.returnValue(False)
|
||||
|
||||
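A sketch of the constraint matching above, against an illustrative allowed_local_3pids-style list (only the medium/pattern check is shown):

import re

allowed_local_3pids = [
    {'medium': 'email', 'pattern': r'.*@example\.com'},
    {'medium': 'msisdn', 'pattern': r'\+44'},
]

def is_allowed(medium, address):
    for constraint in allowed_local_3pids:
        if medium == constraint['medium'] and re.match(constraint['pattern'], address):
            return True
    return False

assert is_allowed('email', 'wotan@example.com')
assert not is_allowed('email', 'wotan@elsewhere.org')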
Some files were not shown because too many files have changed in this diff.