
Compare commits


1022 Commits

Author SHA1 Message Date
Erik Johnston
5fbdf2bcec Merge branch 'release-v0.14.0' of github.com:matrix-org/synapse 2016-03-30 12:36:40 +01:00
Erik Johnston
e7aaa7c61e Bump version and changelog 2016-03-30 12:35:25 +01:00
Erik Johnston
a932acaa6b Merge pull request #672 from nikriek/new-author
Add new author
2016-03-29 23:19:52 +01:00
Niklas Riekenbrauck
82312d4fff Add new author 2016-03-29 23:42:15 +02:00
Erik Johnston
15844040c2 Bump version and changelog 2016-03-23 16:57:41 +00:00
Erik Johnston
7a3815b372 Merge branch 'develop' of github.com:matrix-org/synapse into release-v0.14.0 2016-03-23 16:55:29 +00:00
Erik Johnston
647b041d1a Merge pull request #666 from matrix-org/erikj/intern
Intern lots of strings
2016-03-23 16:54:59 +00:00
Erik Johnston
8122ad7bab Simplify intern_dict 2016-03-23 16:41:54 +00:00
Erik Johnston
2f0180b09e Don't bother interning keys that are already interned 2016-03-23 16:29:46 +00:00
Erik Johnston
acdfef7b14 Intern all the things 2016-03-23 16:25:54 +00:00
Erik Johnston
f96526ffc2 Intern sender, event_id and room_id in events 2016-03-23 15:04:11 +00:00
Erik Johnston
fe9794706a Intern type and state_key on events 2016-03-23 14:58:08 +00:00
Erik Johnston
75daede92f String intern 2016-03-23 14:53:53 +00:00
Erik Johnston
fbdeb1778d Merge pull request #664 from matrix-org/erikj/public_room_list
Don't require alias in public room list.
2016-03-23 14:42:01 +00:00
Erik Johnston
b275765545 Comment about weird SQL 2016-03-23 14:15:32 +00:00
Erik Johnston
0c1a27b787 SQLite and postgres don't share a true literal 2016-03-23 14:10:49 +00:00
Erik Johnston
84afeb41f3 Ensure all old public rooms have aliases 2016-03-23 13:59:34 +00:00
Erik Johnston
b2802a1351 Ensure published rooms have public join rules 2016-03-23 13:59:31 +00:00
Erik Johnston
0677fc1c4e Comment 2016-03-23 13:25:22 +00:00
Erik Johnston
2749da542c Merge pull request #663 from matrix-org/erikj/invite_for_user
Make get_invites return RoomsForUser
2016-03-23 13:19:26 +00:00
Erik Johnston
e14baa7a3b Merge pull request #665 from matrix-org/erikj/dont_cache_events
Only cache events in the event cache
2016-03-23 13:19:12 +00:00
Erik Johnston
0e7363e0b3 Merge pull request #662 from matrix-org/erikj/state_cache
Make StateHandler._state_cache only store event_ids.
2016-03-23 11:43:03 +00:00
Erik Johnston
d87a846ebc Don't cache events in get_recent_events_for_room 2016-03-23 11:42:50 +00:00
Erik Johnston
8b0dfc9fc4 Don't cache events in get_current_state_for_key 2016-03-23 11:42:17 +00:00
Erik Johnston
34473a9c7f Don't require alias in public room list.
Rooms now no longer require an alias to be published.

Also, changes the way we pull out state of each room to not require
fetching all state events.
2016-03-23 10:42:19 +00:00
Erik Johnston
b6507869cd Make get_invites return RoomsForUser 2016-03-23 10:32:10 +00:00
Erik Johnston
9e2e994395 Reduce cache size 2016-03-23 09:28:07 +00:00
Erik Johnston
d531ebcb57 Key StateHandler._state_cache off of state groups 2016-03-22 18:02:36 +00:00
Erik Johnston
c4a8cbd15a Make LruCache use a dedicated _Node class 2016-03-22 16:06:21 +00:00
Erik Johnston
99f929f36b Make StateHandler._state_cache only store event_ids. 2016-03-22 16:06:04 +00:00
Erik Johnston
d787e41b20 Measure StateHandler._resolve_events 2016-03-22 14:44:48 +00:00
Erik Johnston
6cf0ba1466 Bump get_unread_event_push_actions_by_room_for_user cache 2016-03-22 14:18:21 +00:00
Erik Johnston
76d18a5776 Bump get_aliases_for_room cache 2016-03-22 14:08:13 +00:00
Mark Haines
cd9ba1ed89 Merge pull request #661 from matrix-org/markjh/member_count
Fix membership count for BulkPushRuleEvaluator
2016-03-22 13:59:56 +00:00
Mark Haines
5defb25ac6 Use get_users_in_room to count the number of room members rather than using read_receipts 2016-03-22 13:52:45 +00:00
Erik Johnston
fa2f96c2e3 Merge pull request #660 from matrix-org/erikj/state_cache
Don't cache events in _state_group_cache
2016-03-22 13:12:06 +00:00
Erik Johnston
f93304e77f Merge pull request #659 from matrix-org/erikj/state_cache_factor
Make stateGroupCache honour CACHE_SIZE_FACTOR
2016-03-22 13:12:01 +00:00
Erik Johnston
2c86187a1b Don't cache events in _state_group_cache
Instead, simply cache the event ids, relying on the event cache to cache
the actual events.

The problem was that while the state groups cache was limited in the
number of groups it could hold, each individual group could consist of
thousands of events.
2016-03-22 12:00:09 +00:00
Erik Johnston
d6ac752538 Merge pull request #657 from matrix-org/erikj/roomlist
Add published room list edit API
2016-03-22 11:57:39 +00:00
Erik Johnston
97785bfc0f Doc string 2016-03-22 10:41:44 +00:00
Erik Johnston
b591277620 Make stateGroupCache honour CACHE_SIZE_FACTOR 2016-03-22 10:32:50 +00:00
Mark Haines
63137bb901 Merge pull request #658 from matrix-org/markjh/cleanup
Remove some unused parameters from persist_event
2016-03-22 10:18:39 +00:00
Matthew Hodgson
d3654694d0 an invalide is something else... 2016-03-22 00:52:31 +00:00
Mark Haines
5244c0b48e Remove unused backfilled parameter from persist_event 2016-03-21 18:06:08 +00:00
Erik Johnston
3e7fac0d56 Add published room list edit API 2016-03-21 15:06:07 +00:00
Mark Haines
58f8226c7f remove unused current_state variable from on_receive_pdu 2016-03-21 14:20:34 +00:00
Erik Johnston
e4054abfdc Merge pull request #655 from matrix-org/erikj/edu_yield
Yield on EDU handling
2016-03-18 15:23:20 +00:00
Erik Johnston
9adf0e92bc Catch exceptions from EDU handling 2016-03-18 15:12:50 +00:00
Erik Johnston
1660145a08 Merge branch 'develop' of github.com:matrix-org/synapse into erikj/edu_yield 2016-03-18 15:02:43 +00:00
Erik Johnston
7f79a6405b Merge pull request #656 from matrix-org/erikj/get_event
Dedupe requested event list in _get_events
2016-03-18 14:53:13 +00:00
Erik Johnston
8595ff7842 Merge branch 'develop' of github.com:matrix-org/synapse into erikj/get_event 2016-03-18 14:47:19 +00:00
Erik Johnston
58e207cd77 Don't assume existence of event_id in __str__ 2016-03-18 14:31:44 +00:00
Erik Johnston
67ed8065db Dedupe requested event list in _get_events 2016-03-18 14:31:31 +00:00
Erik Johnston
916227b4df Merge pull request #652 from matrix-org/erikj/delete_alias
Update aliases event after deletion
2016-03-18 14:02:46 +00:00
Erik Johnston
3c5f25507b Yield on EDU handling 2016-03-18 13:55:16 +00:00
Erik Johnston
56aa4e7a9a Check canonical alias event exists 2016-03-17 15:24:19 +00:00
David Baker
384ee6eafb Merge pull request #650 from matrix-org/dbkr/register_idempotent_with_username
Make registration idempotent, part 2
2016-03-17 14:34:08 +00:00
Mark Haines
2ec3460967 Merge pull request #651 from matrix-org/markjh/unused_II
Remove dead code left over from presence changes
2016-03-17 13:37:53 +00:00
Mark Haines
7a38612620 Remove another unused function from presence 2016-03-17 11:54:19 +00:00
Erik Johnston
2cd9260500 Update aliases event after deletion
Attempt to update the appropriate `m.room.aliases` event after deleting
an alias. This may fail due to the deleter not being in the room.

Will also check if the canonical alias of the event is set to the
deleted alias, and if so will attempt to delete it.
2016-03-17 11:42:00 +00:00
Mark Haines
673c96ce97 Remove dead code left over from presence changes 2016-03-17 11:03:47 +00:00
Mark Haines
4ebb688f4f Add option to definitions.py to search for functions a function refers to 2016-03-17 10:59:12 +00:00
David Baker
5670205e2a remove debug logging 2016-03-16 19:49:42 +00:00
David Baker
f984decd66 Unused import 2016-03-16 19:40:48 +00:00
David Baker
a7daa5ae13 Make registration idempotent, part 2: be idempotent if the client specifies a username. 2016-03-16 19:36:57 +00:00
David Baker
48b2e853a8 Merge pull request #649 from matrix-org/dbkr/idempotent_registration
Make registration idempotent
2016-03-16 16:35:45 +00:00
David Baker
b58d10a875 pep8 2016-03-16 16:22:20 +00:00
David Baker
3ee7d7dc7f time_msec() 2016-03-16 16:18:52 +00:00
David Baker
3176aebf9d string with symbols is a bit too symboly. 2016-03-16 15:55:49 +00:00
David Baker
9671e6750c Replace other time.time(). 2016-03-16 15:51:28 +00:00
David Baker
742b6c6d15 Use hs get_clock instead of time.time() 2016-03-16 15:42:35 +00:00
David Baker
f5e90422f5 take extra return val from check_auth in account too 2016-03-16 14:33:19 +00:00
David Baker
ff7d3dc3a0 Fix tests 2016-03-16 14:25:14 +00:00
David Baker
99797947aa pep8 & remove debug logging 2016-03-16 12:51:34 +00:00
David Baker
c12b9d719a Make registration idempotent: if you specify the same session, make it give you an access token for the user that was registered on previous uses of that session. Tweak the UI auth layer to not delete sessions when their auth has completed and hence expire them so they don't hang around until server restart. Allow server-side data to be associated with UI auth sessions. 2016-03-16 11:56:24 +00:00
Mark Haines
add89a03a6 Merge pull request #647 from matrix-org/markjh/pushers_stream
Add replication stream for pushers
2016-03-16 11:26:22 +00:00
Richard van der Hoff
467c1599c9 Merge pull request #648 from matrix-org/rav/password_reset
Password reset docs and script
2016-03-16 10:47:33 +00:00
Richard van der Hoff
660ae8e0f3 Clarify that we do have reset functionality via the IS 2016-03-16 10:40:38 +00:00
Mark Haines
ba660ecde2 Add a comment to offer a hint to an explanation for why we have a unique constraint on (app_id, pushkey, user_id) 2016-03-16 10:35:00 +00:00
Richard van der Hoff
a877209c8b Password reset docs and script
Replace the bash/perl gen_password script with a python one, and write a note
on how to use it.
2016-03-16 09:45:37 +00:00
Mark Haines
ee32d622ce Fix a couple of errors when deleting pushers 2016-03-15 17:47:36 +00:00
Mark Haines
6df1c79c22 Merge branch 'develop' into markjh/pushers_stream 2016-03-15 17:44:39 +00:00
Mark Haines
12904932c4 Hook up adding a pusher to the notifier for replication. 2016-03-15 17:42:03 +00:00
Mark Haines
b6e8420aee Add replication stream for pushers 2016-03-15 17:33:10 +00:00
Erik Johnston
91779b49c4 Merge pull request #646 from matrix-org/erikj/reject_invite_federation
Persist rejection of invites over federation
2016-03-15 15:03:00 +00:00
Erik Johnston
e5f0e58931 Remove needless PreserveLoggingContext 2016-03-15 13:48:40 +00:00
Erik Johnston
9e982750ee Persist rejection of invites over federation 2016-03-15 13:24:31 +00:00
Erik Johnston
5ca695cc12 English 2016-03-15 09:48:10 +00:00
Erik Johnston
13e29a697c Bump version and changelog 2016-03-15 09:43:32 +00:00
Erik Johnston
6b1e9b8dfe Merge pull request #645 from matrix-org/erikj/3pid_guest_config
Add config to create guest account on 3pid invite
2016-03-14 16:53:53 +00:00
Erik Johnston
590fbbef03 Add config to create guest account on 3pid invite
Currently, when a 3pid invite request is sent to an identity server, it
includes a provisioned guest access token. This allows the link in the,
say, invite email to include the guest access token ensuring that the
same account is used each time the link is clicked.

This flow has a number of flaws, including when using different servers
or servers that have guest access disabled.

For now, we keep this implementation but hide it behind a config option
until a better flow is implemented.
2016-03-14 15:50:40 +00:00
Erik Johnston
a547e2df85 Return list, not generator. 2016-03-14 15:30:19 +00:00
Mark Haines
e462aa97bf Merge pull request #644 from matrix-org/markjh/parse_jsonIII
Fix regression where synapse checked whether push rules were valid
2016-03-14 14:36:17 +00:00
Mark Haines
398cd1edfb Fix regression where synapse checked whether push rules were valid JSON before the compatibility hack that handled clients sending invalid JSON 2016-03-14 14:16:41 +00:00
Erik Johnston
494d0c8e02 Merge pull request #642 from matrix-org/erikj/logout
Implement logout
2016-03-11 20:16:25 +00:00
Mark Haines
ffb9dd02fe Merge pull request #643 from matrix-org/markjh/parse_json_II
Use parse_json_object_from_request to parse JSON out of request bodies
2016-03-11 17:27:37 +00:00
Erik Johnston
15122da0e2 That's not how transactions work. 2016-03-11 16:45:27 +00:00
Mark Haines
e9c1cabac2 Use parse_json_object_from_request to parse JSON out of request bodies 2016-03-11 16:41:03 +00:00
Erik Johnston
ae6ff09494 Empty commit 2016-03-11 16:33:22 +00:00
Erik Johnston
b13035cc91 Implement logout 2016-03-11 16:27:50 +00:00
Erik Johnston
c081228439 Fix SQL statement 2016-03-11 15:09:17 +00:00
Erik Johnston
b5afe6bc38 Merge pull request #641 from matrix-org/dbkr/fix_change_password
Fix logout on password change
2016-03-11 14:48:20 +00:00
David Baker
2dee03aee5 more pep8 2016-03-11 14:38:23 +00:00
David Baker
af59826a2f Make select more sensible when deleting access tokens, rename pusher deletion to match access token deletion and make exception arg optional. 2016-03-11 14:34:09 +00:00
David Baker
f523177850 Delete old, unused methods and rename new one to just be user_delete_access_tokens with an except_token_ids argument doing what it says on the tin. 2016-03-11 14:29:01 +00:00
David Baker
57c444b3ad Dear PyCharm, please indent sensibly for me. Thx. 2016-03-11 14:25:05 +00:00
Erik Johnston
d5fda6e3b0 Merge pull request #640 from matrix-org/erikj/keyclient_host
Make key client send a Host header
2016-03-11 13:32:59 +00:00
Erik Johnston
58443a022d Merge pull request #635 from matrix-org/erikj/sync_order
Use topological orders for initial sync timeline
2016-03-11 13:17:49 +00:00
David Baker
aa11db5f11 Fix cache invalidation so deleting access tokens (which we did when changing password) actually takes effect without HS restart. Reinstate the code to avoid logging out the session that changed the password, removed in 415c2f0549 2016-03-11 13:14:18 +00:00
Erik Johnston
2e2be463f8 Make key client send a Host header 2016-03-11 10:29:05 +00:00
Daniel Wagner-Hall
379c60b08d Merge pull request #638 from matrix-org/daniel/appserviceid
Store appservice ID on register
2016-03-10 15:58:27 +00:00
Daniel Wagner-Hall
465605d616 Store appservice ID on register 2016-03-10 15:58:22 +00:00
Erik Johnston
703826886c Merge pull request #639 from matrix-org/erikj/as_user_update_batch
Update users table in a batched manner
2016-03-10 15:38:00 +00:00
Erik Johnston
9669a99d1a Update users table in a batched manner 2016-03-10 15:12:19 +00:00
Erik Johnston
c22a3f37a9 Merge pull request #637 from blide/develop
SYN-646 - Register endpoint returns refresh_token
2016-03-10 14:03:48 +00:00
blide
1be438f2a6 Flake8 fix 2016-03-10 12:13:35 +03:00
blide
40160e24ab Register endpoint returns refresh_token
Guest registration still doesn't return refresh_token
2016-03-10 10:29:19 +03:00
Erik Johnston
8a88684736 Add comment 2016-03-09 16:51:22 +00:00
Erik Johnston
af2fe6110c Return the correct token form 2016-03-09 16:11:53 +00:00
Erik Johnston
3ecaabc7fd Use topological orders for initial sync timeline 2016-03-09 15:45:34 +00:00
Erik Johnston
1309b8ca97 Merge pull request #634 from matrix-org/erikj/pin_saml2_version
Pin pysaml2 version to 3.x
2016-03-09 11:57:33 +00:00
Erik Johnston
07cf96ebf7 Pin pysaml2 version to 3.x
This is due to the fact that `from saml2 import config` fails in version
4.x
2016-03-09 11:54:56 +00:00
Mark Haines
b7b899cae6 Merge pull request #628 from matrix-org/markjh/parse_json
Add a parse_json_object function

to deduplicate all the copy+pasted _parse_json functions. Also document
the parse_.* functions.
2016-03-09 11:26:30 +00:00
Mark Haines
b7dbe5147a Add a parse_json_object function
to deduplicate all the copy+pasted _parse_json functions. Also document
the parse_.* functions.
2016-03-09 11:26:26 +00:00
Erik Johnston
158a322e82 Ensure integer is an integer 2016-03-09 10:20:48 +00:00
Richard van der Hoff
ce829c2aef Reinstate coverage checks for integ tests 2016-03-09 00:14:19 +00:00
Richard van der Hoff
4814e7c9b2 Specify synapse-directory for integ tests 2016-03-09 00:06:57 +00:00
Richard van der Hoff
866d0e7cb8 Only build py27 tox env for integ tests 2016-03-08 23:47:50 +00:00
Richard van der Hoff
1748d4b739 Use sytest build scripts, rather than reinventing the wheel 2016-03-08 23:37:39 +00:00
Mark Haines
5f5817ab05 Merge pull request #632 from matrix-org/markjh/py3v2
Fix relative imports so they work in both py3 and py27
2016-03-08 18:11:10 +00:00
Daniel Wagner-Hall
b117f67227 Merge pull request #633 from matrix-org/daniel/ick
Idempotent-ise schema update script
2016-03-08 17:45:46 +00:00
Daniel Wagner-Hall
3b97797c8d Merge branch 'develop' into daniel/ick 2016-03-08 17:35:09 +00:00
Daniel Wagner-Hall
edca2d9891 Idempotent-ise schema update script
If any ASes don't have an ID, the schema will fail, and then it will
error when trying to add the column again.
2016-03-08 17:32:29 +00:00
David Baker
c00f4e48ba Merge pull request #630 from matrix-org/dbkr/post_urlencoded_encode_params
Encode unicode objects given to post_urlencode*
2016-03-08 13:51:49 +00:00
Mark Haines
7076082ae6 Fix relative imports so they work in both py3 and py27 2016-03-08 11:45:50 +00:00
Mark Haines
ea72bd9600 Merge pull request #631 from matrix-org/markjh/py3v1
Use syntax that's valid on both py2.7 and py3
2016-03-08 11:08:20 +00:00
David Baker
f40131b4d9 Merge pull request #627 from matrix-org/dbkr/guest_reuse_send_user_id
Send the user ID matching the guest access token
2016-03-08 10:20:41 +00:00
David Baker
9a3c80a348 pep8 2016-03-08 10:09:07 +00:00
David Baker
7bcee4733a Encode unicode objects given to post_urlencode* otherwise urllib.urlencode chokes. 2016-03-08 10:04:38 +00:00
Mark Haines
239badea9b Use syntax that works on both py2.7 and py3 2016-03-07 20:13:10 +00:00
David Baker
316c00936f Fix tests 2016-03-07 17:32:36 +00:00
David Baker
874fd43257 Send the user ID matching the guest access token, since there is no Matrix API to discover what user ID an access token is for. 2016-03-07 17:13:56 +00:00
Erik Johnston
80916e6884 Merge pull request #626 from matrix-org/erikj/visibility
Send history visibility on boundary changes
2016-03-07 11:56:07 +00:00
Erik Johnston
2ab0bf4b97 Send history visibility on boundary changes 2016-03-04 16:54:32 +00:00
Mark Haines
b7a3be693b Merge pull request #618 from matrix-org/markjh/pushrule_stream
Add a stream for push rule updates
2016-03-04 16:35:08 +00:00
Erik Johnston
beebc0a40f Merge pull request #625 from matrix-org/erikj/kick_ban_sync
Always include kicks and bans in full /sync
2016-03-04 16:33:52 +00:00
Mark Haines
9848b54cac Prefill from the correct stream 2016-03-04 16:20:22 +00:00
Mark Haines
deda48068c prefill the push rules stream change cache 2016-03-04 16:19:42 +00:00
Mark Haines
ebcbb23226 s/stream_ordering/event_stream_ordering/ in push 2016-03-04 16:15:23 +00:00
Mark Haines
7e9fc9b6af /FNARG/d 2016-03-04 15:54:09 +00:00
Erik Johnston
1a1abd8d05 Merge pull request #624 from matrix-org/erikj/invite_profile
Add profile information to invites
2016-03-04 15:29:45 +00:00
Erik Johnston
125f674eae Always include kicks and bans in full /sync 2016-03-04 15:27:55 +00:00
Erik Johnston
13cbd31040 Spelling 2016-03-04 15:22:39 +00:00
Mark Haines
0ff9aaf6c1 poke jenkins 2016-03-04 15:15:06 +00:00
Mark Haines
3110c37d02 Fix unit tests 2016-03-04 14:48:17 +00:00
Mark Haines
ec7460b4f2 Merge branch 'develop' into markjh/pushrule_stream 2016-03-04 14:44:34 +00:00
Mark Haines
1b4f4a936f Hook up the push rules stream to account_data in /sync 2016-03-04 14:44:01 +00:00
Erik Johnston
ed61a49169 Add profile information to invites 2016-03-04 14:35:02 +00:00
Erik Johnston
389d558a3b Merge pull request #598 from Rugvip/invite-state
config,handlers/_base: added homeserver config for what state is included in a room invite
2016-03-04 09:58:52 +00:00
Erik Johnston
44b084a75e Merge pull request #596 from Rugvip/create
handlers/_base: don't allow room create event to be changed
2016-03-04 09:43:20 +00:00
Patrik Oldsberg
bb0e82fff1 tests/utils: added room_invite_state_types to test config
Signed-off-by: Patrik Oldsberg <patrik.oldsberg@ericsson.com>
2016-03-04 10:43:17 +01:00
Patrik Oldsberg
5fc59f009c config,handlers/_base: added homeserver config for what state is included in a room invite
Signed-off-by: Patrik Oldsberg <patrik.oldsberg@ericsson.com>
2016-03-04 10:43:17 +01:00
Erik Johnston
ce82b9e48f Merge pull request #610 from Rugvip/unique-user
handlers/register: make sure another user id is generated when a collision occurs
2016-03-04 09:40:20 +00:00
Richard van der Hoff
09b1d98070 Merge pull request #623 from matrix-org/rav/fix_createroom_race
Make sure we add all invited members before returning from createRoom
2016-03-04 09:29:15 +00:00
Erik Johnston
dd463e246d Merge pull request #614 from matrix-org/erikj/alias_delete
Allow alias creators to delete aliases
2016-03-04 09:02:33 +00:00
Richard van der Hoff
fa6d6bbceb Merge pull request #615 from matrix-org/rav/SYN-642
Handle rejections of invites from local users locally
2016-03-04 00:12:19 +00:00
Richard van der Hoff
a92b4ea76f Make sure we add all invited members before returning from createRoom
add a missing yield.
2016-03-04 00:06:03 +00:00
Richard van der Hoff
361fc53917 Merge branch 'develop' into rav/SYN-642 2016-03-03 19:14:02 +00:00
Richard van der Hoff
62d808becc jenkins-*.sh: set -x
Also move the options from the shebang line to the body of the script, so that
they take effect even if somebody explicitly runs "bash jenkins.sh"
2016-03-03 19:13:43 +00:00
Richard van der Hoff
a85179aff3 Merge remote-tracking branch 'origin/develop' into rav/SYN-642 2016-03-03 19:05:54 +00:00
Richard van der Hoff
5d6fbc1655 Empty commit 2016-03-03 19:04:11 +00:00
Daniel Wagner-Hall
0b3083c75b Merge pull request #621 from matrix-org/daniel/ratelimiting
Pass whole requester to ratelimiting
2016-03-03 17:02:48 +00:00
Daniel Wagner-Hall
b4022cc487 Pass whole requester to ratelimiting
This will enable more detailed decisions
2016-03-03 16:43:42 +00:00
Erik Johnston
50c250b808 Merge pull request #620 from matrix-org/erikj/jenkins
Add some jenkins tests
2016-03-03 16:43:15 +00:00
Erik Johnston
c037170faa Split up jenkins tests 2016-03-03 16:26:38 +00:00
Erik Johnston
7678ec3f9b Mkdir 2016-03-03 16:26:38 +00:00
Erik Johnston
fc9c7b6cbc Add jenkins-postgres.sh 2016-03-03 16:26:38 +00:00
Erik Johnston
246b8c6e4a Change port-base in jenkins-sqlite.sh 2016-03-03 16:26:38 +00:00
Erik Johnston
6789b63131 Use different PORT_BASE 2016-03-03 16:26:37 +00:00
Erik Johnston
91f4ac602b Exclude all jenkins build scripts 2016-03-03 16:26:37 +00:00
Erik Johnston
690596b770 Add jenkins-sqlite.sh 2016-03-03 16:26:37 +00:00
Erik Johnston
5c90451ea0 Merge pull request #619 from matrix-org/dbkr/dont_use_checkpw
Stop using checkpw as it seems to have vanished from bcrypt.
2016-03-03 16:25:41 +00:00
Mark Haines
3406eba4ef Move the code for formatting push rules into a separate function 2016-03-03 16:11:59 +00:00
Mark Haines
ddf9e7b302 Hook up the push rules to the notifier 2016-03-03 14:57:45 +00:00
Daniel Wagner-Hall
95481e7ba7 Merge pull request #571 from matrix-org/daniel/asids
Mark AS users with their AS's ID
2016-03-03 13:56:28 +00:00
Richard van der Hoff
79f34bdbc2 Merge branch 'develop' into rav/SYN-642 2016-03-03 11:39:25 +00:00
Richard van der Hoff
b139e51041 jenkins.sh: set -x
Also move the options from the shebang line to the body of the script, so that
they take effect even if somebody explicitly runs "bash jenkins.sh"
2016-03-03 11:38:36 +00:00
Richard van der Hoff
74cd80e530 Fix typo 2016-03-03 10:28:58 +00:00
David Baker
ff8b87118d Stop using checkpw as it seems to have vanished from bcrypt. Use bcrypt.hashpw(password, hashed) == hashed as per the bcrypt README. 2016-03-02 18:06:45 +00:00
Mark Haines
2223204eba Hook push rules up to the replication API 2016-03-02 17:26:20 +00:00
Richard van der Hoff
fc1f932cc0 Move arg default to the start of the function
Also don't overwrite the list that gets passed in.
2016-03-02 16:44:14 +00:00
Matthew Hodgson
c0147f86a1 Merge pull request #616 from matrix-org/matthew/800x600
add 800x600 thumbnails to make vector look prettier (and anyone else who likes big thumbnails)
2016-03-02 16:02:26 +00:00
Matthew Hodgson
47c361d2f8 add 800x600 thumbnails to make vector look prettier (and anyone else who likes big thumbnails) 2016-03-02 15:57:54 +00:00
Richard van der Hoff
863d3f26b3 fix pyflakes quibble 2016-03-02 15:52:50 +00:00
Richard van der Hoff
9ff940a0ef Address review comments 2016-03-02 15:40:30 +00:00
Erik Johnston
2a78dac60d Merge pull request #612 from matrix-org/erikj/cache_size
Add environment variable SYNAPSE_CACHE_FACTOR, default it to 0.1
2016-03-02 14:36:10 +00:00
Erik Johnston
27185de752 Set SYNAPSE_CACHE_FACTOR=1 in jenkins 2016-03-02 14:30:50 +00:00
Erik Johnston
dda2058d90 Add SYNAPSE_CACHE_FACTOR to README 2016-03-02 14:03:10 +00:00
Mark Haines
a1cf9e3bf3 Add a stream for push rule updates 2016-03-01 18:16:37 +00:00
Richard van der Hoff
05ea111c47 Fix pyflakes warning 2016-03-01 17:45:24 +00:00
Richard van der Hoff
8a1d3b86af Handle rejections of invites from local users locally
Slightly hacky fix to SYN-642, which avoids the federation codepath when trying
to reject invites from local users.
2016-03-01 17:27:22 +00:00
Mark Haines
a612ce6659 Merge pull request #489 from matrix-org/markjh/replication
Add a /replication API for extracting the updates that happened on synapse.
2016-03-01 15:08:24 +00:00
Mark Haines
d50ca1b1ed Merge pull request #613 from matrix-org/markjh/yield
Load the current id in the IdGenerator constructor
2016-03-01 14:54:29 +00:00
Mark Haines
60a0f81c7a Add a /replication API for extracting the updates that happened on
synapse

This is necessary for replicating the data in synapse to be visible to a
separate service because presence and typing notifications aren't stored
in a database so won't be visible to another process.

This API can be used to either get the raw data by requesting the tables
themselves or to just receive notifications for updates by following the
streams meta-stream.

Returns updates for each table requested as a JSON array of arrays, with a
row for each row in the table.

Each table is prefixed by a header row with: the name of the table, the
current stream_id position for the table, the number of rows, the number of
columns and the names of the columns.
This is followed by the rows that have been added to the server since
the requester last asked.

The API has a timeout and is hooked up to the notifier so that a slave
can long poll for updates.
2016-03-01 14:49:41 +00:00
Erik Johnston
f9af8962f8 Allow alias creators to delete aliases 2016-03-01 14:46:31 +00:00
Mark Haines
54172924c8 Load the current id in the IdGenerator constructor
Rather than loading them lazily. This allows us to remove all
the yield statements and spurious arguments for the get_next
methods.

It also allows us to replace all instances of get_next_txn with
get_next since get_next no longer needs to access the db.
2016-03-01 14:32:56 +00:00
Erik Johnston
374f9b2f07 Limit stream change cache size too 2016-03-01 13:30:15 +00:00
Erik Johnston
ce2cdced61 Move cache size fiddling to descriptors only. Fix tests 2016-03-01 13:21:46 +00:00
Erik Johnston
910fc0f28f Add environment variable SYNAPSE_CACHE_FACTOR, default it to 0.1 2016-03-01 12:56:39 +00:00
Erik Johnston
742ec37ca3 Merge pull request #611 from matrix-org/erikj/expiring_cache_size
Report size of ExpiringCache
2016-03-01 11:15:56 +00:00
Erik Johnston
72165e5b77 Reraise exception 2016-03-01 11:00:10 +00:00
Erik Johnston
ff2d7551c7 Correct cache miss detection 2016-03-01 10:59:17 +00:00
Erik Johnston
903fb34b39 Merge branch 'develop' of github.com:matrix-org/synapse into erikj/expiring_cache_size 2016-03-01 09:43:27 +00:00
Patrik Oldsberg
9c48f1ed22 handlers/register: make sure another user id is generated when a collision occurs
Signed-off-by: Patrik Oldsberg <patrik.oldsberg@ericsson.com>
2016-02-29 23:12:37 +01:00
David Baker
bfdcc7b9b6 Merge pull request #607 from matrix-org/dbkr/send_inviter_member_event
Send the inviter's member event in room invite state
2016-02-26 14:32:08 +00:00
Mark Haines
4bf13a8207 Merge pull request #609 from matrix-org/markjh/change_action
Add support for changing the actions for default rules

See matrix-org/matrix-doc#283

Works by adding dummy rules to the push rules table with a negative priority class and then using those rules to clobber the default rule actions when adding the default rules in ``list_with_base_rules``
2016-02-26 14:28:28 +00:00
Mark Haines
de27f7fc79 Add support for changing the actions for default rules
See matrix-org/matrix-doc#283

Works by adding dummy rules to the push rules table with a negative priority class and then using those rules to clobber the default rule actions when adding the default rules in ``list_with_base_rules``
2016-02-26 14:28:19 +00:00
David Baker
413e36b17a Merge remote-tracking branch 'origin/develop' into dbkr/send_inviter_member_event 2016-02-26 13:40:25 +00:00
David Baker
354d3842b5 Empty commit to force re-test 2016-02-26 13:29:20 +00:00
David Baker
9329cd5f13 Merge pull request #608 from gergelypolonkai/syn-638
Add error codes for malformed/bad JSON in /login
2016-02-26 11:12:00 +00:00
Gergely Polonkai
87acd8fb07 Fix to appease the PEP8 dragon 2016-02-26 12:05:38 +01:00
Gergely Polonkai
a53774721a Add error codes for malformed/bad JSON in /login
Signed-off-by: Gergely Polonkai <gergely@polonkai.eu>
2016-02-26 10:22:35 +01:00
David Baker
0f0b011440 Send the inviter's member event in room invite state so the invitee has their display name and avatar. 2016-02-25 18:12:09 +00:00
Mark Haines
faa3d172ab Merge pull request #606 from matrix-org/markjh/get_enabled
Make sure we return a JSON object when returning the values of specific keys from a push rule
2016-02-25 15:13:18 +00:00
Mark Haines
15c2ac2cac Make sure we return a JSON object when returning the values of specific keys from a push rule 2016-02-25 15:13:07 +00:00
2016-02-25 15:13:07 +00:00
Mark Haines
fb9b5b6f4a Merge pull request #605 from matrix-org/markjh/dead_code
Remove unused get_rule_attr method
2016-02-24 17:08:55 +00:00
Mark Haines
4ecfbac85f Merge branch 'develop' into markjh/dead_code 2016-02-24 16:37:46 +00:00
Mark Haines
9892d017b2 Remove unused get_rule_attr method 2016-02-24 16:31:07 +00:00
Daniel Wagner-Hall
e8d34bccbd Merge pull request #604 from matrix-org/daniel/guestaccesstoken
Generate guest access token on 3pid invites

This means that following the same link across multiple sessions or
devices can re-use the same guest account.

Note that this is somewhat of an abuse vector; we can't throw up
captchas on this flow, so this is a way of registering ephemeral
accounts for spam, whose sign-up we don't rate limit.
2016-02-24 14:41:29 +00:00
Daniel Wagner-Hall
33300673b7 Generate guest access token on 3pid invites
This means that following the same link across multiple sessions or
devices can re-use the same guest account.

Note that this is somewhat of an abuse vector; we can't throw up
captchas on this flow, so this is a way of registering ephemeral
accounts for spam, whose sign-up we don't rate limit.
2016-02-24 14:41:25 +00:00
Daniel Wagner-Hall
869580206d Ignore invalid POST bodies when joining rooms 2016-02-24 08:50:28 +00:00
Erik Johnston
278d6c0527 Report size of ExpiringCache 2016-02-23 16:46:21 +00:00
Erik Johnston
e7ab0e0f9f Merge pull request #603 from matrix-org/erikj/presence
Create a new stream_id per presence update
2016-02-23 16:14:01 +00:00
Erik Johnston
6451fcd085 Create a new stream_id per presence update 2016-02-23 15:51:39 +00:00
Erik Johnston
b5f77eb12a Check presence token interval is less than 100, rather than the token itself 2016-02-23 15:47:37 +00:00
Erik Johnston
e3e0ac6ec7 Merge pull request #602 from matrix-org/erikj/presence
Change the way we figure out presence updates for small deltas
2016-02-23 15:18:41 +00:00
Daniel Wagner-Hall
f1dd03548f Set WORKSPACE if not set 2016-02-23 15:15:10 +00:00
Daniel Wagner-Hall
28ad246bb4 Merge pull request #584 from matrix-org/daniel/ephemeralthreepids
Allow third_party_signed to be specified on /join
2016-02-23 15:11:35 +00:00
Daniel Wagner-Hall
577951b032 Allow third_party_signed to be specified on /join 2016-02-23 15:11:25 +00:00
Erik Johnston
13f86c3489 Handle get_all_entities_changed returning None 2016-02-23 15:05:37 +00:00
Erik Johnston
6e0209112b Add comments 2016-02-23 14:57:45 +00:00
Erik Johnston
c77dae7a1a Change the way we figure out presence updates for small deltas 2016-02-23 14:54:40 +00:00
Erik Johnston
a7b2ce32f7 Merge pull request #600 from matrix-org/erikj/presence
Measure PresenceEventSource.get_new_events
2016-02-23 14:54:26 +00:00
Daniel Wagner-Hall
0d4b3a133d Merge pull request #601 from Rugvip/pep8
handlers/sync: style fix, line too long
2016-02-23 14:52:28 +00:00
Erik Johnston
02e928cf9b Don't include defer.returnValue in Measure block 2016-02-23 14:52:16 +00:00
Erik Johnston
56a94ccd9e Measure PresenceEventSource.get_new_events 2016-02-23 14:52:16 +00:00
Patrik Oldsberg
baf056bae8 handlers/sync: style fix, line too long
Signed-off-by: Patrik Oldsberg <patrik.oldsberg@ericsson.com>
2016-02-23 15:49:36 +01:00
Mark Haines
10d581d1cf Merge pull request #595 from matrix-org/markjh/coverage
Check that the disable_registration config key is handled correctly
2016-02-23 11:59:40 +00:00
Erik Johnston
138c405974 Pick up currently_active across federation 2016-02-23 10:40:11 +00:00
Erik Johnston
8fe3b450d2 Merge pull request #597 from Rugvip/account-data-sync
handlers/sync: fix SyncResult not counting account_data change when converting to bool
2016-02-23 09:55:23 +00:00
Patrik Oldsberg
210b7d8e00 handlers/_base: don't allow room create event to be changed
Signed-off-by: Patrik Oldsberg <patrik.oldsberg@ericsson.com>
2016-02-23 00:22:41 +01:00
Patrik Oldsberg
1dcfb201c4 handlers/sync: fix SyncResult not counting account_data change when converting to bool
This fixes account_data events not triggering an immediate /sync response

Signed-off-by: Patrik Oldsberg <patrik.oldsberg@ericsson.com>
2016-02-23 00:21:59 +01:00
Richard van der Hoff
f7e3de02ef Merge pull request #587 from matrix-org/rav/guest_access_after_room_join
Allow guest users access to messages in rooms they have joined
2016-02-22 16:36:06 +00:00
Mark Haines
4d14655c2b Merge pull request #594 from matrix-org/markjh/coverage
Add a test for TreeCache.__contains__
2016-02-22 16:02:06 +00:00
Mark Haines
5e2890bd49 Check that the disable_registration config key is handled correctly 2016-02-22 16:01:29 +00:00
Richard van der Hoff
5be3944730 address review comments
drop commented-out special casing for historyvisibility event
s/he/they/ for users
2016-02-22 15:27:44 +00:00
Mark Haines
7641a90c34 Add a test for TreeCache.__contains__ 2016-02-22 15:22:38 +00:00
Mark Haines
c43609e035 Merge pull request #591 from matrix-org/markjh/coverage
Test Filter.filter_rooms
2016-02-22 15:21:10 +00:00
Erik Johnston
9e696bd6a3 Remove superfluous call to get_state_at when we already have an event for that stream position 2016-02-22 13:54:46 +00:00
Erik Johnston
60bec24083 Merge pull request #589 from matrix-org/erikj/presence
Only send presence updates to remote hosts if user is ours
2016-02-22 13:18:06 +00:00
Mark Haines
5c79ef9396 Test Filter.filter_rooms
Also check that the __repr__ method for FilterCollection does something
sensible.
2016-02-19 17:55:28 +00:00
Richard van der Hoff
6c5b147a39 Interpret unknown visibilities the same as shared 2016-02-19 17:11:11 +00:00
Mark Haines
b82d6f70a4 Merge pull request #590 from matrix-org/markjh/formatting
Fix flake8 warnings for tests
2016-02-19 15:55:02 +00:00
Mark Haines
700487a7c7 Fix flake8 warnings for tests 2016-02-19 15:34:38 +00:00
Erik Johnston
3dbaeef58c Correctly filter states 2016-02-19 12:27:35 +00:00
Erik Johnston
42ac5f0c1a Only send presence updates to remote hosts if user is ours 2016-02-19 12:19:56 +00:00
Richard van der Hoff
05aee12652 Merge branch 'develop' into rav/guest_access_after_room_join 2016-02-19 12:00:16 +00:00
Erik Johnston
24d9f2c140 Add Measures to presence 2016-02-19 11:50:48 +00:00
Richard van der Hoff
b71ca2b014 Allow guest users access to messages in rooms they have joined
There should be no difference between guest users and non-guest users in terms
of access to messages. Define the semantics of the is_peeking argument to
filter_events_for_clients (slightly) better; interpret it appropriately, and
set it correctly from /sync.
2016-02-19 11:41:02 +00:00
Erik Johnston
4a95eb0a12 Add presence metric 2016-02-19 11:32:04 +00:00
Erik Johnston
be799453aa Remove spurious extra metrics 2016-02-19 11:29:33 +00:00
Erik Johnston
ea7786e8ca Merge pull request #586 from matrix-org/erikj/presence
Fix presence `currently_active`. Add presence metrics.
2016-02-19 11:26:32 +00:00
Erik Johnston
929cb0ed7d Don't set currently_active for remote presence 2016-02-19 10:58:27 +00:00
Erik Johnston
5f4eca3816 More metrics 2016-02-19 10:21:41 +00:00
Erik Johnston
5614b4dafb Add presence metrics 2016-02-19 09:50:54 +00:00
Erik Johnston
e5ad2e5267 Merge pull request #582 from matrix-org/erikj/presence
Rewrite presence for performance.
2016-02-19 09:37:50 +00:00
Erik Johnston
e12ec335a5 "You are not..." 2016-02-18 17:01:53 +00:00
Erik Johnston
220231d8e3 Merge pull request #573 from matrix-org/erikj/sync_fix
Mitigate against incorrect old state in /sync.
2016-02-18 16:40:58 +00:00
Erik Johnston
e6c5e3f28a Close cursor 2016-02-18 16:39:28 +00:00
Erik Johnston
42109a62a4 Remove unused param from get_max_token 2016-02-18 16:37:28 +00:00
Erik Johnston
b8cdec92c7 WheelTimer: Don't scan list, use index. 2016-02-18 16:33:07 +00:00
Mark Haines
9c902025bf Merge pull request #579 from matrix-org/markjh/dead_code
Remove dead code for setting device specific rules.

It wasn't possible to hit the code from the API because of a typo
in parsing the request path. Since no-one was using the feature
we might as well remove the dead code.
2016-02-18 16:05:23 +00:00
Mark Haines
b9977ea667 Remove dead code for setting device specific rules.
It wasn't possible to hit the code from the API because of a typo
in parsing the request path. Since no-one was using the feature
we might as well remove the dead code.
2016-02-18 16:05:13 +00:00
Erik Johnston
48b652bcbe Remove invalid arg. 2016-02-18 14:57:09 +00:00
Erik Johnston
b4796a62ee Add unit test 2016-02-18 11:52:33 +00:00
Daniel Wagner-Hall
35cda2e692 Merge pull request #583 from matrix-org/daniel/roomcleanupincremental
Merge all of the room membership codepaths into one
2016-02-18 11:48:28 +00:00
Daniel Wagner-Hall
f8d21e1431 Review comments 2016-02-18 11:02:14 +00:00
Erik Johnston
9da9826b85 Remove old tests. 2016-02-18 10:46:16 +00:00
Erik Johnston
fe95f2217c Add stuff pulled from the DB to the cache 2016-02-18 10:26:24 +00:00
Erik Johnston
8351538873 PEP8 2016-02-18 10:12:12 +00:00
Erik Johnston
112283e230 Prefix TS fields with _ts 2016-02-18 10:11:43 +00:00
Erik Johnston
b31ec214a5 Remove status_msg when going offline. Don't offline -> online if you send a message 2016-02-18 09:54:08 +00:00
Erik Johnston
114b929f8b Check presence state is a valid one 2016-02-18 09:16:32 +00:00
Erik Johnston
ddca9c56fc Move if statement 2016-02-18 09:11:53 +00:00
Erik Johnston
58371fa263 Comment 2016-02-18 09:09:50 +00:00
Daniel Wagner-Hall
7e90fb6a57 Merge branch 'develop' into daniel/roomcleanupincremental
Conflicts:
	synapse/rest/client/v1/room.py
2016-02-17 15:53:59 +00:00
Daniel Wagner-Hall
591af2d074 Some cleanup
I'm not particularly happy with the "action" switching, but there's no
convenient way to defer the work that needs to happen after it, so... :(
2016-02-17 15:50:13 +00:00
Erik Johnston
c229c87398 Remove spurious comment 2016-02-17 15:48:29 +00:00
Erik Johnston
e5999bfb1a Initial cut 2016-02-17 15:40:50 +00:00
Daniel Wagner-Hall
a4e278bfe7 Respond to federated invite with non-empty context
Currently, we magically perform an extra database hit to find the
inviter, and use this to guess where we should send the event. Instead,
fill in a valid context, so that other callers relying on the context
actually have one.
2016-02-17 15:25:12 +00:00
Erik Johnston
9e7900da1e Add wheeltimer impl 2016-02-17 14:29:28 +00:00
Erik Johnston
200de16440 Merge pull request #580 from Rugvip/develop
api/filtering: don't assume that event content will always be a dict
2016-02-17 12:51:02 +00:00
Patrik Oldsberg
536f949a1a api/filtering: don't assume that event content will always be a dict
Signed-off-by: Patrik Oldsberg <patrik.oldsberg@ericsson.com>
2016-02-17 12:59:41 +01:00
Erik Johnston
97d1b3a506 Merge pull request #581 from Rugvip/event_id
client/v1/room: include event_id in response to state event PUT
2016-02-17 11:06:14 +00:00
Patrik Oldsberg
71d5d2c669 client/v1/room: include event_id in response to state event PUT, in accordance with the spec
Signed-off-by: Patrik Oldsberg <patrik.oldsberg@ericsson.com>
2016-02-17 11:53:43 +01:00
Daniel Wagner-Hall
6605adf669 Some cleanup, some TODOs, more to do 2016-02-16 19:05:02 +00:00
Mark Haines
458782bf67 Fix typo in request validation for adding push rules. 2016-02-16 18:00:30 +00:00
Mark Haines
c2025c0425 Merge pull request #578 from matrix-org/markjh/idempotent_rules
Make adding push rules idempotent

Also remove the **kwargs from the add_push_rule method.

Fixes https://matrix.org/jira/browse/SYN-391
2016-02-16 15:53:41 +00:00
Mark Haines
a9c9868957 Make adding push rules idempotent
Also remove the **kwargs from the add_push_rule method.

Fixes https://matrix.org/jira/browse/SYN-391
2016-02-16 15:53:38 +00:00
Daniel Wagner-Hall
d1fb790818 Some cleanup 2016-02-16 14:25:23 +00:00
Daniel Wagner-Hall
1f403325ac Tidy? up room creation event sending 2016-02-16 12:00:50 +00:00
Daniel Wagner-Hall
04686df17a Add comment 2016-02-16 11:52:46 +00:00
Daniel Wagner-Hall
feedaa37fa Merge branch 'develop' into daniel/roomcleanupincremental
Conflicts:
	synapse/handlers/room.py
2016-02-16 11:34:48 +00:00
Daniel Wagner-Hall
a182e5d721 Merge pull request #577 from matrix-org/daniel/createroom
Simplify room creation code
2016-02-16 11:21:06 +00:00
Daniel Wagner-Hall
4bfb32f685 Branch off member and non member sends
Unclean, needs tidy-up, but works
2016-02-15 18:21:30 +00:00
Daniel Wagner-Hall
1a2197d7bf Simplify room creation code 2016-02-15 18:19:01 +00:00
Daniel Wagner-Hall
e560045cfd Simplify room creation code 2016-02-15 18:18:39 +00:00
Daniel Wagner-Hall
8168341e9b Use update_membership for profile updates 2016-02-15 17:14:34 +00:00
Daniel Wagner-Hall
1bbb67c452 Use update_membership to kick guests 2016-02-15 16:40:22 +00:00
Daniel Wagner-Hall
150fcde0dc Reuse update_membership from /join 2016-02-15 16:16:03 +00:00
Daniel Wagner-Hall
73e616df2a Inline _do_local_membership_update 2016-02-15 16:02:22 +00:00
Daniel Wagner-Hall
f318d4f2a4 Inline _do_join as it now only has one caller
Also, consistently apply rate limiting.

Again, ugly, but a step in the right direction.
2016-02-15 15:57:10 +00:00
Daniel Wagner-Hall
e71095801f Merge implementation of /join by alias or ID
This code is kind of rough (passing the remote servers down a long
chain), but is a step towards improvement.
2016-02-15 15:39:16 +00:00
Daniel Wagner-Hall
dbeed36dec Merge some room joining codepaths
Force joining by alias to go through the send_membership_event checks,
rather than bypassing them straight into _do_join. This is the first of
many stages of cleanup.
2016-02-15 14:38:27 +00:00
Daniel Wagner-Hall
4de08a4672 Revert "Merge two of the room join codepaths"
This reverts commit cf81375b94.

It subtly violates a guest joining auth check
2016-02-12 16:17:24 +00:00
Daniel Wagner-Hall
d7aa103f00 Merge pull request #575 from matrix-org/daniel/roomcleanup
Merge two of the room join codepaths

There's at least one more to merge in.

Side-effects:
 * Stop reporting None as displayname and avatar_url in some cases
 * Joining a room by alias populates guest-ness in join event
 * Remove unspec'd PUT version of /join/<room_id_or_alias> which has not
   been called on matrix.org according to logs
 * Stop recording access_token_id on /join/room_id - currently we don't
   record it on /join/room_alias; I can try to thread it through at some
   point.
2016-02-12 15:11:59 +00:00
Daniel Wagner-Hall
cf81375b94 Merge two of the room join codepaths
There's at least one more to merge in.

Side-effects:
 * Stop reporting None as displayname and avatar_url in some cases
 * Joining a room by alias populates guest-ness in join event
 * Remove unspec'd PUT version of /join/<room_id_or_alias> which has not
   been called on matrix.org according to logs
 * Stop recording access_token_id on /join/room_id - currently we don't
   record it on /join/room_alias; I can try to thread it through at some
   point.
2016-02-12 15:11:49 +00:00
Mark Haines
66f9a49ce9 Merge pull request #574 from matrix-org/markjh/connection_closed
Catch the exceptions thrown by twisted when you write to a closed connection
2016-02-12 14:29:36 +00:00
Mark Haines
58c9f20692 Catch the exceptions thrown by twisted when you write to a closed connection 2016-02-12 13:46:59 +00:00
Erik Johnston
ec0f3836ff Merge branch 'master' of github.com:matrix-org/synapse into develop 2016-02-12 11:19:37 +00:00
Erik Johnston
4d54d87c3e Mitigate against incorrect old state in /sync.
There have been reports from the field that servers occasionally have
incorrect notions of what the old state of a room is. This proves
problematic as /sync relies on a correct old state.
This patch makes /sync specifically include in the 'state' key any
current state events that haven't been correctly included.
2016-02-12 11:13:06 +00:00
Daniel Wagner-Hall
ee4f332ec5 Merge pull request #572 from matrix-org/daniel/exclusivity
Enforce user_id exclusivity for AS registrations
2016-02-11 17:42:53 +00:00
Daniel Wagner-Hall
dc6da63e30 Enforce user_id exclusivity for AS registrations
This whole set of checks is kind of an ugly mess, which I may clean up
at some point, but for now let's be correct.
2016-02-11 17:37:38 +00:00
Daniel Wagner-Hall
763360594d Mark AS users with their AS's ID 2016-02-11 17:26:42 +00:00
Erik Johnston
7e0a1683e6 Merge branch 'release-v0.13.3' of github.com:matrix-org/synapse 2016-02-11 16:04:51 +00:00
Erik Johnston
2a24f906a9 Bump version and changelog 2016-02-11 16:04:15 +00:00
Erik Johnston
a79af259e9 Merge branch 'master' of github.com:matrix-org/synapse into develop 2016-02-11 16:02:17 +00:00
Erik Johnston
ce14c7a995 Fix SYN-627, events are in incorrect room in /sync 2016-02-11 15:02:56 +00:00
Erik Johnston
88a973cde5 Merge branch 'release-v0.13.2' of github.com:matrix-org/synapse 2016-02-11 10:55:16 +00:00
Erik Johnston
1a830b751d Bump version and changelog 2016-02-11 10:53:42 +00:00
Erik Johnston
abc1b22193 Merge pull request #570 from matrix-org/erikj/events_fixes
Return events in correct order for /events
2016-02-11 10:13:26 +00:00
Erik Johnston
0eff740523 Return events in correct order for /events 2016-02-11 10:07:27 +00:00
Erik Johnston
a1b7902944 Add some paranoia logging 2016-02-11 09:22:37 +00:00
Erik Johnston
7718303e71 Merge branch 'master' of github.com:matrix-org/synapse into develop 2016-02-10 17:06:53 +00:00
Matthew Hodgson
103b432c84 0.13.1 2016-02-10 16:35:17 +00:00
Matthew Hodgson
a45cc801d2 bump for 0.13.1 2016-02-10 16:34:46 +00:00
Matthew Hodgson
7634687057 Merge branch 'master' of git+ssh://github.com/matrix-org/synapse 2016-02-10 16:27:15 +00:00
Matthew Hodgson
b3ecb96e36 try to bump syweb to 0.6.8 2016-02-10 16:27:12 +00:00
Erik Johnston
907c1faf1e Merge branch 'release-v0.13.0' of github.com:matrix-org/synapse 2016-02-10 14:52:06 +00:00
Erik Johnston
6c3126d950 Update CHANGES 2016-02-10 14:49:48 +00:00
Erik Johnston
6e89e69d08 Bump version and changelog 2016-02-10 14:36:06 +00:00
Erik Johnston
e66d0bd03a Merge branch 'develop' of github.com:matrix-org/synapse into release-v0.13.0 2016-02-10 14:12:48 +00:00
Erik Johnston
4a2ace1857 Merge pull request #569 from matrix-org/erikj/initial_sync
Improvements to initial /syncs
2016-02-10 13:43:15 +00:00
Erik Johnston
5189bfdef4 Batch fetch _get_state_groups_from_groups 2016-02-10 13:24:42 +00:00
Erik Johnston
24f00a6c33 Use _simple_select_many for _get_state_group_for_events 2016-02-10 12:57:50 +00:00
Erik Johnston
8e49892b21 Only calculate initial sync for 10 rooms at a time
This helps to ensure we don't completely starve other requests.
2016-02-10 11:42:07 +00:00
Erik Johnston
e557dc80b8 Merge pull request #566 from matrix-org/erikj/logcontext
Don't bother copying records on parent context
2016-02-10 11:41:45 +00:00
Erik Johnston
4eb8f9ca8a Remove comment 2016-02-10 11:29:21 +00:00
Erik Johnston
f7ef5c1d57 Merge pull request #568 from matrix-org/erikj/unread_notif
Atomically persist push actions when we persist the event
2016-02-10 11:25:32 +00:00
Erik Johnston
00c9ad49df s/parent_context/previous_context/ 2016-02-10 11:25:19 +00:00
Erik Johnston
9777c5f49a Set parent context on instantiation 2016-02-10 11:23:32 +00:00
Erik Johnston
0214745239 Rename functions 2016-02-10 11:09:56 +00:00
Erik Johnston
46a02ff15b Merge pull request #532 from floviolleau/floviolleau/documentation
Update documentation
2016-02-10 10:40:28 +00:00
Erik Johnston
6ad9586c84 Merge pull request #565 from matrix-org/erikj/macaroon_config
Derive macaroon_secret_key from signing key.
2016-02-09 16:34:15 +00:00
Erik Johnston
78a5482267 Typo 2016-02-09 16:23:11 +00:00
Erik Johnston
7b0d846407 Atomically persist push actions when we persist the event 2016-02-09 16:19:15 +00:00
Erik Johnston
f28cc45183 Pass in current state to push action handler 2016-02-09 16:01:40 +00:00
Erik Johnston
e664e9737c Fix test 2016-02-09 14:57:43 +00:00
Erik Johnston
13ba8d878c Fix test 2016-02-09 14:55:21 +00:00
Erik Johnston
78d6c1b5be Change a log from debug to info 2016-02-09 14:44:12 +00:00
Erik Johnston
feb294d552 Remove dead code 2016-02-09 14:32:17 +00:00
Erik Johnston
70a8608749 Invalidate get_last_receipt_event_id_for_user cache 2016-02-09 14:27:29 +00:00
Erik Johnston
7e3b586c1e Merge pull request #567 from matrix-org/erikj/sync_ephemeral
Don't load all ephemeral state for a room on every sync
2016-02-09 14:11:47 +00:00
Erik Johnston
eff12e838c Don't load all ephemeral state for a room on every sync 2016-02-09 13:55:59 +00:00
Erik Johnston
82631c5f94 Fix unit tests 2016-02-09 13:50:37 +00:00
Daniel Wagner-Hall
b58a8b1ee0 Merge pull request #560 from matrix-org/daniel/tox
Remove pyc files before running tests
2016-02-09 13:44:08 +00:00
Daniel Wagner-Hall
5b75b637b8 Remove pyc files before running tests 2016-02-09 13:44:04 +00:00
Erik Johnston
9ac9b75bc4 Merge branch 'develop' of github.com:matrix-org/synapse into develop 2016-02-09 12:58:10 +00:00
Daniel Wagner-Hall
ebaa999f92 Revert "Reject additional path segments"
This reverts commit 1d19a5ec0f.

iOS Console is apparently relying on these paths.
2016-02-09 12:46:52 +00:00
Erik Johnston
6c558ee8bc Measure some /sync related things 2016-02-09 11:31:42 +00:00
Erik Johnston
31a2b892d8 Revert to putting it around the entire block 2016-02-09 11:25:09 +00:00
Erik Johnston
9daa4e2a85 Don't create new logging context 2016-02-09 11:06:19 +00:00
Erik Johnston
3e2fcd67b2 Don't bother copying records on parent context 2016-02-09 10:50:31 +00:00
Erik Johnston
241b71852e Fix bug in util.metrics.Measure 2016-02-09 10:28:13 +00:00
Erik Johnston
97294ef2fd Create new context when measuring 2016-02-09 10:12:00 +00:00
Erik Johnston
549698b1e0 Don't measure across event stream call, as it lasts for a long time. 2016-02-09 09:37:09 +00:00
Erik Johnston
c486b7b41c Change logcontext warns to debug 2016-02-09 09:20:38 +00:00
Erik Johnston
f078ecbc8f Derive macaroon_secret_key from signing key.
Unfortunately, there are people that are running synapse without a
`macaroon_secret_key` set. Mandating they set one is a good solution,
except that breaking auto upgrades is annoying.
2016-02-08 16:35:44 +00:00
Erik Johnston
2bb5f035af Merge pull request #564 from matrix-org/erikj/logcontext
Fix up logcontexts
2016-02-08 15:16:16 +00:00
Erik Johnston
cca5c06679 Merge pull request #562 from matrix-org/erikj/push_metric
Add metrics to pushers
2016-02-08 14:57:40 +00:00
Daniel Wagner-Hall
0897357993 Merge pull request #563 from matrix-org/daniel/dollarz
Reject additional path segments
2016-02-08 14:46:37 +00:00
Erik Johnston
2c1fbea531 Fix up logcontexts 2016-02-08 14:26:45 +00:00
Erik Johnston
13e6262659 Add metrics to pushers 2016-02-08 14:26:45 +00:00
Daniel Wagner-Hall
1d19a5ec0f Reject additional path segments 2016-02-08 10:50:55 +00:00
Mark Haines
77c7ed0e93 Report the v1 and v2 patterns separately 2016-02-05 15:43:27 +00:00
Mark Haines
b052621f67 List the URL patterns in synapse 2016-02-05 15:18:58 +00:00
Daniel Wagner-Hall
8f1031586f Merge pull request #550 from matrix-org/daniel/guestnames
Allocate guest user IDs numerically

The current random IDs are ugly and confusing when presented in UIs.
This makes them prettier and easier to read.

Also, disable non-automated registration of numeric IDs so that we don't
need to worry so much about people carving out our automated address
space and us needing to keep retrying ID registration.
2016-02-05 11:22:42 +00:00
Daniel Wagner-Hall
79a1c0574b Allocate guest user IDs numerically
The current random IDs are ugly and confusing when presented in UIs.
This makes them prettier and easier to read.

Also, disable non-automated registration of numeric IDs so that we don't
need to worry so much about people carving out our automated address
space and us needing to keep retrying ID registration.
2016-02-05 11:22:30 +00:00
Daniel Wagner-Hall
489f92e0e5 Merge pull request #559 from matrix-org/daniel/media
Host /media/r0 as well as /media/v1
2016-02-05 11:04:24 +00:00
Daniel Wagner-Hall
737c4223ef Host /media/r0 as well as /media/v1 2016-02-05 10:47:46 +00:00
Daniel Wagner-Hall
db0da033eb Merge pull request #558 from matrix-org/daniel/config
Error if macaroon key is missing from config

Currently we store all access tokens in the DB, and fall back to that
check if we can't validate the macaroon, so our fallback works here, but
for guests, their macaroons don't get persisted, so we don't get to
find them in the database. Each restart, we generate a new ephemeral
key, so guests lose access after each server restart.

I tried to fix up the config stuff to be less insane, but gave up, so
instead I bolt on yet another piece of custom one-off insanity.

Also, add some basic tests for config generation and loading.
2016-02-05 01:58:36 +00:00
Daniel Wagner-Hall
6a9f1209df Error if macaroon key is missing from config
Currently we store all access tokens in the DB, and fall back to that
check if we can't validate the macaroon, so our fallback works here, but
for guests, their macaroons don't get persisted, so we don't get to
find them in the database. Each restart, we generate a new ephemeral
key, so guests lose access after each server restart.

I tried to fix up the config stuff to be less insane, but gave up, so
instead I bolt on yet another piece of custom one-off insanity.

Also, add some basic tests for config generation and loading.
2016-02-05 01:58:23 +00:00
David Baker
34dda7cc7f Merge pull request #557 from matrix-org/dbkr/profile_dont_return_null
Omit keys rather than returning null in profile API
2016-02-04 15:39:12 +00:00
Erik Johnston
4d36e73230 Actually return something sensible 2016-02-03 16:35:00 +00:00
Erik Johnston
709e09e1c3 Remove old log line 2016-02-03 16:32:20 +00:00
Erik Johnston
aa4af94c69 We return dicts now. 2016-02-03 16:29:32 +00:00
Erik Johnston
b84d59c5f0 Add descriptions 2016-02-03 16:22:35 +00:00
Erik Johnston
33c71c3a4b Preserve log context over when deferring to thread pool in media repo 2016-02-03 16:17:18 +00:00
Erik Johnston
c8e4d5de7f Merge pull request #555 from matrix-org/erikj/logcontext
Allowing tagging log contexts
2016-02-03 15:20:00 +00:00
David Baker
156cea5b45 No braces here 2016-02-03 15:04:51 +00:00
Erik Johnston
8450114098 Merge pull request #554 from matrix-org/erikj/event_push
Change event_push_actions_rm_tokens schema
2016-02-03 15:02:47 +00:00
David Baker
24277fbb97 Don't return null if profile display name / avatar url isn't set: omit them instead 2016-02-03 14:59:19 +00:00
Daniel Wagner-Hall
66bb255fcd Merge pull request #556 from matrix-org/daniel/config
Rename config field to reflect yaml name
2016-02-03 14:55:54 +00:00
Daniel Wagner-Hall
5054806ec1 Rename config field to reflect yaml name 2016-02-03 14:42:01 +00:00
Erik Johnston
430e496050 Merge pull request #552 from matrix-org/erikj/public_room_fix
Change the way we do public room list fetching
2016-02-03 13:53:59 +00:00
Erik Johnston
d4f72a5bfb Allowing tagging log contexts 2016-02-03 13:52:27 +00:00
Erik Johnston
f8aae79a72 Simplify get_rooms 2016-02-03 13:24:35 +00:00
Erik Johnston
9cd80a7b5c PEP8 2016-02-03 11:52:57 +00:00
Erik Johnston
772b45c745 Remove unused method 2016-02-03 11:43:26 +00:00
Daniel Wagner-Hall
5f280837a6 Add macaroon inspection script 2016-02-03 11:27:39 +00:00
Erik Johnston
6f52e90065 Merge branch 'develop' of github.com:matrix-org/synapse into erikj/public_room_fix 2016-02-03 11:06:29 +00:00
Erik Johnston
771528ab13 Change event_push_actions_rm_tokens schema 2016-02-03 10:50:49 +00:00
Erik Johnston
b32121a5d1 Unused import 2016-02-03 10:30:56 +00:00
Daniel Wagner-Hall
2e36689df3 Merge pull request #553 from matrix-org/daniel/accesstokenlogging
Log more diagnostics for unrecognised access tokens
2016-02-02 19:22:43 +00:00
Daniel Wagner-Hall
2df6114bc4 Log more diagnostics for unrecognised access tokens 2016-02-02 19:21:49 +00:00
Daniel Wagner-Hall
a644ac6d2c Explain what W503 is 2016-02-02 17:23:12 +00:00
Daniel Wagner-Hall
de11b5b9b5 Merge pull request #551 from matrix-org/daniel/flake8
Fix flake8 warnings for new flake8
2016-02-02 17:18:54 +00:00
Daniel Wagner-Hall
d83d004ccd Fix flake8 warnings for new flake8 2016-02-02 17:18:50 +00:00
Erik Johnston
43e13dbd4d Merge pull request #549 from matrix-org/erikj/sync
Fetch events in a separate transaction.
2016-02-02 16:23:54 +00:00
Erik Johnston
8a391e33ae s/get_room_changes_for_user/get_membership_changes_for_user/ 2016-02-02 16:12:10 +00:00
Erik Johnston
477b1ed6cf Fetch events in a separate transaction.
This has a couple of benefits:

- It reduces the time of transactions, allowing other database requests
  to run.
- Fetching events is given a dedicated database thread, and so can't
  starve other database requests.
2016-02-02 15:58:14 +00:00
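A rough illustration of the split, using a plain DB-API connection rather than Synapse's storage layer (table and column names are only indicative): the ID lookup runs in one short transaction, and the event bodies are fetched afterwards in a second one that can live on a dedicated connection or thread.

```python
import sqlite3  # stand-in for whatever DB-API driver is in use


def get_recent_event_ids(conn, room_id, limit=20):
    # First, short transaction: just the event IDs, no event bodies.
    cur = conn.execute(
        "SELECT event_id FROM events WHERE room_id = ? "
        "ORDER BY stream_ordering DESC LIMIT ?",
        (room_id, limit),
    )
    return [row[0] for row in cur.fetchall()]


def fetch_event_json(conn, event_ids):
    # Second, separate transaction (potentially a dedicated connection or
    # thread): fetch the actual event JSON for the IDs found above.
    placeholders = ",".join("?" * len(event_ids))
    cur = conn.execute(
        "SELECT event_id, json FROM event_json "
        "WHERE event_id IN (%s)" % placeholders,
        event_ids,
    )
    return dict(cur.fetchall())


# conn = sqlite3.connect("homeserver.db")
# ids = get_recent_event_ids(conn, "!room:example.org")
# events = fetch_event_json(conn, ids)
```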
Erik Johnston
04ad93e6fd Merge pull request #545 from matrix-org/erikj/sync
Move /sync state calculations from rest to handler
2016-02-02 15:28:43 +00:00
Erik Johnston
65e92eca49 Change the way we do public room list fetching 2016-02-02 15:21:10 +00:00
David Baker
9039904f4c Merge pull request #548 from matrix-org/dbkr/fix_guest_db_column
Pass make_guest when we autogen a user ID
2016-02-02 14:46:25 +00:00
David Baker
69214ea671 Pass make_guest when we autogen a user ID 2016-02-02 14:42:31 +00:00
Erik Johnston
b023995538 WARN if we get a topo token instead of stream. 2016-02-02 14:11:14 +00:00
David Baker
793369791a Merge pull request #547 from matrix-org/dbkr/fix_guest_upgrade
Pull guest access token out of the auth session params
2016-02-02 09:47:30 +00:00
Erik Johnston
7a8ea7e78b Merge pull request #546 from matrix-org/erikj/events
Switch over /events to use per room caches
2016-02-01 17:23:44 +00:00
Erik Johnston
854ca32f10 Comments 2016-02-01 16:52:27 +00:00
David Baker
d7ac861d3b Pull guest access token out of the auth session params, otherwise it will break if you open the email on a different device. 2016-02-01 16:33:19 +00:00
Erik Johnston
89b40b225c Order things correctly 2016-02-01 16:32:46 +00:00
Erik Johnston
4bf448be25 Switch over /events to use per room caches 2016-02-01 16:26:51 +00:00
Erik Johnston
fa48020a52 Move state calculations from rest to handler 2016-02-01 15:59:40 +00:00
Erik Johnston
1ef7cae41b Merge branch 'develop' of github.com:matrix-org/synapse into erikj/sync 2016-02-01 15:59:12 +00:00
Erik Johnston
2d3837bec7 Merge pull request #543 from matrix-org/erikj/sync
Cache get_room_changes_for_user
2016-02-01 15:05:06 +00:00
Erik Johnston
498c2e60fd Merge pull request #544 from matrix-org/erikj/stream_rooms
Only use room_ids in get_room_events_stream if is_guest
2016-02-01 15:04:57 +00:00
Erik Johnston
ceb6b8680a Only use room_ids in get_room_events_stream if is_guest 2016-02-01 10:33:52 +00:00
Erik Johnston
b264b9548f Merge branch 'develop' of github.com:matrix-org/synapse into erikj/sync 2016-02-01 09:34:17 +00:00
Erik Johnston
d98a9f2583 Don't use before_token. It's wrong. Use actual limit. 2016-01-31 13:31:15 +00:00
Erik Johnston
226a9a5fa6 Merge pull request #542 from matrix-org/erikj/cache_fix
Cache fixes
2016-01-29 16:54:34 +00:00
Erik Johnston
25c311eaf6 Cache get_room_changes_for_user 2016-01-29 16:52:48 +00:00
Erik Johnston
cc9c97e0dc Invalidate _account_data_stream_cache correctly 2016-01-29 16:41:51 +00:00
Erik Johnston
e70165039c If stream pos is greater than earliest known key and entity hasn't changed, then entity hasn't changed 2016-01-29 16:41:32 +00:00
Erik Johnston
c1de91aca4 Merge pull request #540 from matrix-org/erikj/sync
Prefill stream change caches
2016-01-29 15:52:42 +00:00
Erik Johnston
b55b90bfb4 Merge pull request #541 from matrix-org/erikj/fixsomeofpush
Make /events always return a newer token, if one exists
2016-01-29 15:45:35 +00:00
Erik Johnston
8da95b6f1b Comment. Remove superfluous order by 2016-01-29 15:39:17 +00:00
Mark Haines
b91baae09d Merge pull request #539 from matrix-org/markjh/3pid
Fix up the /account/3pid API
2016-01-29 16:34:13 +01:00
Erik Johnston
13724569ec Deal with None limit 2016-01-29 15:33:44 +00:00
Erik Johnston
4a6eb5eb45 Make /events always return a newer token, if one exists 2016-01-29 15:22:17 +00:00
Mark Haines
6927d0e091 Add missing param to the log line 2016-01-29 15:01:26 +00:00
Erik Johnston
b5dbced938 Don't prefill account data 2016-01-29 14:53:59 +00:00
Mark Haines
f2d5ff5bf2 Fix the mock homeserver used in the tests 2016-01-29 14:53:14 +00:00
Erik Johnston
3d60686c0c Actually use cache 2016-01-29 14:49:11 +00:00
Erik Johnston
45488e0ffa Max is not a function 2016-01-29 14:42:01 +00:00
Erik Johnston
f67d60496a Convert param style 2016-01-29 14:41:16 +00:00
Erik Johnston
18579534ea Prefill stream change caches 2016-01-29 14:37:59 +00:00
Mark Haines
47374a33fc Merge remote-tracking branch 'origin/develop' into markjh/3pid 2016-01-29 14:15:12 +00:00
Mark Haines
0fcafbece8 Add config option for setting the trusted id servers, disabling checking the ID server in integration tests 2016-01-29 14:12:26 +00:00
Erik Johnston
96bb4bf38a Update CHANGES 2016-01-29 13:56:22 +00:00
Erik Johnston
fd142c29d9 Merge branch 'develop' of github.com:matrix-org/synapse into release-v0.12.1 2016-01-29 13:52:12 +00:00
Erik Johnston
ebc5f00efe Bump AccountDataAndTagsChangeCache size 2016-01-29 13:37:40 +00:00
Erik Johnston
ea320d3464 Don't work out unread_notifs_for_room_id unless needed 2016-01-29 13:34:48 +00:00
Mark Haines
5687a00e4e Allow three_pid_creds as well as threePidCreds in /account/3pid 2016-01-29 13:26:15 +00:00
Erik Johnston
b18114e19e Merge pull request #536 from matrix-org/erikj/sync
Make /sync "better".
2016-01-29 13:04:51 +00:00
Erik Johnston
02a9c3be6c Merge pull request #538 from matrix-org/erikj/fix_lru_cache
Fix LruCache. Make TreeCache track its own size.
2016-01-29 11:53:55 +00:00
Erik Johnston
4fce59f274 Add tests 2016-01-29 11:33:11 +00:00
Erik Johnston
fb7299800f Directly set self.value 2016-01-29 11:29:14 +00:00
Erik Johnston
c046630c33 Remove spurious self.size 2016-01-29 11:17:54 +00:00
Erik Johnston
a30364c1f9 Correctly bookkeep the size of TreeCache 2016-01-29 10:44:46 +00:00
Erik Johnston
766526e114 Make TreeCache keep track of its own size. 2016-01-29 10:11:21 +00:00
Erik Johnston
50e18938a9 Reset size on clear 2016-01-29 10:00:45 +00:00
Erik Johnston
f3af1840cb Merge pull request #537 from matrix-org/erikj/cache_filters
Cache user filters.
2016-01-28 18:26:16 +00:00
Erik Johnston
467c27a1f9 Amalgamate tags and account data stream caches 2016-01-28 18:20:00 +00:00
Erik Johnston
3f5dd18bd4 If the same as the earliest key, assume nothing has changed. 2016-01-28 18:11:41 +00:00
Erik Johnston
40431251cb Correctly update _entity_to_key 2016-01-28 18:05:43 +00:00
Erik Johnston
82cf3a8043 Fix inequalities 2016-01-28 17:44:04 +00:00
Erik Johnston
03b2c2577c Don't use defer.returnValue 2016-01-28 17:29:24 +00:00
Erik Johnston
0663c5bd52 Include cache hits with has_entity_changed 2016-01-28 17:27:28 +00:00
Erik Johnston
35981c8b71 Fix test 2016-01-28 17:20:05 +00:00
Erik Johnston
8fe8951a8d Cache filters 2016-01-28 17:09:09 +00:00
Erik Johnston
fdca8ec418 Add events index 2016-01-28 16:41:59 +00:00
Erik Johnston
45cf827c8f Change name and doc has_entity_changed 2016-01-28 16:39:18 +00:00
Erik Johnston
00cb3eb24b Cache tags and account data 2016-01-28 16:37:41 +00:00
Erik Johnston
c23a8c7833 Ensure keys to RoomStreamChangeCache are ints 2016-01-28 15:55:26 +00:00
Erik Johnston
e1941442d4 Invalidate caches properly. Remove unused arg 2016-01-28 15:02:41 +00:00
Daniel Wagner-Hall
49c328a892 Prune on fetching
So we don't try to checkout a stale ref
2016-01-28 14:57:40 +00:00
Daniel Wagner-Hall
0935802f1e Pin pynacl to 0.3.0
Something has gone wrong in the packaging of 1.* which causes it not to
compile.
2016-01-28 14:47:03 +00:00
Erik Johnston
19fd425928 Merge branch 'develop' of github.com:matrix-org/synapse into erikj/sync 2016-01-28 14:46:08 +00:00
Erik Johnston
167d1df699 Merge pull request #534 from matrix-org/erikj/setup
Add a Homeserver.setup method
2016-01-28 14:45:36 +00:00
Erik Johnston
7ed2bbeb11 Clean up a bit. Add comment 2016-01-28 14:32:05 +00:00
Daniel Wagner-Hall
9101193242 Quote all the things 2016-01-28 14:18:45 +00:00
Erik Johnston
571a566399 Change load limit params 2016-01-28 14:11:16 +00:00
Erik Johnston
3c6518ddbf Amalgamate incremental and full sync for user 2016-01-28 14:03:48 +00:00
Erik Johnston
4e7948b47a Allow paginating backwards from stream token 2016-01-28 11:52:34 +00:00
Erik Johnston
ba8931829b Return correct type of token 2016-01-28 11:34:17 +00:00
Erik Johnston
61eaa6ec64 Merge branch 'develop' of github.com:matrix-org/synapse into erikj/sync 2016-01-28 10:50:57 +00:00
Erik Johnston
c5e7c0e436 Up get_rooms_for_user cache size 2016-01-28 09:58:45 +00:00
Richard van der Hoff
e26390ca46 Merge pull request #535 from matrix-org/rav/paginate_from_stream_token
Make it possible to paginate forwards from stream tokens
2016-01-28 09:49:46 +00:00
Erik Johnston
a6477d5933 Remove chdir 2016-01-28 09:19:55 +00:00
Richard van der Hoff
5cba88ea7c Make it possible to paginate forwards from stream tokens
In order that we can fill the gap after a /sync, make it possible to paginate
forwards from a stream token.
2016-01-27 17:42:45 +00:00
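For reference, forward pagination in the client-server API is just /messages called with dir=f and the /sync stream token as the from parameter; a hedged example (homeserver URL and token are placeholders, not part of this change):

```python
import requests


def fill_gap_after_sync(homeserver, access_token, room_id, since_token):
    """Sketch: page forwards from the stream token returned by /sync."""
    resp = requests.get(
        "%s/_matrix/client/r0/rooms/%s/messages" % (homeserver, room_id),
        params={
            "access_token": access_token,
            "from": since_token,   # stream token from /sync
            "dir": "f",            # paginate forwards
            "limit": 50,
        },
    )
    resp.raise_for_status()
    body = resp.json()
    # body["chunk"] holds the events; body["end"] is the token to continue from.
    return body["chunk"], body.get("end")
```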
Erik Johnston
5fc9b17518 No chdir 2016-01-27 17:39:20 +00:00
Erik Johnston
fa90c180ee Merge branch 'develop' of github.com:matrix-org/synapse into erikj/setup 2016-01-27 17:37:33 +00:00
Erik Johnston
5610880003 Merge pull request #530 from matrix-org/erikj/server_refactor
Remove redundant BaseHomeServer
2016-01-27 17:36:31 +00:00
Erik Johnston
e7febf4fbb PEP8 2016-01-27 17:33:27 +00:00
Erik Johnston
aca3193efb Use the same path for incremental with gap or without gap 2016-01-27 17:33:27 +00:00
Erik Johnston
b97f6626b6 Add cache to room stream 2016-01-27 17:33:26 +00:00
Erik Johnston
f93ecf8783 Don't turn on profiling 2016-01-27 17:33:26 +00:00
Erik Johnston
0487c9441f Fix tests 2016-01-27 17:33:13 +00:00
Erik Johnston
a955cbfa49 Merge branch 'develop' of github.com:matrix-org/synapse into erikj/setup 2016-01-27 17:22:35 +00:00
Richard van der Hoff
8c97b49886 Merge pull request #533 from matrix-org/rav/hashtest_federation
Fix federation for #test:matrix.org
2016-01-27 17:19:36 +00:00
Erik Johnston
2152b320c5 PEP 8 2016-01-27 17:09:17 +00:00
Richard van der Hoff
d6d60b4d6c Federation: drop events which cause SynapseErrors
... rather than rejecting any attempt to federate rooms which contain such
events.
2016-01-27 17:02:10 +00:00
Mark Haines
d6c831bd3d Merge pull request #531 from matrix-org/markjh/relative_push_rules
Fix adding push rules relative to other rules
2016-01-27 17:13:36 +01:00
Florent VIOLLEAU
97b364cb25 Update documentation
Signed-off-by: Florent VIOLLEAU <floviolleau@gmail.com>
2016-01-27 14:11:05 +01:00
Erik Johnston
03f4569dc3 Bump version and changelog 2016-01-27 10:35:30 +00:00
Mark Haines
8c94833b72 Fix adding push rules relative to other rules 2016-01-27 10:24:20 +00:00
Erik Johnston
9fda8b5193 Don't turn on profiling 2016-01-26 18:27:23 +00:00
Erik Johnston
e4e33c743e Merge branch 'develop' of github.com:matrix-org/synapse into erikj/server_refactor 2016-01-26 17:30:00 +00:00
Erik Johnston
87f9477b10 Add a Homeserver.setup method.
This is for setting up dependencies that require work on startup. This
is useful for the DataStore that wants to read a bunch from the database
before initialising.
2016-01-26 15:51:06 +00:00
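A minimal sketch of the startup pattern described above (class names are illustrative, not the actual Synapse objects): construction stays cheap, and anything that needs to touch the database happens in an explicit setup() step before the server starts serving traffic.

```python
class DataStore:
    def __init__(self, db_conn):
        self.db_conn = db_conn
        self._max_stream_id = None

    def prefill(self):
        # Work that needs the database, deliberately kept out of __init__.
        cur = self.db_conn.execute("SELECT MAX(stream_ordering) FROM events")
        self._max_stream_id = cur.fetchone()[0] or 0


class HomeServer:
    def __init__(self, db_conn):
        self.db_conn = db_conn
        self.datastore = None

    def setup(self):
        # Called once at startup, after __init__ and before listening.
        self.datastore = DataStore(self.db_conn)
        self.datastore.prefill()
        return self
```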
Erik Johnston
9959d9ece8 Remove redundant BaseHomeServer 2016-01-26 13:52:29 +00:00
David Baker
27b9775073 Merge pull request #529 from matrix-org/dbkr/one_to_one_only_messages
Only notify for messages in one to one rooms, not every event
2016-01-26 11:50:26 +00:00
David Baker
766c24b2e6 Only notify for messages in one to one rooms, not every event
Fixes the fact that candidate events and hangups generated notifications.
2016-01-26 10:21:41 +00:00
Mark Haines
7179fdd550 Merge pull request #528 from matrix-org/markjh/missing_yield
Add missing yield in push_rules set enabled
2016-01-25 21:26:30 +01:00
Erik Johnston
c887c4cbd5 Merge pull request #524 from matrix-org/erikj/sync
Move some sync logic from rest to handlers package
2016-01-25 16:58:39 +00:00
Mark Haines
e18257f0e5 Add missing yield in push_rules set enabled 2016-01-25 16:51:56 +00:00
Erik Johnston
8431f62ebb Merge pull request #525 from matrix-org/erikj/select_many
Implement a `_simple_select_many_batch`
2016-01-25 16:30:36 +00:00
Erik Johnston
f091b73e69 Merge pull request #527 from matrix-org/erikj/push_cache
Push: Use storage apis that are cached
2016-01-25 16:16:34 +00:00
Erik Johnston
ce6fbbea94 Merge pull request #526 from matrix-org/erikj/push_index
Add index to event_push_actions
2016-01-25 16:06:07 +00:00
Erik Johnston
aea5da0ef6 Guard against empty iterables 2016-01-25 15:59:29 +00:00
Erik Johnston
3a75159832 Merge pull request #521 from matrix-org/erikj/underscores
Underscores are allowed in user ids
2016-01-25 15:56:31 +00:00
Erik Johnston
1ebf5e3d03 Correct docstring 2016-01-25 15:53:36 +00:00
Erik Johnston
dc2647cd3d PEP8 2016-01-25 15:48:54 +00:00
Erik Johnston
86896408b0 Add index to event_push_actions 2016-01-25 15:30:32 +00:00
Erik Johnston
53cb173663 Push: Use storage apis that are cached 2016-01-25 13:55:18 +00:00
Erik Johnston
d59c58bc95 Remove weird stuff 2016-01-25 13:38:53 +00:00
Erik Johnston
ddd25def01 Implement a _simple_select_many_batch 2016-01-25 13:36:02 +00:00
Erik Johnston
8c6012a4af Fix tests 2016-01-25 13:12:35 +00:00
Erik Johnston
42deca50c2 Merge branch 'develop' of github.com:matrix-org/synapse into erikj/sync 2016-01-25 12:49:45 +00:00
Erik Johnston
d685ae73b4 Merge branch 'erikj/filters' of github.com:matrix-org/synapse into develop 2016-01-25 12:35:25 +00:00
Erik Johnston
4021f95261 Move logic from rest/ to handlers/ 2016-01-25 10:10:44 +00:00
David Baker
f92fe15897 Merge pull request #523 from matrix-org/dbkr/no_push_unless_notify
Better fix for actions with both dont_notify and tweaks
2016-01-22 17:27:25 +00:00
David Baker
3fe8c56736 Better fix for actions with both dont_notify and tweaks 2016-01-22 17:21:58 +00:00
David Baker
60965bd7e5 Revert b4a41aa542 as it's just broken. 2016-01-22 17:21:15 +00:00
David Baker
0e0e441b33 Merge pull request #522 from matrix-org/dbkr/no_push_unless_notify
Don't add notifications to the table unless there's actually a 'notify' action
2016-01-22 17:06:52 +00:00
David Baker
b4a41aa542 Don't add notifications to the table unless there's actually a 'notify' action 2016-01-22 16:56:48 +00:00
Erik Johnston
db6e26bb8c Don't mutate cached values 2016-01-22 16:03:55 +00:00
Erik Johnston
88baa3865e Merge branch 'develop' of github.com:matrix-org/synapse into erikj/sync 2016-01-22 15:57:12 +00:00
David Baker
74f49f99f9 Merge pull request #520 from matrix-org/dbkr/bulk_push_overlay_enabled
Overlay the push_rules_enabled map for users
2016-01-22 15:25:52 +00:00
David Baker
7065b75bfd Don't crash if a user has no push rule enabled entries 2016-01-22 15:13:44 +00:00
Erik Johnston
7959e8b764 Underscores are allowed in user ids 2016-01-22 14:59:49 +00:00
David Baker
52bdd1b834 Overlay the push_rules_enabled map for users, otherwise they won't be able to disable server default rules. 2016-01-22 14:58:19 +00:00
David Baker
7a3fe48ba4 Merge pull request #519 from matrix-org/dbkr/treecache
Make LRU caching tree-based so subtrees of the cache can be invalidated cheaply.
2016-01-22 14:47:48 +00:00
David Baker
7cd418d38e Don't add the member function if we're not using treecache 2016-01-22 13:40:37 +00:00
David Baker
cd80019eec docs 2016-01-22 12:21:13 +00:00
David Baker
d552861346 Revert all the bits changing keys of everything that used LRUCaches to tuples 2016-01-22 12:18:14 +00:00
David Baker
10f76dc5da Make LRU cache not default to treecache & add options to use it 2016-01-22 12:10:33 +00:00
David Baker
5b142788d2 Add __contains__ 2016-01-22 11:49:59 +00:00
David Baker
eaa836e8ca Docs for treecache 2016-01-22 11:47:22 +00:00
David Baker
42eae4634f Use new invalidate_many cache invalidation to invalidate the event_push_actions cache appropriately. 2016-01-22 11:22:48 +00:00
David Baker
8acc5cb60f Add invalidate_many here too 2016-01-22 11:22:32 +00:00
David Baker
31a051b677 Test treecache directly 2016-01-22 11:22:00 +00:00
Erik Johnston
8f9c74e9f1 Fix tests 2016-01-22 10:48:27 +00:00
Erik Johnston
975903ae17 Sanitize filters 2016-01-22 10:41:30 +00:00
David Baker
4efcaa43c8 Add tests for treecache directly and test del_multi at the LruCache level too. 2016-01-22 10:37:37 +00:00
David Baker
330be18ec5 peppate 2016-01-21 19:17:32 +00:00
David Baker
f1f8122120 Change LRUCache to be tree-based so we can delete subtrees. 2016-01-21 19:16:25 +00:00
Erik Johnston
297eded261 Merge pull request #517 from matrix-org/erikj/push_only_room
Only fetch events for rooms and receipts
2016-01-21 16:12:39 +00:00
Erik Johnston
0e07f2e15d Only fetch events for rooms and receipts 2016-01-21 16:10:37 +00:00
Erik Johnston
82b46f556d Merge pull request #516 from matrix-org/erikj/push_perf
Reduce number of calls to get_unread_event_push_actions_by_room
2016-01-21 15:12:56 +00:00
Erik Johnston
8f66fe6392 Cache get_unread_event_push_actions_by_room_for_user 2016-01-21 15:02:07 +00:00
Erik Johnston
3a00f13436 Only compute badge count when necessary.
This reverts commit d726597737.
2016-01-21 14:56:11 +00:00
Erik Johnston
c6549117a2 Fix AttributeError 2016-01-21 14:02:14 +00:00
Erik Johnston
ed1d189e10 Merge pull request #515 from matrix-org/erikj/syn-606
SYN-606: Peeking does not wake up /events
2016-01-21 13:40:20 +00:00
Erik Johnston
dfe1273d14 Merge pull request #509 from matrix-org/erikj/dns_cache
Cache dns lookups, and use the cache if we fail to lookup servers later
2016-01-21 13:37:23 +00:00
Erik Johnston
91a222c66d SYN-606: Peeking does not wake up /events
If a real user attempted to first peek into one room, and then another,
their room event stream would not be woken up for events in the later
room.
2016-01-21 13:22:26 +00:00
David Baker
0503bdb316 Merge pull request #514 from matrix-org/remove_member_event_rule
Remove member event rule as per SYN-607
2016-01-21 12:00:26 +00:00
David Baker
930ba003f8 Remove member event rule as per SYN-607 2016-01-21 11:50:27 +00:00
Erik Johnston
d54005059c Merge branch 'develop' of github.com:matrix-org/synapse into develop 2016-01-21 10:19:15 +00:00
Erik Johnston
d7c85ad916 Add another graph contrib 2016-01-21 10:19:05 +00:00
David Baker
c1a3021771 Merge pull request #507 from matrix-org/push_badge_counts
Push badge counts
2016-01-21 10:09:11 +00:00
Erik Johnston
d049e81b10 Merge pull request #513 from matrix-org/erikj/register_user_chars
Don't explode when given a unicode username in /register/
2016-01-21 09:53:35 +00:00
Erik Johnston
c43b6dcc75 Fix change_password 2016-01-20 16:14:48 +00:00
David Baker
367cfab4e6 peppate 2016-01-20 16:05:09 +00:00
Erik Johnston
69adf8c384 Merge pull request #512 from matrix-org/erikj/whine_on_from
Whine if we give a from param to /sync
2016-01-20 15:58:44 +00:00
Erik Johnston
73ca8e5834 Whine if we give a from param to /sync 2016-01-20 15:42:57 +00:00
Erik Johnston
b088291f14 Don't explode when given a unicode username in /register/ 2016-01-20 15:40:25 +00:00
Daniel Wagner-Hall
a2ae01cc0f Merge pull request #510 from matrix-org/daniel/nonguestpeeking
Allow non-guests to peek on rooms using /events
2016-01-20 15:34:18 +00:00
Daniel Wagner-Hall
da417aa56d Allow non-guests to peek on rooms using /events 2016-01-20 15:34:07 +00:00
David Baker
d4315bbf6b Add index by user id on receipts_linearized 2016-01-20 15:33:27 +00:00
David Baker
3fa344c037 Add storage function to get all receipts for a user. Also add some cache invalidation to the receipts storage because there wasn't any, and remove a method that was unused. 2016-01-20 15:30:31 +00:00
David Baker
7cc047455e Inline membership specifier 2016-01-20 13:50:28 +00:00
David Baker
d726597737 Simplify badge updating code by just updating it every time we get woken up and it's not an event 2016-01-20 13:49:00 +00:00
David Baker
2309450a76 Merge branch 'develop' into push_badge_counts 2016-01-20 13:45:13 +00:00
David Baker
ea5eea2424 Merge branch 'dbkr/no_push_for_own_events' into develop 2016-01-20 13:44:46 +00:00
David Baker
746f6e0eb3 'filtered' is a list of zero or 1 2016-01-20 13:44:04 +00:00
David Baker
7441d8cc0c Merge remote-tracking branch 'origin/develop' into push_badge_counts 2016-01-20 13:40:22 +00:00
David Baker
ccf9387d57 Merge branch 'develop' into push_badge_counts 2016-01-20 13:33:45 +00:00
David Baker
d4cefb6289 Merge pull request #511 from matrix-org/dbkr/no_push_for_own_events
Don't generate push actions for our own events
2016-01-20 13:33:25 +00:00
David Baker
259d1ecd1d Don't generate push actions for our own events 2016-01-20 13:24:59 +00:00
Erik Johnston
191070123d Cache dns lookups, and use the cache if we fail to lookup servers later 2016-01-20 11:34:09 +00:00
David Baker
afb7b377f2 Merge branch 'develop' into push_badge_counts 2016-01-19 18:17:23 +00:00
Erik Johnston
af30140621 Merge pull request #506 from matrix-org/erikj/push_fast
Only compute unread notifications for rooms we send down stream
2016-01-19 17:31:10 +00:00
Erik Johnston
ac2842ff1e Only compute unread notifications for rooms we send down stream 2016-01-19 17:19:53 +00:00
Erik Johnston
892ee473d9 Don't use form of get_state_for_events with None state_key 2016-01-19 17:14:46 +00:00
Erik Johnston
40d9765123 Merge pull request #505 from matrix-org/erikj/push_fast
Push actions perf
2016-01-19 16:16:05 +00:00
Erik Johnston
2818a000aa Use split rather than endswith 2016-01-19 16:11:39 +00:00
Erik Johnston
fb5d8e58ff Change regex cache size to 5000 2016-01-19 16:07:07 +00:00
Erik Johnston
5a7d1ecffc Add regex cache. Only calculate push actions for users that have sent read receipts, and are on that server 2016-01-19 16:01:05 +00:00
Erik Johnston
d056a0a3d8 Handle glob -> regex errors 2016-01-19 14:43:24 +00:00
Erik Johnston
7a079adc8f Merge pull request #477 from matrix-org/erikj/access_token_log
Don't log urlencoded access_tokens
2016-01-19 14:28:29 +00:00
Erik Johnston
b8518ffe65 Use all_ephemeral_by_room in incremental_sync_with_gap_for_room 2016-01-19 14:26:58 +00:00
Erik Johnston
9654ee0848 Return don't break 2016-01-19 14:24:59 +00:00
Erik Johnston
7ecd211163 Except truthy values 2016-01-19 14:22:02 +00:00
Erik Johnston
05f78b3b52 Merge pull request #504 from matrix-org/erikj/highlight_count
Return highlight_count in /sync
2016-01-19 13:21:48 +00:00
Erik Johnston
f5fc8f2928 Merge pull request #486 from matrix-org/default_notify
Change default pushrules back to notifying for all messages.
2016-01-19 12:49:29 +00:00
Erik Johnston
9a8949f022 Merge branch 'develop' of github.com:matrix-org/synapse into default_notify 2016-01-19 11:37:05 +00:00
Erik Johnston
3adcc4c86a Return highlight_count in /sync 2016-01-19 11:35:50 +00:00
Erik Johnston
47e7963e50 Merge pull request #502 from matrix-org/erikj/push_notif_perf
Unread notification performance.
2016-01-19 11:27:27 +00:00
Daniel Wagner-Hall
88af7bb48b Merge pull request #503 from matrix-org/daniel/nonghosts
Don't error on AS non-ghost user use
2016-01-19 11:27:00 +00:00
Erik Johnston
0d241e1114 Take a deepcopy of push rules before mutating them 2016-01-19 10:15:12 +00:00
Erik Johnston
f750a442f7 Update _id 2016-01-19 10:14:53 +00:00
Erik Johnston
003853e702 Preserve truthiness 2016-01-18 17:34:02 +00:00
Erik Johnston
a284ad4092 You need to escape backslashes 2016-01-18 17:20:44 +00:00
Erik Johnston
47f82e4408 Fix branch that didn't check word_boundary 2016-01-18 17:04:36 +00:00
Erik Johnston
5cd2126a6a Remove dead code 2016-01-18 16:49:46 +00:00
Erik Johnston
29c353c553 Don't split at word boundaries, actually use regex 2016-01-18 16:48:17 +00:00
Daniel Wagner-Hall
808a8aedab Don't error on AS non-ghost user use
This will probably go away either when we fix our existing ASes, or when
we kill the concept of non-ghost users.
2016-01-18 16:33:05 +00:00
Daniel Wagner-Hall
74474a6d63 Pull out app service user lookup
I find this a lot simpler than nested try-catches and stuff
2016-01-18 16:32:33 +00:00
Erik Johnston
d16dcf642e Drop log levels 2016-01-18 15:44:04 +00:00
Erik Johnston
7dd14e5d1c Add comments and remove dead code 2016-01-18 15:42:23 +00:00
Erik Johnston
866fe27e78 Do for loop once at start 2016-01-18 15:29:41 +00:00
Erik Johnston
d1f56f732e Use static for const dicts 2016-01-18 15:17:56 +00:00
Erik Johnston
0e39dcd135 Remove internal ids 2016-01-18 14:50:17 +00:00
Erik Johnston
345ff2196a Don't edit ruleset 2016-01-18 14:50:17 +00:00
Erik Johnston
2c176e02ae Make unit tests work 2016-01-18 14:48:50 +00:00
Erik Johnston
63485b3029 Re-enable unread notifications 2016-01-18 14:48:30 +00:00
Erik Johnston
f59b564507 Make notifications go quicker 2016-01-18 14:48:29 +00:00
Erik Johnston
2068678b8c Make Event objects behave more like dicts 2016-01-18 14:43:50 +00:00
Erik Johnston
cc66a9a5e3 Allow filtering events for multiple users at once 2016-01-18 14:43:50 +00:00
Daniel Wagner-Hall
5de1563997 Merge pull request #501 from matrix-org/daniel/unban
Require unbanning before other membership changes
2016-01-15 16:27:29 +00:00
Daniel Wagner-Hall
ac5a4477ad Require unbanning before other membership changes 2016-01-15 16:27:26 +00:00
Daniel Wagner-Hall
c049f60d4a Merge pull request #500 from matrix-org/daniel/cleanup
Remove unused parameters
2016-01-15 16:02:22 +00:00
Daniel Wagner-Hall
b5ce4f0427 Remove unused parameters 2016-01-15 13:54:03 +00:00
David Baker
ac12b6d332 Merge branch 'invalid_user_name' into develop 2016-01-15 10:07:01 +00:00
David Baker
5819b7a78c M_INVALID_USERNAME to be consistent with the parameter name 2016-01-15 10:06:34 +00:00
David Baker
5bf1a3d6dc Merge pull request #499 from matrix-org/invalid_user_name
Add specific error code for invalid user names.
2016-01-15 09:18:02 +00:00
David Baker
3f8db3d597 Add specific error code for invalid user names. 2016-01-14 17:21:04 +00:00
Erik Johnston
a50013fd99 Merge pull request #497 from matrix-org/erikj/max_limit
Clamp pagination limits to at most 1000
2016-01-14 16:28:52 +00:00
Richard van der Hoff
2978053d16 Merge branch 'release-v0.12.1' into develop 2016-01-14 15:04:08 +00:00
Daniel Wagner-Hall
2c760372d6 Merge pull request #496 from matrix-org/daniel/3
Require ID and as_token be unique for ASs

Defaults ID to as_token if not specified. This will change
when IDs are fully supported.
2016-01-14 14:34:11 +00:00
Daniel Wagner-Hall
2680043bc6 Require ID and as_token be unique for ASs
Defaults ID to as_token if not specified. This will change
when IDs are fully supported.
2016-01-14 14:34:01 +00:00
David Baker
8db451f652 Merge pull request #498 from matrix-org/push_rule_enabled_fix
Fix enabling & disabling push rules
2016-01-14 13:34:05 +00:00
Richard van der Hoff
430d3d74f6 Merge branch 'release-v0.12.1' into develop 2016-01-14 11:17:47 +00:00
Richard van der Hoff
7ee1879ed4 Merge pull request #492 from matrix-org/rav/event_context_shiz_release
Add 'event' result to 'context' endpoint
2016-01-14 11:17:29 +00:00
Daniel Wagner-Hall
d14fcfd24a Merge pull request #487 from matrix-org/daniel/forceregistration
Require AS users to be registered before use
2016-01-14 11:06:43 +00:00
Daniel Wagner-Hall
27927463a1 Merge pull request #494 from matrix-org/daniel/2
Don't start server if ASes are invalidly configured
2016-01-14 11:06:19 +00:00
Daniel Wagner-Hall
fcb6df45e5 Merge pull request #493 from matrix-org/daniel/1
Delete unused code
2016-01-14 11:06:14 +00:00
David Baker
a7927c13fd Fix enabling & disabling push rules 2016-01-14 10:53:44 +00:00
Erik Johnston
339c8f0133 Clamp pagination limits to at most 1000 2016-01-14 10:22:02 +00:00
Erik Johnston
bce602eb4e Use logger not logging 2016-01-14 09:56:26 +00:00
Erik Johnston
939cbd7057 Merge pull request #495 from matrix-org/erikj/disable_notification
Temporarily disable notification branch
2016-01-14 09:46:09 +00:00
David Baker
12623c99b6 Use the unread notification count to send accurate badge counts in push notifications. 2016-01-13 18:55:57 +00:00
Erik Johnston
2655d61d70 Don't change signature. Return empty list 2016-01-13 17:43:39 +00:00
Erik Johnston
fcb05b4c82 Temporarily disable notification branch 2016-01-13 17:39:58 +00:00
Mark Haines
806bae1ee7 Merge pull request #488 from matrix-org/markjh/user_name
Rename 'user_name' to 'user_id' in push
2016-01-13 18:11:01 +01:00
Daniel Wagner-Hall
f6fcff3602 Don't start server if ASes are invalidly configured 2016-01-13 17:09:24 +00:00
Daniel Wagner-Hall
244b356a37 Delete unused code 2016-01-13 17:03:58 +00:00
Richard van der Hoff
49f33f6438 Add 'event' result to 'context' endpoint
... because the context isn't much use without the event.
2016-01-13 16:42:14 +00:00
Daniel Wagner-Hall
93afb40cd4 Skip, rather than erroring, invalid guest requests
Erroring causes problems when people make illegal requests, because they
don't know what limit parameter they should pass.

This is definitely buggy. It leaks message counts for rooms people don't
have permission to see, via tokens. But apparently we already
consciously decided to allow that as a team, so this preserves that
behaviour.
2016-01-13 16:41:42 +00:00
Mark Haines
9c1f853d58 Rename 'user_name' to 'user_id' in push to make it consistent with the rest of the code 2016-01-13 13:32:59 +00:00
Daniel Wagner-Hall
7d09ab8915 Require AS users to be registered before use 2016-01-13 13:19:47 +00:00
David Baker
d9db819e23 Change default pushrules back to notifying for all messages. 2016-01-13 13:15:53 +00:00
Mark Haines
37716d55ed Merge pull request #482 from matrix-org/markjh/table_name
Finish removing the .*Table objects from the storage layer.
2016-01-13 13:45:39 +01:00
Erik Johnston
4399684582 Merge pull request #483 from matrix-org/erikj/bulk_get_push_rules
bulk_get_push_rules should handle empty lists
2016-01-13 12:35:11 +00:00
Erik Johnston
44b4fc5f50 Use compiled regex 2016-01-13 11:47:32 +00:00
Mark Haines
f4dad9f639 Merge remote-tracking branch 'origin/erikj/bulk_get_push_rules' into markjh/table_name
Conflicts:
	synapse/storage/push_rule.py
2016-01-13 11:46:07 +00:00
Erik Johnston
8740e4e94a bulk_get_push_rules should handle empty lists 2016-01-13 11:37:17 +00:00
Mark Haines
c0a279e808 Delete the table objects from TransactionStore 2016-01-13 11:15:20 +00:00
Mark Haines
96e400fee5 Remove the RoomsTable object 2016-01-13 11:07:32 +00:00
Erik Johnston
72ba26679b Merge pull request #480 from matrix-org/erikj/guest_event_tightloop
Don't fire user_joined_room when guest hits /events
2016-01-13 11:00:50 +00:00
Erik Johnston
ea47760bd8 Merge pull request #481 from matrix-org/erikj/SYN-589
Don't include old left rooms in /sync
2016-01-12 16:55:55 +00:00
Erik Johnston
70dfe4dc96 Don't include old left rooms 2016-01-12 15:01:56 +00:00
Mark Haines
a8e9e0b916 Remove the PushersTable and EventPushActionsTable objects 2016-01-12 14:41:26 +00:00
Mark Haines
31de2953a3 Remove the PushRuleTable and PushRuleEnableTable objects 2016-01-12 14:36:16 +00:00
Erik Johnston
fd5c28dc52 Don't fire user_joined_room when guest hits /events
Firing the 'user_joined_room' signal every time a guest hits /events
causes all presence for that room to be returned in the stream. This may
sound helpful, but causes clients to tightloop calling /events.

In general, guest users should get the initial presence from (room)
initial sync and so we don't require presence to subsequently come down
the event stream.
2016-01-12 11:04:06 +00:00
Daniel Wagner-Hall
42aa1f3f33 Merge pull request #478 from matrix-org/daniel/userobject
Introduce a User object

I'm sick of passing around more and more things as tuple items around
the whole world, and needing to edit every call site every time there is
more information about a user. So pass them around together as an
object.

This object has incredibly poorly named fields because we have a
convention that `user` indicates a UserID object, and `user_id`
indicates a string. I tried to clean up the whole repo to fix this, but
gave up. So instead, I introduce a second convention. A user_object is a
User, and a user_id_object is a UserId. I may have cried a little bit.
2016-01-11 17:50:22 +00:00
Daniel Wagner-Hall
2110e35fd6 Introduce a Requester object
This tracks data about the entity which made the request. This is
instead of passing around a tuple, which requires call-site
modifications every time a new piece of optional context is passed
around.

I tried to introduce a User object. I gave up.
2016-01-11 17:48:45 +00:00
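What "pass them around together as an object" looks like in miniature (the field names here are illustrative): a small record type instead of a growing tuple, so adding another piece of request context later doesn't mean editing every call site.

```python
from collections import namedtuple

# One value to thread through the request path instead of an ever-growing tuple.
Requester = namedtuple("Requester", ["user", "access_token_id", "is_guest"])


def create_requester(user, access_token_id=None, is_guest=False):
    return Requester(user, access_token_id, is_guest)


# Call sites read named fields rather than unpacking positional tuple items.
requester = create_requester("@alice:example.org")
assert not requester.is_guest
```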
David Baker
b5d33a656f Postgres doesn't like booleans 2016-01-11 17:13:52 +00:00
David Baker
fe56138142 Remove rogue 'admin' 2016-01-11 17:09:03 +00:00
Matthew Hodgson
c110eb92f8 Merge pull request #476 from koobs/patch-1
Add FreeBSD install section & instructions
2016-01-08 17:55:25 +00:00
Erik Johnston
8f8b884430 Don't log urlencoded access_tokens 2016-01-08 17:48:08 +00:00
Kubilay Kocak
a8cd1eb996 Add FreeBSD install section & instructions
Add FreeBSD installation instructions for both Ports and Packages now that matrix-synapse has landed [1] in the tree.

[1] https://svnweb.freebsd.org/changeset/ports/405527
2016-01-09 04:36:22 +11:00
Erik Johnston
29e595e5d4 Merge pull request #474 from matrix-org/erikj/core_dump
Turn on core dumps
2016-01-08 16:52:15 +00:00
David Baker
c232780081 Merge pull request #456 from matrix-org/store_event_actions
Send unread notification counts
2016-01-08 14:47:15 +00:00
Mark Haines
7c816de442 Merge pull request #475 from matrix-org/markjh/fix_thumbnail
Only use cropped thumbnails when asked for a cropped thumbnail.
2016-01-07 20:07:53 +01:00
Mark Haines
8677b7d698 Only use cropped thumbnails when asked for a cropped thumbnail.
Even though ones cropped with scale might be technically valid.
2016-01-07 18:57:15 +00:00
Erik Johnston
33bef689c1 Turn on core dumps 2016-01-07 15:34:30 +00:00
Erik Johnston
fcbe63eaad Use logger not logging 2016-01-07 15:28:17 +00:00
Erik Johnston
5727922106 Merge pull request #473 from matrix-org/erikj/ssh_manhole
Change manhole to use ssh
2016-01-07 14:36:16 +00:00
Erik Johnston
5dc5e29b9c s/telnet/ssh/ 2016-01-07 14:02:57 +00:00
Erik Johnston
3deffcdb1e Merge pull request #465 from matrix-org/syn-453
Update readme to reflect change in signing key name
2016-01-07 14:01:03 +00:00
Erik Johnston
c9ae1d1ee5 Change manhole to use ssh 2016-01-07 13:59:02 +00:00
David Baker
daadcf36c0 This comma is actually important 2016-01-07 10:15:35 +00:00
David Baker
823b679232 more commas 2016-01-07 10:02:47 +00:00
Matthew Hodgson
6c28ac260c copyrights 2016-01-07 04:26:29 +00:00
Matthew Hodgson
49c34dfd36 Merge pull request #472 from roblabla/patch-1
Config Comment mixup in captcha public/private key
2016-01-06 22:24:28 +00:00
Robin Lambertz
4106477e7f Config Comment mixup in captcha public/private key 2016-01-06 23:19:33 +01:00
Daniel Wagner-Hall
7ac6ca7311 Merge pull request #468 from matrix-org/daniel/versionsyo
Add /_matrix/versions to report supported versions
2016-01-06 18:09:04 +00:00
Daniel Wagner-Hall
11a974da21 Add /_matrix/versions to report supported versions 2016-01-06 18:08:52 +00:00
David Baker
09dc9854cd comma style 2016-01-06 17:44:10 +00:00
David Baker
442fcc02f7 Merge remote-tracking branch 'origin/develop' into store_event_actions 2016-01-06 17:28:55 +00:00
David Baker
b6a585348a Adding is_guest here won't work because it just constructs a dict of uid -> password hash 2016-01-06 17:16:02 +00:00
Mark Haines
c582f178b7 Merge pull request #469 from matrix-org/markjh/joined_guest_access
Guest users must be joined to a room to see it in /sync
2016-01-06 18:10:11 +01:00
Mark Haines
4cec90a260 Pass whether the user was a guest to some of the event streams 2016-01-06 16:54:57 +00:00
David Baker
0e48f7f245 fix tests 2016-01-06 16:46:41 +00:00
Mark Haines
392773ccb2 Guest users must be joined to a room to see it in /sync 2016-01-06 16:44:13 +00:00
Daniel Wagner-Hall
bf32922e5a Log when starting stats reporting 2016-01-06 14:13:34 +00:00
Daniel Wagner-Hall
797691f908 Log on stats scheduling 2016-01-06 14:04:27 +00:00
Daniel Wagner-Hall
e5ea4fad78 Merge pull request #466 from matrix-org/daniel/logloglog
Log when we skip daily messages
2016-01-06 13:52:19 +00:00
Daniel Wagner-Hall
5880de186b Log when we skip daily messages 2016-01-06 13:52:16 +00:00
David Baker
992928304f Delete notifications for redacted events 2016-01-06 11:58:46 +00:00
David Baker
ae1262a241 Add schema change file for is_guest flag 2016-01-06 11:58:20 +00:00
David Baker
c79f221192 Add is_guest flag to users db to track whether a user is a guest user or not. Use this so we can run _filter_events_for_client when calculating event_push_actions. 2016-01-06 11:38:09 +00:00
Erik Johnston
8ce5679813 Update readme to reflect change in signing key name 2016-01-06 11:19:51 +00:00
David Baker
eb03625626 Merge remote-tracking branch 'origin/develop' into store_event_actions 2016-01-05 18:39:50 +00:00
Daniel Wagner-Hall
87d577e023 Merge pull request #463 from matrix-org/daniel/hashtagnofilter
Skip, rather than erroring, invalid guest requests

Erroring causes problems when people make illegal requests, because they
don't know what limit parameter they should pass.

This is definitely buggy. It leaks message counts for rooms people don't
have permission to see, via tokens. But apparently we already
consciously decided to allow that as a team, so this preserves that
behaviour.
2016-01-05 18:12:40 +00:00
Daniel Wagner-Hall
2ef6de928d Skip, rather than erroring, invalid guest requests
Erroring causes problems when people make illegal requests, because they
don't know what limit parameter they should pass.

This is definitely buggy. It leaks message counts for rooms people don't
have permission to see, via tokens. But apparently we already
consciously decided to allow that as a team, so this preserves that
behaviour.
2016-01-05 18:12:37 +00:00
Daniel Wagner-Hall
29e131df43 Merge pull request #462 from matrix-org/daniel/guestupgrade
Allow guests to upgrade their accounts
2016-01-05 18:01:29 +00:00
Daniel Wagner-Hall
cfd07aafff Allow guests to upgrade their accounts 2016-01-05 18:01:18 +00:00
Erik Johnston
a178eb1bc8 Merge pull request #464 from matrix-org/erikj/crop_correct
Use larger thumbnails rather than smaller when method=crop
2016-01-05 17:41:07 +00:00
Erik Johnston
8737ead008 Use larger thumbnail rather than smaller. 2016-01-05 17:30:30 +00:00
Erik Johnston
90921981be Merge pull request #461 from matrix-org/erikj/sync_leave
Return /sync when something under the 'leave' key has changed
2016-01-05 14:59:05 +00:00
Erik Johnston
acb19068d0 Return /sync when something under the 'leave' key has changed 2016-01-05 14:49:06 +00:00
Erik Johnston
d74c4e90d4 Merge pull request #460 from matrix-org/erikj/create_room_3pid_invite
Support inviting 3pids in /createRoom
2016-01-05 14:06:23 +00:00
David Baker
85ca8cb90c comment typo 2016-01-05 13:32:39 +00:00
Erik Johnston
c3ea36304b Use named args 2016-01-05 12:57:45 +00:00
Erik Johnston
1b5642604b Support inviting 3pids in /createRoom 2016-01-05 11:56:21 +00:00
Erik Johnston
07c33eff43 Merge branch 'master' of github.com:matrix-org/synapse into develop 2016-01-05 10:25:13 +00:00
David Baker
4eb7b950c8 = not == in sql 2016-01-04 18:11:17 +00:00
Matthew Hodgson
dc65d0ae9d typoe 2016-01-04 17:23:15 +00:00
Matthew Hodgson
b18b99eb14 fix another whitespace rst bug 2016-01-04 17:17:27 +00:00
Matthew Hodgson
5680be70ae fix whitespace bug 2016-01-04 17:13:52 +00:00
David Baker
c77e7e60fc Only joined rooms have unread_notif_count 2016-01-04 15:49:06 +00:00
David Baker
d74c6ace24 comma 2016-01-04 15:32:00 +00:00
David Baker
f1b67730fa Add unread_notif_count in incremental_sync_with_gap 2016-01-04 14:50:36 +00:00
David Baker
92a1e74b20 fix tests 2016-01-04 14:17:35 +00:00
David Baker
c914d67cda Rename event-actions to event_push_actions as per PR request 2016-01-04 14:05:37 +00:00
Mark Haines
f35f8d06ea Merge remote-tracking branch 'origin/release-v0.12.0' 2016-01-04 14:02:50 +00:00
Mark Haines
d2709a5389 Bump changelog and version for v0.12.0 2016-01-04 13:57:39 +00:00
David Baker
928c575c6f Merge remote-tracking branch 'origin/develop' into store_event_actions 2016-01-04 13:39:51 +00:00
David Baker
3051c9d002 Address minor PR issues 2016-01-04 13:39:29 +00:00
Richard van der Hoff
34c09f33da Update CHANGES 2016-01-04 12:45:58 +00:00
Richard van der Hoff
cf3282d103 Merge pull request #459 from matrix-org/rav/fix_r0_login
Expose /login under r0
2016-01-04 12:41:53 +00:00
Richard van der Hoff
32d9fd0b26 Expose /login under r0
The spec says /login should be available at r0 and 'unstable', so make it so.
2016-01-02 17:24:28 +00:00
Mark Haines
c6e79c84de Bump version and update changelog for v0.12.0-rc3 2015-12-23 16:15:54 +00:00
Mark Haines
8d6dde7825 Merge pull request #457 from matrix-org/markjh/cached_sync
Add a cache for initialSync responses that expires after 5 minutes
2015-12-23 16:04:52 +00:00
Mark Haines
d12c00bdc3 Add some docstring explaining what the snapshot cache does 2015-12-23 15:18:11 +00:00
Mark Haines
ba39d3d5d7 Merge pull request #458 from matrix-org/markjh/guest_auth
Missing yield on guest access auth check

Needs matrix-org/sytest#125 to land first
2015-12-23 14:10:09 +00:00
Mark Haines
f3948e001f Missing yield on guest access auth check
Needs matrix-org/sytest#125 to land first
2015-12-23 14:10:06 +00:00
Mark Haines
7fa71e3267 Add a unit test for the snapshot cache 2015-12-23 11:48:03 +00:00
Mark Haines
517fb9a023 Move the doc string to the public facing method 2015-12-22 18:53:47 +00:00
Mark Haines
9ac417fa88 Add a cache for initialSync responses that expires after 5 minutes 2015-12-22 18:27:56 +00:00
David Baker
d2a92c6bde Fix merge fail with anon access stuff 2015-12-22 18:25:04 +00:00
David Baker
d79e90f078 Add mocks to make tests work again 2015-12-22 17:56:56 +00:00
David Baker
9b4cd0cd0f pep8 & unused variable 2015-12-22 17:25:09 +00:00
David Baker
140a50f641 Merge remote-tracking branch 'origin/develop' into store_event_actions 2015-12-22 17:23:35 +00:00
David Baker
5645d9747b Add some comments to areas that could be optimised. 2015-12-22 17:19:22 +00:00
David Baker
3fbb031745 Remove the list of problems (moved to jira issues) 2015-12-22 17:13:47 +00:00
David Baker
4c8f6a7e42 Insert push actions in a single db query rather than one per user/profile_tag 2015-12-22 17:04:31 +00:00
Mark Haines
7df276d219 Merge pull request #455 from matrix-org/markjh/guest_access
Allow guest access to /sync
2015-12-22 16:09:23 +00:00
Mark Haines
0ee0138325 Include the list of bad room ids in the error 2015-12-22 15:49:32 +00:00
David Baker
77f06856b6 clarify problems 2015-12-22 15:22:26 +00:00
David Baker
65c451cb38 Add bulk push rule evaluator which actually still evaluates rules one by one, but does far fewer db queries to fetch the rules 2015-12-22 15:19:34 +00:00
Mark Haines
251aafccca Use a list comprehension 2015-12-22 14:03:24 +00:00
Mark Haines
c058625959 Merge remote-tracking branch 'origin/develop' into markjh/guest_access
Conflicts:
	synapse/api/filtering.py
2015-12-22 13:58:18 +00:00
Mark Haines
cdd04f7055 Hook up read receipts and typing notifications for guest access 2015-12-22 11:59:55 +00:00
Mark Haines
542ab0f886 Merge branch 'develop' into markjh/guest_access 2015-12-22 11:49:12 +00:00
Mark Haines
e525b46f12 Merge pull request #454 from matrix-org/markjh/room_filtering
Add top level filters for filtering by room id

Documented by matrix-org/matrix-doc#246
2015-12-22 11:40:35 +00:00
Mark Haines
b9b4466d0d Add top level filters for filtering by room id
Documented by matrix-org/matrix-doc#246
2015-12-22 11:40:32 +00:00
Mark Haines
c3fff251a9 Allow guest access to /sync 2015-12-22 11:21:03 +00:00
Mark Haines
45a9e0ae0c Allow guest access if the user provides a list of rooms in the filter 2015-12-22 10:25:46 +00:00
Mark Haines
489a4cd1cf Add top level filtering by room id 2015-12-21 21:10:41 +00:00
Mark Haines
2a2b2ef834 Remove bogus comment about branch coverage 2015-12-21 20:21:52 +00:00
Daniel Wagner-Hall
2e2eeb43a6 Merge pull request #453 from matrix-org/daniel/avatarurls
Return room avatar URLs in /publicRooms

Spec: https://github.com/matrix-org/matrix-doc/pull/244
Tests: https://github.com/matrix-org/sytest/pull/121
2015-12-21 20:38:16 +01:00
Daniel Wagner-Hall
7f3148865c Return room avatar URLs in /publicRooms
Spec: https://github.com/matrix-org/matrix-doc/pull/244
Tests: https://github.com/matrix-org/sytest/pull/121
2015-12-21 19:38:04 +00:00
Mark Haines
bb9c7f2dd9 Delete all the .coverage files, including the combined .coverage 2015-12-21 17:51:57 +00:00
Mark Haines
9036d2d6a8 Use an absolute path when specifying the directory for synapse in jenkins.sh 2015-12-21 17:15:05 +00:00
David Baker
c061b47c57 Merge remote-tracking branch 'origin/develop' into store_event_actions 2015-12-21 15:30:26 +00:00
David Baker
f73f154ec2 Only run pushers for users on this hs! 2015-12-21 15:28:54 +00:00
Mark Haines
64b6606824 Remove accidentally committed debug logging 2015-12-21 15:22:03 +00:00
Mark Haines
42a7a09eea Merge pull request #452 from matrix-org/paul/SYN-558
Actually look up required remote server key IDs
2015-12-21 11:01:56 +00:00
David Baker
091c545c4f pep8 2015-12-21 10:14:57 +00:00
Paul "LeoNerd" Evans
a6ba41e078 Actually look up required remote server key IDs
set.union() is a side-effect-free function that returns the union of two
sets. This clearly wanted .update(), which is the side-effecting mutator
version.
2015-12-18 21:36:42 +00:00
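The difference being described, in a couple of lines: union() is pure and returns a new set, so calling it for its side effect accumulates nothing, whereas update() mutates the set in place.

```python
missing_key_ids = {"ed25519:a"}

missing_key_ids.union({"ed25519:b"})   # returns a new set, which is discarded
print(missing_key_ids)                 # {'ed25519:a'}  -- nothing accumulated

missing_key_ids.update({"ed25519:b"})  # mutates in place
print(missing_key_ids)                 # {'ed25519:a', 'ed25519:b'}
```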
Mark Haines
f85949bde0 Merge pull request #451 from matrix-org/markjh/branch_coverage
Generate code coverage report when running jenkins.sh
2015-12-18 20:44:50 +00:00
Mark Haines
2f871ad143 Generate code coverage report when running jenkins.sh 2015-12-18 20:44:47 +00:00
Matthew Hodgson
c8ea2d5b1f Merge pull request #450 from matrix-org/matthew/no-identicons
Matthew/no identicons
2015-12-18 18:14:06 +00:00
David Baker
b131fb1fe2 add list of things I want to fix with this branch 2015-12-18 18:04:45 +00:00
David Baker
413d0d6a24 Make unread notification count sending work: put the correct count in incremental syncs too, where necessary, and fix silly bugs like only selecting the event actions for that user... 2015-12-18 17:47:00 +00:00
David Baker
0a2d73fd60 Merge branch 'release-v0.12.0' into develop 2015-12-18 10:07:48 +00:00
David Baker
ce4999268a Fix typo that broke registration on the mobile clients 2015-12-18 10:07:28 +00:00
Daniel Wagner-Hall
633ceb9bb1 Merge pull request #449 from matrix-org/daniel/3pid
Add display_name to 3pid invite in m.room.member invites
2015-12-18 09:55:59 +01:00
Matthew Hodgson
64374bda5b fix indentation level 2015-12-17 23:04:53 +00:00
Matthew Hodgson
772ad4f715 stop generating default identicons. reverts most of 582019f870 and solves vector-web/vector-im#346 2015-12-17 23:04:20 +00:00
Daniel Wagner-Hall
bdacee476d Add display_name to 3pid invite in m.room.member invites 2015-12-17 18:55:08 +01:00
Daniel Wagner-Hall
10f82b4bea Merge pull request #448 from matrix-org/daniel/3pid
Strip address and such out of 3pid invites
2015-12-17 18:29:22 +01:00
Daniel Wagner-Hall
8c5f252edb Strip address and such out of 3pid invites
We're not meant to leak that into the graph
2015-12-17 18:09:51 +01:00
Mark Haines
8b9f471d27 Merge pull request #447 from matrix-org/rav/fix_search_pagination
Fix 500 error when back-paginating search results
2015-12-17 16:43:50 +00:00
Richard van der Hoff
a64f9bbfe0 Fix 500 error when back-paginating search results
We were mistakenly adding pagination clauses to the count query, which then
failed because the count query doesn't join to the events table.
2015-12-17 12:50:46 +00:00
David Baker
42ad49f5b7 still very WIP, but now sends unread_notifications_count in the room object on sync (only actually correct in a full sync: hardcoded to 0 in incremental syncs). 2015-12-16 18:42:09 +00:00
Daniel Wagner-Hall
2b0f8a9482 Fix typo 2015-12-16 17:59:44 +01:00
Daniel Wagner-Hall
af4422c42a Merge pull request #446 from matrix-org/daniel/invitemetadata
Give the IS a bunch more 3pid invite context

This allows it to form richer emails
2015-12-16 14:05:46 +01:00
Daniel Wagner-Hall
0311612ce9 Give the IS a bunch more 3pid invite context
This allows it to form richer emails
2015-12-16 13:05:32 +00:00
Mark Haines
5fc03449c8 Merge pull request #440 from matrix-org/daniel/ise
Include errcode on Internal Server Error
2015-12-16 11:26:55 +00:00
Mark Haines
4fab578b43 Merge branch 'release-v0.12.0' into develop 2015-12-16 11:18:45 +00:00
Mark Haines
661b76615b Merge pull request #445 from matrix-org/markjh/rebind_threepid
Allow users to change which account a 3pid is bound to
2015-12-15 17:07:47 +00:00
Mark Haines
dcfc70e8ed Allow users to change which account a 3pid is bound to 2015-12-15 17:02:21 +00:00
Mark Haines
63fdd9fe0b Changelog and version bump for v0.12.0-rc2 2015-12-14 16:26:59 +00:00
Mark Haines
910956b0ec Merge pull request #443 from matrix-org/markjh/commentary
Add commentary for fix in PR #442
2015-12-14 15:48:00 +00:00
Oddvar Lovaas
d3ac8fd87d Added info about Martin Giess' auto-deployment process with vagrant/ansible 2015-12-14 15:27:27 +00:00
Mark Haines
e98e00558a Merge pull request #444 from matrix-org/markjh/presence_race
Fix a race between started/stopped stream
2015-12-14 15:21:52 +00:00
Mark Haines
3ddf0b9722 Fix spacing 2015-12-14 15:20:59 +00:00
Mark Haines
2acae8300f Fix logging to lie less 2015-12-14 15:19:37 +00:00
Mark Haines
dbe7892e03 Fix a race between started/stopped stream 2015-12-14 15:09:41 +00:00
Mark Haines
28c5181dfe Add commentary for fix in PR#442 2015-12-14 14:50:51 +00:00
Mark Haines
15e9885197 Merge branch 'release-v0.12.0' into develop 2015-12-14 14:46:55 +00:00
Mark Haines
8505a4ddc3 Merge pull request #441 from matrix-org/markjh/fts_skip_invalid
Skip events where the body, name or topic isn't a string
2015-12-14 14:42:35 +00:00
Mark Haines
6051266924 Merge pull request #442 from matrix-org/markjh/missing_prev_content
Check whether prev_content or prev_sender is set …
2015-12-14 14:39:07 +00:00
Mark Haines
070e28e203 Combine the prev content tests 2015-12-14 14:34:04 +00:00
Mark Haines
834924248f Check whether prev_content or prev_sender is set before trying to rollback state 2015-12-14 14:09:21 +00:00
Mark Haines
98dfa7d24f Skip events where the body, name or topic isn't a string when back-populating the FTS index 2015-12-14 13:55:46 +00:00
Daniel Wagner-Hall
338c0a8a69 Include errcode on Internal Server Error 2015-12-14 13:50:50 +00:00
Mark Haines
a874c0894a Merge pull request #437 from matrix-org/markjh/parallel_sync
Do the /sync in parallel across the rooms like /initialSync does
2015-12-14 13:34:28 +00:00
Daniel Wagner-Hall
f382a3bb7e Merge pull request #439 from matrix-org/daniel/typo
Fix typo
2015-12-14 12:46:34 +00:00
Daniel Wagner-Hall
76e69cc8de Fix typo 2015-12-14 12:38:55 +00:00
Mark Haines
fde412b240 Merge pull request #438 from matrix-org/markjh/fix_search_sql
Fix typo in sql for full text search on sqlite3
2015-12-14 11:43:49 +00:00
Mark Haines
bfc52a2342 Fix typo in sql for full text search on sqlite3 2015-12-14 11:38:11 +00:00
Mark Haines
deeebbfcb7 Merge branch 'release-v0.12.0' into develop 2015-12-12 14:21:49 +00:00
Mark Haines
1ee7280c4c Do the /sync in parallel across the rooms like /initialSync does 2015-12-11 16:48:20 +00:00
Erik Johnston
cde49d3d2b Merge pull request #435 from matrix-org/erikj/search
Include approximate count of search results
2015-12-11 16:27:38 +00:00
Paul Evans
e738920156 Merge pull request #433 from matrix-org/paul/tiny-fixes
Ensure that the event that gets persisted is the one that was signed
2015-12-11 15:29:27 +00:00
Mark Haines
0065e554e0 Merge pull request #436 from matrix-org/markjh/pip_instructions
SYN-90: We don't need --process-dependency-links
2015-12-11 15:11:30 +00:00
Mark Haines
5a3e4e43d8 SYN-90: We don't need --process-dependency-links
When installing synapse, since all its dependencies are on PyPI
2015-12-11 14:39:46 +00:00
Erik Johnston
d9a5c56930 Include approximate count of search results 2015-12-11 11:40:23 +00:00
Erik Johnston
51fb590c0e Use more efficient query form 2015-12-11 11:12:57 +00:00
Matthew Hodgson
5577a61090 throwaway 1-liner for generating password hashes 2015-12-10 19:03:06 +00:00
David Baker
5e909c73d7 Store nothing instead of ['dont_notify'] for events with no notification required: much as it would be nice to be able to tell the difference between an event not having been processed and there being no notification for it, I don't think that's worth filling up the table with ['dont_notify']. Consequently, treat the empty actions array as dont_notify and filter dont_notify out of the result. 2015-12-10 18:40:28 +00:00
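The convention the commit settles on, as a tiny sketch: an empty or missing actions list is treated the same as ['dont_notify'], and dont_notify is stripped out before actions are handed back to callers.

```python
def normalise_actions(stored_actions):
    """Sketch: [] (or nothing stored) means dont_notify; dont_notify itself
    is never returned to callers."""
    if not stored_actions:
        return []
    return [a for a in stored_actions if a != "dont_notify"]


assert normalise_actions(None) == []
assert normalise_actions(["dont_notify"]) == []
assert normalise_actions(["notify", {"set_tweak": "highlight"}]) == [
    "notify", {"set_tweak": "highlight"}]
```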
Mark Haines
e0c9f30efa Merge pull request #434 from matrix-org/markjh/forget_rooms
Add caches for whether a room has been forgotten by a user
2015-12-10 18:01:46 +00:00
Mark Haines
515548a47a Missing yield 2015-12-10 17:54:23 +00:00
David Baker
aa667ee396 Save event actions to the db 2015-12-10 17:51:15 +00:00
Mark Haines
7d6b313312 Add caches for whether a room has been forgotten by a user 2015-12-10 17:49:34 +00:00
David Baker
a84a693327 Having consulted The Erikle, this should go at the end of on_receive_pdu, otherwise it will be triggered whenever we backfill too. 2015-12-10 17:18:46 +00:00
Paul "LeoNerd" Evans
99afb4b750 Ensure that the event that gets persisted is the one that was signed 2015-12-10 17:08:21 +00:00
David Baker
21f135ba76 Very first cut of calculating actions for events as they come in. Doesn't store them yet. Not very efficient. 2015-12-10 16:26:08 +00:00
Paul "LeoNerd" Evans
d7ee7b589f Merge branch 'develop' into paul/tiny-fixes 2015-12-10 16:21:00 +00:00
Mark Haines
a8589d1ff3 Mark the version as a -rc1 release candidate 2015-12-10 11:39:00 +00:00
Mark Haines
dd9430e758 Update release date 2015-12-10 11:26:58 +00:00
Mark Haines
05f6cb42db Bump synapse version to v0.12.0 2015-12-09 17:48:02 +00:00
Mark Haines
5bdb93c2a6 Add to changelog 2015-12-09 17:45:35 +00:00
Mark Haines
613748804a Changelog for v0.12.0 2015-12-09 17:35:55 +00:00
David Baker
86345a511f Merge pull request #432 from matrix-org/pushrules_refactor
Split out the push rule evaluator into a separate file
2015-12-09 16:13:57 +00:00
David Baker
a24eedada7 pep8 2015-12-09 15:57:42 +00:00
David Baker
4a728beba1 Split out the push rule evaluator into a separate file so it can be more readily reused. Should be functionally identical. 2015-12-09 15:51:34 +00:00
Mark Haines
019597555f Merge pull request #431 from matrix-org/markjh/filter_inline
Allow filter JSON object in the filter query parameter in /sync

Documented by matrix-org/matrix-doc#224
2015-12-09 12:56:53 +00:00
Mark Haines
e4bfe50e8f Allow filter JSON object in the filter query parameter in /sync
Documented by matrix-org/matrix-doc#224
2015-12-09 12:56:50 +00:00
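In practice this means the filter query parameter on /sync may carry either a previously-created filter ID or a literal JSON object. A hedged example of the inline form (homeserver URL and token are placeholders):

```python
import json
import requests

filter_json = {"room": {"timeline": {"limit": 10}}}

resp = requests.get(
    "https://homeserver.example/_matrix/client/r0/sync",
    params={
        "access_token": "MDAx...",          # placeholder token
        "filter": json.dumps(filter_json),  # inline JSON instead of a filter_id
    },
)
resp.raise_for_status()
```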
Daniel Wagner-Hall
0f826b0b0d Merge pull request #430 from matrix-org/daniel/unstable
Merge pull request #430 from matrix-org/daniel/unstable
2015-12-09 11:34:22 +00:00
Erik Johnston
7c2ff8c889 Merge pull request #405 from matrix-org/erikj/search-ts
Change the result dict to be a list in /search response
2015-12-08 16:15:27 +00:00
Daniel Wagner-Hall
7a8ba4c9a0 Actually host r0 and unstable prefixes 2015-12-08 15:26:52 +00:00
Mark Haines
219027f580 Merge pull request #429 from matrix-org/markjh/db_counters
Track the time spent in the database per request.
2015-12-08 11:52:35 +00:00
Mark Haines
6a5ff5f223 Track the time spent in the database per request.
Also track the number of transactions that request started.
2015-12-07 17:56:11 +00:00
Mark Haines
f7a1cdbbc6 Merge pull request #423 from matrix-org/markjh/archived_flag
Only include the archived rooms if an include_leave flag is set in the…
2015-12-07 13:16:03 +00:00
Mark Haines
d547afeae0 Merge remote-tracking branch 'origin/master' into develop 2015-12-07 13:13:43 +00:00
Mark Haines
dd108286df Merge pull request #426 from OlegGirko/fix_mock_import
Fix mock import in tests.
2015-12-07 13:13:09 +00:00
David Baker
266df8a9b8 Merge pull request #428 from matrix-org/pusher_api_log
Add logging to pushers API to log the body of the request
2015-12-07 12:44:15 +00:00
David Baker
9c9b2829ae also do more structured logging 2015-12-07 12:01:00 +00:00
David Baker
50e5886de1 pep8 2015-12-07 11:57:48 +00:00
David Baker
ba1d740239 Add logging to pushers API to log the body of the request 2015-12-07 11:52:20 +00:00
Mark Haines
a190b2e85e Merge pull request #427 from matrix-org/markjh/log_context
Add a setter for the current log context.
2015-12-07 11:00:12 +00:00
Mark Haines
3dd1630848 Add a setter for the current log context.
Move the resource tracking inside that setter so that it is easier
to make sure that the resource tracking isn't double counting the
resource usage.
2015-12-07 10:51:18 +00:00
Daniel Wagner-Hall
07d18dcab1 Merge pull request #424 from matrix-org/daniel/pushdictification
Take object not bool

Allows bool as legacy fallback

See https://github.com/matrix-org/matrix-doc/pull/212
2015-12-07 10:44:35 +00:00
Daniel Wagner-Hall
41905784f7 Take object not bool
Allows bool as legacy fallback

See https://github.com/matrix-org/matrix-doc/pull/212
2015-12-07 10:44:33 +00:00
Oleg Girko
4013216fcc Fix mock import in tests.
For some reason, one test imports Mock class from mock.mock
rather than from mock.
This change fixes this error.

Signed-off-by: Oleg Girko <ol@infoserver.lv>
2015-12-06 20:50:11 +00:00
Matthew Hodgson
84f2ad5dea Merge pull request #425 from MadsRC/develop
Added installation instructions for postgres on CentOS 7
2015-12-05 14:46:09 +00:00
Mads R. Christensen
44b2bf91be Added installation instructions for postgres on CentOS 7 2015-12-05 15:09:20 +01:00
Mark Haines
660dee94af Only include the archived rooms if an include_leave flag is set in the filter 2015-12-04 17:32:09 +00:00
Mark Haines
262a97f02b Merge pull request #422 from matrix-org/markjh/schema
Bump schema version.
2015-12-04 15:45:44 +00:00
Mark Haines
bd0fa9e2d2 Merge pull request #421 from matrix-org/markjh/resource_metrics
Add metrics to track the cpu on the main thread consumed by each type…
2015-12-04 15:29:31 +00:00
Mark Haines
d57c5cda71 Bump schema version.
As we released version 26 in v0.11.1
2015-12-04 15:28:39 +00:00
Mark Haines
99e1d6777f Add metrics to track the cpu on the main thread consumed by each type of request 2015-12-04 14:42:24 +00:00
Mark Haines
3c85a317d6 Merge pull request #420 from matrix-org/markjh/resource_usage
Track the cpu used in the main thread by each logging context
2015-12-04 14:15:41 +00:00
Mark Haines
5231737369 Add comments to explain why we are hardcoding RUSAGE_THREAD 2015-12-04 11:53:38 +00:00
Mark Haines
d6059bdd2a Fix warnings 2015-12-04 11:34:23 +00:00
Mark Haines
48a2526d62 Track the cpu used in the main thread by each logging context 2015-12-03 21:03:01 +00:00
Mark Haines
b29d2fd7f8 Merge pull request #419 from matrix-org/markjh/reuse_captcha_client
Reuse the captcha client rather than creating a new one for each request
2015-12-03 17:00:59 +00:00
Daniel Wagner-Hall
edfcb83473 Flatten devices into a dict, not a list 2015-12-03 16:19:21 +00:00
Mark Haines
478b4e3ed4 Reuse the captcha client rather than creating a new one for each request 2015-12-03 13:48:55 +00:00
Erik Johnston
b8680b82c3 Merge pull request #414 from matrix-org/erikj/if_not_exists
Older versions of SQLite don't like IF NOT EXISTS in virtual tables
2015-12-03 13:21:09 +00:00
Daniel Wagner-Hall
ac213c2e08 Merge pull request #415 from matrix-org/daniel/endpoints
2015-12-03 12:19:12 +00:00
Daniel Wagner-Hall
e880164c59 Merge pull request #418 from matrix-org/daniel/whois
2015-12-03 12:18:01 +00:00
Daniel Wagner-Hall
526bc33e02 Fix implementation of /admin/whois 2015-12-02 17:29:47 +00:00
David Baker
181616deed Merge pull request #417 from matrix-org/fix_db_v15_postgres
Fix schema delta 15 on postgres
2015-12-02 17:26:17 +00:00
David Baker
e515b48929 Just replace the table definition with the one from full_schema 16 2015-12-02 17:23:52 +00:00
David Baker
8810eb8c39 Fix schema delta 15 on postgres in the very unlikely event that anyone upgrades to 15... 2015-12-02 17:19:11 +00:00
Mark Haines
748c0f5efa Merge pull request #416 from matrix-org/markjh/idempotent_state
Make state updates in the C+S API idempotent
2015-12-02 17:14:45 +00:00
Mark Haines
491f3d16dc Make state updates in the C+S API idempotent 2015-12-02 15:50:50 +00:00
Daniel Wagner-Hall
872c134807 Update endpoints to reflect current spec 2015-12-02 15:45:04 +00:00
Erik Johnston
f721fdbf87 Merge pull request #412 from matrix-org/erikj/search
Search: Add prefix matching support
2015-12-02 13:56:41 +00:00
Erik Johnston
976cb5aaa8 Throw if unrecognized DB type 2015-12-02 13:50:43 +00:00
Erik Johnston
b2def42bfd Older versions of SQLite don't like IF NOT EXISTS in virtual tables 2015-12-02 13:29:14 +00:00
Erik Johnston
b9acef5301 Fix so highlight matching works again 2015-12-02 13:28:13 +00:00
Mark Haines
58d0927767 Merge pull request #410 from matrix-org/markjh/edu_frequency
Only fire user_joined_room if the user has actually joined.
2015-12-02 13:17:15 +00:00
Erik Johnston
7dd6e5efca Remove duplication. Add comment about regex. 2015-12-02 13:09:37 +00:00
Mark Haines
5dc09e82c4 Merge pull request #413 from matrix-org/markjh/reuse_http_client
Reuse a single http client, rather than creating new ones
2015-12-02 12:56:23 +00:00
Mark Haines
c2c70f7daf Use the context returned by _handle_new_event 2015-12-02 12:01:24 +00:00
Erik Johnston
477da77b46 Search: Add prefix matching support 2015-12-02 11:40:52 +00:00
Mark Haines
37b2d69bbc Reuse a single http client, rather than creating new ones 2015-12-02 11:36:02 +00:00
David Baker
addb248e0b Merge pull request #411 from matrix-org/default_dont_notify
Change the m.room.message rule to be disabled by default
2015-12-02 11:32:56 +00:00
David Baker
4b1281f9b7 Change the m.room.message rule to be disabled by default so we only notify for 1:1 rooms / highlights out-of-the-box 2015-12-02 11:26:49 +00:00
Mark Haines
dede14f689 Merge branch 'develop' into markjh/edu_frequency 2015-12-02 10:57:51 +00:00
Mark Haines
5eb4d13aaa Fix typo in collect_presencelike_data 2015-12-02 10:50:58 +00:00
Mark Haines
c30cdb0d68 Add comments 2015-12-02 10:49:35 +00:00
Mark Haines
2a0ec3b89d Merge branch 'develop' into markjh/edu_frequency
Conflicts:
	synapse/handlers/federation.py
	synapse/handlers/room.py
2015-12-02 10:40:22 +00:00
Erik Johnston
03b2a6a8aa Merge pull request #409 from MadsRC/develop
Develop
2015-12-02 10:08:18 +00:00
Mark Haines
9fbd504b4e Merge pull request #408 from matrix-org/markjh/distributor_facade
Wrap calls to distributor.fire in appropriately named functions
2015-12-02 09:44:56 +00:00
Erik Johnston
9670f226e3 Merge pull request #406 from matrix-org/erikj/search
Search: Don't disregard grouping info in pagination tokens
2015-12-02 09:13:20 +00:00
Mads R. Christensen
6863466653 Added a single line to explain what the server_name is used for 2015-12-02 00:37:55 +01:00
Mads R. Christensen
3d5c5e8be5 Added a few lines to better explain how to run Synapse on a FQDN that is not part of the UserID 2015-12-02 00:35:45 +01:00
Matthew Hodgson
65a9bf2dd5 various fixes - thanks to Mark White for pointing out you need to run synapse before you try to register a new user 2015-12-01 23:05:12 +00:00
Mark Haines
ae9f8cda7e Merge branch 'develop' into markjh/distributor_facade 2015-12-01 21:18:13 +00:00
Mark Haines
5d321e4b9a Fix definitions script 2015-12-01 21:17:58 +00:00
Mark Haines
a9526831a4 Wrap calls to distributor.fire in appropriately named functions so that static analysis can work out what is calling what 2015-12-01 20:53:04 +00:00
Mark Haines
ed0f79bdc5 Only fire user_joined_room if the membership has changed 2015-12-01 19:46:15 +00:00
Mads R. Christensen
98ee629d00 Added --report-stats=yes|no as Synapse won't generate the config without it 2015-12-01 20:20:53 +01:00
Mark Haines
f73ea0bda2 Merge branch 'develop' into markjh/edu_frequency 2015-12-01 19:15:27 +00:00
Mads R. Christensen
c533f69d38 Added libffi-devel in CentOS 7 installation requirements and fixed indentation of yum groupinstall. Signed-off-by: Mads Robin Christensen <mads@v42.dk> 2015-12-01 20:00:41 +01:00
Mark Haines
a2922bb944 Merge pull request #392 from matrix-org/markjh/client_config
Add API for setting per user account data at the top level or room level.
2015-12-01 18:42:59 +00:00
Mark Haines
95f30ecd1f Add API for setting account_data globaly or on a per room basis 2015-12-01 18:41:32 +00:00
Matthew Hodgson
f487355364 Merge pull request #407 from MadsRC/develop
Develop
2015-12-01 18:41:31 +00:00
Daniel Wagner-Hall
6e70979973 Merge pull request #400 from matrix-org/daniel/versioning
2015-12-01 17:36:37 +00:00
Daniel Wagner-Hall
14d7acfad4 Host /unstable and /r0 versions of r0 APIs 2015-12-01 17:34:32 +00:00
Erik Johnston
27c5e1b374 Search: Don't disregard grouping info in pagination tokens 2015-12-01 16:47:18 +00:00
Mark Haines
af96c6f4d3 Merge pull request #404 from matrix-org/markjh/trivial_rename
Rename presence_handler.send_invite
2015-12-01 16:11:03 +00:00
Mark Haines
d32db0bc45 Merge pull request #402 from matrix-org/markjh/event_formatting
Copy rather than move the fields to shuffle between a v1 and a v2 event.
2015-12-01 16:10:24 +00:00
Mark Haines
7b593af7e1 rename the method in the tests as well 2015-12-01 16:06:17 +00:00
Mark Haines
3d3da2b460 Only fire user_joined_room on the distributor if the user has actually joined the room 2015-12-01 16:03:08 +00:00
Mark Haines
31069ecf6a Rename presence_handler.send_invite to presence_handler.send_presence_invite to distinguish it from normal invites 2015-12-01 15:59:45 +00:00
Erik Johnston
71578e2bf2 Change the result tict to be a list 2015-12-01 14:48:35 +00:00
Erik Johnston
2430fcd462 Merge pull request #403 from matrix-org/erikj/search-ts
Allow paginating search ordered by recents
2015-12-01 14:46:27 +00:00
Mark Haines
f593a6e5f8 Add options to definitions.py to fetch referrers and to output dot 2015-12-01 14:29:42 +00:00
Mark Haines
c91a05776f Merge pull request #398 from matrix-org/markjh/jenkins_postgres
Run sytest against postgresql
2015-12-01 13:25:44 +00:00
Mark Haines
5f9a2cb337 Write the tap results for each database to different files when running sytest 2015-12-01 13:24:43 +00:00
Mark Haines
8c902431ba Set the port when running sytest under postgresql 2015-12-01 12:01:29 +00:00
Mark Haines
a33c0748e3 Use a PORT_BASE environment variable to configure the ports that sytest uses 2015-12-01 11:48:20 +00:00
Mark Haines
306415391d Only add the user_id if the sender is present 2015-12-01 11:14:48 +00:00
Erik Johnston
d0f28b46cd Merge pull request #399 from matrix-org/erikj/search
Return words to highlight in search results
2015-12-01 11:13:07 +00:00
Erik Johnston
da7dd58641 Tidy up a bit 2015-12-01 11:06:40 +00:00
Mark Haines
bde8d78b8a Copy rather than move the fields to shuffle between a v1 and a v2 event.
This should make all v1 APIs compatible with v2 clients, while still
allowing v1 clients to access the fields.

This makes the documentation easier since we can just document the v2
format and explain that some of the fields, in some of the APIs are
duplicated for backwards compatibility, rather than having to document
two separate event formats.
2015-11-30 17:46:35 +00:00
Erik Johnston
4dcaa42b6d Allow paginating search ordered by recents 2015-11-30 17:45:31 +00:00
Erik Johnston
76936f43ae Return words to highlight in search results 2015-11-27 16:40:42 +00:00
Mark Haines
f280726037 Run sytest against postgresql if appropriate databases exist for it to run against 2015-11-26 16:50:44 +00:00
Erik Johnston
6cd595e438 Merge pull request #397 from matrix-org/erikj/redaction_inequality
Allow user to redact with an equal power
2015-11-26 13:23:09 +00:00
Erik Johnston
17dd5071ef Allow user to redact with an equal power
Users only need their power level to be equal to the redact level for
them to be allowed to redact events.
2015-11-26 11:17:57 +00:00
Daniel Wagner-Hall
df7cf6c0eb Fix SQL for postgres again 2015-11-23 18:54:41 +00:00
Daniel Wagner-Hall
3e573a5c6b Fix SQL for postgres 2015-11-23 18:48:53 +00:00
Daniel Wagner-Hall
7dfa455508 Remove size specifier for database column
Postgres doesn't support them like this.

We don't have a bool type in common between postgres and sqlite.
2015-11-23 18:35:25 +00:00
Daniel Wagner-Hall
924d85a75e Merge pull request #375 from matrix-org/daniel/guestroominitialsync
Clean up room initialSync for guest users
2015-11-23 16:10:49 +00:00
Matthew Hodgson
91695150cc Merge pull request #396 from MadsRC/develop
CentOS 7 dep instructions from MadsRC
2015-11-21 10:51:32 +00:00
Mads R. Christensen
3dd09a8795 Added myself to AUTHORS.rst
Signed-off-by: Mads Robin Christensen <mads@v42.dk>
2015-11-20 22:39:10 +01:00
Mads R. Christensen
d7739c4e37 Added prerequisite instructions for CentOS 7
Signed-off-by: Mads Robin Christensen <mads@v42.dk>
2015-11-20 22:37:23 +01:00
Mark Haines
c6a15f5026 Merge pull request #385 from matrix-org/daniel/forgetrooms
Allow users to forget rooms
2015-11-20 18:07:26 +00:00
Erik Johnston
2ca01ed747 Merge branch 'release-v0.11.1' of github.com:matrix-org/synapse 2015-11-20 17:38:58 +00:00
Erik Johnston
1b64cb019e Merge branch 'erikj/perspective_limiter' into release-v0.11.1 2015-11-20 17:24:23 +00:00
Erik Johnston
8c3af5bc62 Merge pull request #395 from matrix-org/erikj/perspective_limiter
Don't limit connections to perspective servers
2015-11-20 17:23:26 +00:00
Erik Johnston
0eabfa55f6 Fix typo 2015-11-20 17:17:58 +00:00
Erik Johnston
6408541075 Don't limit connections to perspective servers 2015-11-20 17:15:44 +00:00
Erik Johnston
13130c2c9f Mention report_stats in upgrade.rst 2015-11-20 16:48:43 +00:00
Erik Johnston
7680ae16c9 Fix english 2015-11-20 16:40:55 +00:00
Erik Johnston
2c1bc4392f Bump changes and version 2015-11-20 16:35:01 +00:00
Erik Johnston
93f7bb8dd5 Merge pull request #394 from matrix-org/erikj/search
Add options for including state in search results
2015-11-20 16:31:24 +00:00
Richard van der Hoff
1d9c1d4166 Merge pull request #389 from matrix-org/rav/flatten_sync_response
v2 sync: Get rid of the event_map, and rename the keys of the rooms obj
2015-11-20 15:18:55 +00:00
Erik Johnston
3f151da314 Merge pull request #391 from matrix-org/erikj/remove_token_from_flow
Remove m.login.token from advertised flows.
2015-11-20 14:16:59 +00:00
Erik Johnston
6b95a79724 Add option to include the current room state 2015-11-20 14:16:42 +00:00
Erik Johnston
e3dae653e8 Comment 2015-11-20 14:05:22 +00:00
Erik Johnston
9de1f328ad Merge pull request #393 from matrix-org/erikj/destination_retry_max
Use min and not max to set an upper bound on retry interval
2015-11-20 13:41:20 +00:00
Erik Johnston
506874cca9 Optionally include historic profile info 2015-11-20 11:39:44 +00:00
Erik Johnston
2f2bbb4d06 Use min and not max to set an upper bound on retry interval 2015-11-20 09:34:58 +00:00
Daniel Wagner-Hall
95c3306798 Merge branch 'daniel/forgetrooms' of github.com:matrix-org/synapse into daniel/forgetrooms 2015-11-19 15:00:14 -05:00
Daniel Wagner-Hall
df6824a008 Ignore forgotten rooms in v2 sync 2015-11-19 14:54:47 -05:00
Paul "LeoNerd" Evans
dd11bf8a79 Merge branch 'develop' into rav/flatten_sync_response 2015-11-19 17:21:03 +00:00
Paul "LeoNerd" Evans
1cfda3d2d8 Merge branch 'develop' into daniel/forgetrooms 2015-11-19 16:53:13 +00:00
Erik Johnston
8b5349c7bc Merge branch 'master' of github.com:matrix-org/synapse into develop 2015-11-19 16:17:54 +00:00
Erik Johnston
37de8a7f4a Remove m.login.token from advertised flows. 2015-11-19 16:16:49 +00:00
Mark Haines
7a802ec0ff Merge pull request #386 from matrix-org/markjh/rename_pud_to_account_data
s/private_user_data/account_data/
2015-11-19 15:21:35 +00:00
Daniel Wagner-Hall
d2ecde2cbb Merge pull request #382 from matrix-org/daniel/macarooncleanup
Take a boolean not a list of lambdas
2015-11-19 15:16:32 +00:00
Daniel Wagner-Hall
248cfd5eb3 Take a boolean not a list of lambdas 2015-11-19 15:16:25 +00:00
Daniel Wagner-Hall
9da4c5340d Simplify code 2015-11-19 10:07:21 -05:00
Erik Johnston
57a76c9aee Merge pull request #388 from matrix-org/erikj/messages
Split out text for missing config options.
2015-11-19 13:08:09 +00:00
Erik Johnston
e5d91b8e57 Merge pull request #387 from matrix-org/erikj/fix_port_script
Fix database port script to work with new event_search table
2015-11-19 13:04:28 +00:00
Richard van der Hoff
f6e092f6cc Put back the 'state.events' subobject
We're keeping 'events', in case we decide to add more keys later.
2015-11-19 12:23:42 +00:00
Richard van der Hoff
24ae0eee8e v2 /sync: Rename the keys of the 'rooms' object to match member states
joined->join
invited->invite
archived->leave
2015-11-19 10:51:12 +00:00
Richard van der Hoff
3c3fc6b268 Flatten the /sync response to remove the event_map 2015-11-19 10:51:11 +00:00
Daniel Wagner-Hall
f0ee1d515b Merge pull request #381 from matrix-org/daniel/jenkins-sytest-cached
Share sytest clone across runs

Depends on https://github.com/matrix-org/synapse/pull/380
2015-11-18 23:22:34 +00:00
Daniel Wagner-Hall
628ba81a77 Share sytest clone across runs
Depends on https://github.com/matrix-org/synapse/pull/380
2015-11-18 23:22:27 +00:00
Daniel Wagner-Hall
bed7889703 Apply forgetting properly to historical events 2015-11-18 18:11:12 -05:00
Erik Johnston
03204f54ac Merge branch 'master' of github.com:matrix-org/synapse into develop 2015-11-18 18:38:06 +00:00
Erik Johnston
1fe973fa5a Merge branch 'master' of github.com:matrix-org/synapse into develop 2015-11-18 15:53:39 +00:00
Mark Haines
d153f482dd Rename the database table 2015-11-18 15:33:02 +00:00
Mark Haines
1c960fbb80 s/private_user_data/account_data/ 2015-11-18 15:31:04 +00:00
Daniel Wagner-Hall
ba26eb3d5d Allow users to forget rooms 2015-11-17 17:17:30 -05:00
Erik Johnston
b697a842a5 Merge pull request #384 from matrix-org/erikj/shorter_retries
Only retry federation requests for a long time for background requests
2015-11-17 18:46:04 +00:00
Erik Johnston
a9770e5d24 Merge branch 'hotfixes-v0.10.0-r1' of github.com:matrix-org/synapse into develop 2015-11-17 18:27:17 +00:00
Erik Johnston
391f2aa56c Merge branch 'release-v0.11.0' of github.com:matrix-org/synapse into develop 2015-11-17 15:10:49 +00:00
Daniel Wagner-Hall
641420c5e0 Clean up room initialSync for guest users 2015-11-13 15:44:57 +00:00
369 changed files with 14275 additions and 9561 deletions

View File

@@ -29,7 +29,7 @@ Matthew Hodgson <matthew at matrix.org>
Emmanuel Rohee <manu at matrix.org>
* Supporting iOS clients (testability and fallback registration)
Turned to Dust <dwinslow86 at gmail.com>
* ArchLinux installation instructions
@@ -48,3 +48,12 @@ Muthu Subramanian <muthu.subramanian.karunanidhi at ericsson.com>
Steven Hammerton <steven.hammerton at openmarket.com>
* Add CAS support for registration and login.
Mads Robin Christensen <mads at v42 dot dk>
* CentOS 7 installation instructions.
Florent Violleau <floviolleau at gmail dot com>
* Add Raspberry Pi installation instructions and general troubleshooting items
Niklas Riekenbrauck <nikriek at gmail dot.com>
* Add JWT support for registration and login

View File

@@ -1,3 +1,242 @@
Changes in synapse v0.14.0 (2016-03-30)
=======================================
No changes from v0.14.0-rc2
Changes in synapse v0.14.0-rc2 (2016-03-23)
===========================================
Features:
* Add published room list API (PR #657)
Changes:
* Change various caches to consume less memory (PR #656, #658, #660, #662,
#663, #665)
* Allow rooms to be published without requiring an alias (PR #664)
* Intern common strings in caches to reduce memory footprint (#666)
Bug fixes:
* Fix reject invites over federation (PR #646)
* Fix bug where registration was not idempotent (PR #649)
* Update aliases event after deleting aliases (PR #652)
* Fix unread notification count, which was sometimes wrong (PR #661)
Changes in synapse v0.14.0-rc1 (2016-03-14)
===========================================
Features:
* Add event_id to response to state event PUT (PR #581)
* Allow guest users access to messages in rooms they have joined (PR #587)
* Add config for what state is included in a room invite (PR #598)
* Send the inviter's member event in room invite state (PR #607)
* Add error codes for malformed/bad JSON in /login (PR #608)
* Add support for changing the actions for default rules (PR #609)
* Add environment variable SYNAPSE_CACHE_FACTOR, default it to 0.1 (PR #612)
* Add ability for alias creators to delete aliases (PR #614)
* Add profile information to invites (PR #624)
Changes:
* Enforce user_id exclusivity for AS registrations (PR #572)
* Make adding push rules idempotent (PR #587)
* Improve presence performance (PR #582, #586)
* Change presence semantics for ``last_active_ago`` (PR #582, #586)
* Don't allow ``m.room.create`` to be changed (PR #596)
* Add 800x600 to default list of valid thumbnail sizes (PR #616)
* Always include kicks and bans in full /sync (PR #625)
* Send history visibility on boundary changes (PR #626)
* Register endpoint now returns a refresh_token (PR #637)
Bug fixes:
* Fix bug where we returned incorrect state in /sync (PR #573)
* Always return a JSON object from push rule API (PR #606)
* Fix bug where registering without a user id sometimes failed (PR #610)
* Report size of ExpiringCache in cache size metrics (PR #611)
* Fix rejection of invites to empty rooms (PR #615)
* Fix usage of ``bcrypt`` to not use ``checkpw`` (PR #619)
* Pin ``pysaml2`` dependency (PR #634)
* Fix bug in ``/sync`` where timeline order was incorrect for backfilled events
(PR #635)
Changes in synapse v0.13.3 (2016-02-11)
=======================================
* Fix bug where ``/sync`` would occasionally return events in the wrong room.
Changes in synapse v0.13.2 (2016-02-11)
=======================================
* Fix bug where ``/events`` would fail to skip some events if there had been
more events than the limit specified since the last request (PR #570)
Changes in synapse v0.13.1 (2016-02-10)
=======================================
* Bump matrix-angular-sdk (matrix web console) dependency to 0.6.8 to
pull in the fix for SYWEB-361 so that the default client can display
HTML messages again(!)
Changes in synapse v0.13.0 (2016-02-10)
=======================================
This version includes an upgrade of the schema, specifically adding an index to
the ``events`` table. This may cause synapse to pause for several minutes the
first time it is started after the upgrade.
Changes:
* Improve general performance (PR #540, #543, #544, #54, #549, #567)
* Change guest user ids to be incrementing integers (PR #550)
* Improve performance of public room list API (PR #552)
* Change profile API to omit keys rather than return null (PR #557)
* Add ``/media/r0`` endpoint prefix, which is equivalent to ``/media/v1/``
(PR #595)
Bug fixes:
* Fix bug with upgrading guest accounts where it would fail if you opened the
registration email on a different device (PR #547)
* Fix bug where unread count could be wrong (PR #568)
Changes in synapse v0.12.1-rc1 (2016-01-29)
===========================================
Features:
* Add unread notification counts in ``/sync`` (PR #456)
* Add support for inviting 3pids in ``/createRoom`` (PR #460)
* Add ability for guest accounts to upgrade (PR #462)
* Add ``/versions`` API (PR #468)
* Add ``event`` to ``/context`` API (PR #492)
* Add specific error code for invalid user names in ``/register`` (PR #499)
* Add support for push badge counts (PR #507)
* Add support for non-guest users to peek in rooms using ``/events`` (PR #510)
Changes:
* Change ``/sync`` so that guest users only get rooms they've joined (PR #469)
* Change to require unbanning before other membership changes (PR #501)
* Change default push rules to notify for all messages (PR #486)
* Change default push rules to not notify on membership changes (PR #514)
* Change default push rules in one to one rooms to only notify for events that
are messages (PR #529)
* Change ``/sync`` to reject requests with a ``from`` query param (PR #512)
* Change server manhole to use SSH rather than telnet (PR #473)
* Change server to require AS users to be registered before use (PR #487)
* Change server not to start when ASes are invalidly configured (PR #494)
* Change server to require ID and ``as_token`` to be unique for AS's (PR #496)
* Change maximum pagination limit to 1000 (PR #497)
Bug fixes:
* Fix bug where ``/sync`` didn't return when something under the leave key
changed (PR #461)
* Fix bug where we returned smaller rather than larger than requested
thumbnails when ``method=crop`` (PR #464)
* Fix thumbnails API to only return cropped thumbnails when asking for a
cropped thumbnail (PR #475)
* Fix bug where we occasionally still logged access tokens (PR #477)
* Fix bug where ``/events`` would always return immediately for guest users
(PR #480)
* Fix bug where ``/sync`` unexpectedly returned old left rooms (PR #481)
* Fix enabling and disabling push rules (PR #498)
* Fix bug where ``/register`` returned 500 when given unicode username
(PR #513)
Changes in synapse v0.12.0 (2016-01-04)
=======================================
* Expose ``/login`` under ``r0`` (PR #459)
Changes in synapse v0.12.0-rc3 (2015-12-23)
===========================================
* Allow guest accounts access to ``/sync`` (PR #455)
* Allow filters to include/exclude rooms at the room level
rather than just from the components of the sync for each
room. (PR #454)
* Include urls for room avatars in the response to ``/publicRooms`` (PR #453)
* Don't set an identicon as the avatar for a user when they register (PR #450)
* Add a ``display_name`` to third-party invites (PR #449)
* Send more information to the identity server for third-party invites so that
it can send richer messages to the invitee (PR #446)
* Cache the responses to ``/initialSync`` for 5 minutes. If a client
retries a request to ``/initialSync`` before a response was computed
to the first request then the same response is used for both requests
(PR #457)
* Fix a bug where synapse would always request the signing keys of
remote servers even when the key was cached locally (PR #452)
* Fix 500 when paginating search results (PR #447)
* Fix a bug where synapse was leaking raw email addresses in third-party invites
(PR #448)
Changes in synapse v0.12.0-rc2 (2015-12-14)
===========================================
* Add caches for whether rooms have been forgotten by a user (PR #434)
* Remove instructions to use ``--process-dependency-link`` since all of the
dependencies of synapse are on PyPI (PR #436)
* Parallelise the processing of ``/sync`` requests (PR #437)
* Fix race updating presence in ``/events`` (PR #444)
* Fix bug back-populating search results (PR #441)
* Fix bug calculating state in ``/sync`` requests (PR #442)
Changes in synapse v0.12.0-rc1 (2015-12-10)
===========================================
* Host the client APIs released as r0 by
https://matrix.org/docs/spec/r0.0.0/client_server.html
on paths prefixed by ``/_matrix/client/r0``. (PR #430, PR #415, PR #400)
* Updates the client APIs to match r0 of the matrix specification.
* All APIs return events in the new event format, old APIs also include
the fields needed to parse the event using the old format for
compatibility. (PR #402)
* Search results are now given as a JSON array rather than
a JSON object (PR #405)
* Miscellaneous changes to search (PR #403, PR #406, PR #412)
* Filter JSON objects may now be passed as query parameters to ``/sync``
(PR #431)
* Fix implementation of ``/admin/whois`` (PR #418)
* Only include the rooms that user has left in ``/sync`` if the client
requests them in the filter (PR #423)
* Don't push for ``m.room.message`` by default (PR #411)
* Add API for setting per account user data (PR #392)
* Allow users to forget rooms (PR #385)
* Performance improvements and monitoring:
* Add per-request counters for CPU time spent on the main python thread.
(PR #421, PR #420)
* Add per-request counters for time spent in the database (PR #429)
* Make state updates in the C+S API idempotent (PR #416)
* Only fire ``user_joined_room`` if the user has actually joined. (PR #410)
* Reuse a single http client, rather than creating new ones (PR #413)
* Fixed a bug upgrading from older versions of synapse on postgresql (PR #417)
Changes in synapse v0.11.1 (2015-11-20)
=======================================
* Add extra options to search API (PR #394)
* Fix bug where we did not correctly cap federation retry timers. This meant it
could take several hours for servers to start talking to resurrected servers,
even when they were receiving traffic from them (PR #393)
* Don't advertise login token flow unless CAS is enabled. This caused issues
where some clients would always use the fallback API if they did not
recognize all login flows (PR #391)
* Change /v2 sync API to rename ``private_user_data`` to ``account_data``
(PR #386)
* Change /v2 sync API to remove the ``event_map`` and rename keys in ``rooms``
object (PR #389)
Changes in synapse v0.11.0-r2 (2015-11-19)
==========================================

View File

@@ -21,5 +21,6 @@ recursive-include synapse/static *.html
recursive-include synapse/static *.js
exclude jenkins.sh
exclude jenkins*.sh
prune demo/etc

View File

@@ -111,18 +111,35 @@ Installing prerequisites on ArchLinux::
sudo pacman -S base-devel python2 python-pip \
python-setuptools python-virtualenv sqlite3
Installing prerequisites on CentOS 7::
sudo yum install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
lcms2-devel libwebp-devel tcl-devel tk-devel \
python-virtualenv libffi-devel openssl-devel
sudo yum groupinstall "Development Tools"
Installing prerequisites on Mac OS X::
xcode-select --install
sudo easy_install pip
sudo pip install virtualenv
Installing prerequisites on Raspbian::
sudo apt-get install build-essential python2.7-dev libffi-dev \
python-pip python-setuptools sqlite3 \
libssl-dev python-virtualenv libjpeg-dev
sudo pip install --upgrade pip
sudo pip install --upgrade ndg-httpsclient
sudo pip install --upgrade virtualenv
To install the synapse homeserver run::
virtualenv -p python2.7 ~/.synapse
source ~/.synapse/bin/activate
pip install --upgrade setuptools
pip install --process-dependency-links https://github.com/matrix-org/synapse/tarball/master
pip install https://github.com/matrix-org/synapse/tarball/master
This installs synapse, along with the libraries it uses, into a virtual
environment under ``~/.synapse``. Feel free to pick a different directory
@@ -138,15 +155,20 @@ Note that these packages do not include a client - choose one from
https://matrix.org/blog/try-matrix-now/ (or build your own with
https://github.com/matrix-org/matrix-js-sdk/).
Finally, Martin Giess has created an auto-deployment process with vagrant/ansible,
tested with VirtualBox/AWS/DigitalOcean - see https://github.com/EMnify/matrix-synapse-auto-deploy
for details.
To set up your homeserver, run (in your virtualenv, as before)::
cd ~/.synapse
python -m synapse.app.homeserver \
--server-name machine.my.domain.name \
--config-path homeserver.yaml \
--generate-config
--generate-config \
--report-stats=[yes|no]
Substituting your host and domain name as appropriate.
...substituting your host and domain name as appropriate.
This will generate you a config file that you can then customise, but it will
also generate a set of keys for you. These keys will allow your Home Server to
@@ -154,15 +176,15 @@ identify itself to other Home Servers, so don't lose or delete them. It would be
wise to back them up somewhere safe. If, for whatever reason, you do need to
change your Home Server's keys, you may find that other Home Servers have the
old key cached. If you update the signing key, you should change the name of the
key in the <server name>.signing.key file (the second word, which by default is
, 'auto') to something different.
key in the <server name>.signing.key file (the second word) to something different.
By default, registration of new users is disabled. You can either enable
registration in the config by specifying ``enable_registration: true``
(it is then recommended to also set up CAPTCHA), or
(it is then recommended to also set up CAPTCHA - see docs/CAPTCHA_SETUP), or
you can use the command line to register new users::
$ source ~/.synapse/bin/activate
$ synctl start # if not already running
$ register_new_matrix_user -c homeserver.yaml https://localhost:8448
New user localpart: erikj
Password:
@@ -172,6 +194,16 @@ you can use the command line to register new users::
For reliable VoIP calls to be routed via this homeserver, you MUST configure
a TURN server. See docs/turn-howto.rst for details.
Running Synapse
===============
To actually run your new homeserver, pick a working directory for Synapse to
run (e.g. ``~/.synapse``), and::
cd ~/.synapse
source ./bin/activate
synctl start
Using PostgreSQL
================
@@ -194,16 +226,6 @@ may have a few regressions relative to SQLite.
For information on how to install and use PostgreSQL, please see
`docs/postgres.rst <docs/postgres.rst>`_.
Running Synapse
===============
To actually run your new homeserver, pick a working directory for Synapse to
run (e.g. ``~/.synapse``), and::
cd ~/.synapse
source ./bin/activate
synctl start
Platform Specific Instructions
==============================
@@ -225,8 +247,7 @@ pip may be outdated (6.0.7-1 and needs to be upgraded to 6.0.8-1 )::
You also may need to explicitly specify python 2.7 again during the install
request::
pip2.7 install --process-dependency-links \
https://github.com/matrix-org/synapse/tarball/master
pip2.7 install https://github.com/matrix-org/synapse/tarball/master
If you encounter an error with lib bcrypt causing a Wrong ELF Class:
ELFCLASS32 (x64 Systems), you may need to reinstall py-bcrypt to correctly
@@ -246,6 +267,14 @@ During setup of Synapse you need to call python2.7 directly again::
...substituting your host and domain name as appropriate.
FreeBSD
-------
Synapse can be installed via FreeBSD Ports or Packages:
- Ports: ``cd /usr/ports/net/py-matrix-synapse && make install clean``
- Packages: ``pkg install py27-matrix-synapse``
Windows Install
---------------
Synapse can be installed on Cygwin. It requires the following Cygwin packages:
@@ -285,12 +314,23 @@ Troubleshooting
Troubleshooting Installation
----------------------------
Synapse requires pip 1.7 or later, so if your OS provides too old a version and
you get errors about ``error: no such option: --process-dependency-links`` you
Synapse requires pip 1.7 or later, so if your OS provides too old a version you
may need to manually upgrade it::
sudo pip install --upgrade pip
Installing may fail with ``Could not find any downloads that satisfy the requirement pymacaroons-pynacl (from matrix-synapse==0.12.0)``.
You can fix this by manually upgrading pip and virtualenv::
sudo pip install --upgrade virtualenv
You can next rerun ``virtualenv -p python2.7 synapse`` to update the virtual env.
Installing may fail during installing virtualenv with ``InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. For more information, see https://urllib3.readthedocs.org/en/latest/security.html#insecureplatformwarning.``
You can fix this by manually installing ndg-httpsclient::
pip install --upgrade ndg-httpsclient
Installing may fail with ``mock requires setuptools>=17.1. Aborting installation``.
You can fix this by upgrading setuptools::
@@ -430,6 +470,10 @@ SRV record, as that is the name other machines will expect it to have::
python -m synapse.app.homeserver --config-path homeserver.yaml
If you've already generated the config file, you need to edit the "server_name"
in your ```homeserver.yaml``` file. If you've already started Synapse and a
database has been created, you will have to recreate the database.
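For instance, a minimal recovery sketch (assuming the default SQLite database
file ``homeserver.db`` in the working directory — an assumption, not something
stated above):
cd ~/.synapse
source ./bin/activate
# Update server_name in homeserver.yaml to the name in your SRV record, then
# remove the old database so it is rebuilt on the next start.
# (homeserver.db is the assumed default SQLite file.)
rm homeserver.db
synctl start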
You may additionally want to pass one or more "-v" options, in order to
increase the verbosity of logging output; at least for initial testing.
@@ -481,7 +525,6 @@ Logging In To An Existing Account
Just enter the ``@localpart:my.domain.here`` Matrix user ID and password into
the form and click the Login button.
Identity Servers
================
@@ -501,6 +544,26 @@ as the primary means of identity and E2E encryption is not complete. As such,
we are running a single identity server (https://matrix.org) at the current
time.
Password reset
==============
If a user has registered an email address to their account using an identity
server, they can request a password-reset token via clients such as Vector.
A manual password reset can be done via direct database access as follows.
First calculate the hash of the new password:
$ source ~/.synapse/bin/activate
$ ./scripts/hash_password
Password:
Confirm password:
$2a$12$xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
Then update the `users` table in the database:
UPDATE users SET password_hash='$2a$12$xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
WHERE name='@test:test.com';
Where's the spec?!
==================
@@ -522,3 +585,20 @@ Building internal API documentation::
python setup.py build_sphinx
Halp!! Synapse eats all my RAM!
===============================
Synapse's architecture is quite RAM hungry currently - we deliberately
cache a lot of recent room data and metadata in RAM in order to speed up
common requests. We'll improve this in future, but for now the easiest
way to reduce the RAM usage (at the risk of slowing things down)
is to set the almost-undocumented ``SYNAPSE_CACHE_FACTOR`` environment
variable. Roughly speaking, a SYNAPSE_CACHE_FACTOR of 1.0 will max out
at around 3-4GB of resident memory - this is what we currently run
matrix.org on. The default setting is currently 0.1, which is probably
around a ~700MB footprint. You can dial it down further to 0.02 if
desired, which targets roughly ~512MB. Conversely you can dial it up if
you need performance for lots of users and have a box with a lot of RAM.
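As a rough illustration (a sketch reusing the ``synctl`` commands shown above;
the 0.02 value is the figure mentioned in this paragraph):
cd ~/.synapse
source ./bin/activate
# Restart synapse with a smaller cache factor (targets roughly ~512MB).
synctl stop
export SYNAPSE_CACHE_FACTOR=0.02
synctl start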

View File

@@ -30,6 +30,19 @@ running:
python synapse/python_dependencies.py | xargs -n1 pip install
Upgrading to v0.11.0
====================
This release includes the option to send anonymous usage stats to matrix.org,
and requires that administrators explicitly opt in or out by setting the
``report_stats`` option to either ``true`` or ``false``.
We would really appreciate it if you could help our project out by reporting
anonymized usage statistics from your homeserver. Only very basic aggregate
data (e.g. number of users) will be reported, but it helps us to track the
growth of the Matrix community, and helps us to make Matrix a success, as well
as to convince other networks that they should peer with us.
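As a concrete example (a sketch reusing the ``--generate-config`` invocation
from the README above; that ``report_stats`` lives in ``homeserver.yaml`` is
our assumption):
# Opt out while (re)generating the config:
python -m synapse.app.homeserver \
    --server-name machine.my.domain.name \
    --config-path homeserver.yaml \
    --generate-config \
    --report-stats=no
# ...or set the option directly in homeserver.yaml:
# report_stats: false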
Upgrading to v0.9.0
===================

View File

@@ -1,6 +1,6 @@
#!/usr/bin/env python
# Copyright 2014 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,4 +1,4 @@
# Copyright 2014 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,4 +1,4 @@
# Copyright 2014 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,4 +1,4 @@
# Copyright 2014 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

151
contrib/graph/graph3.py Normal file
View File

@@ -0,0 +1,151 @@
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pydot
import cgi
import simplejson as json
import datetime
import argparse
from synapse.events import FrozenEvent
from synapse.util.frozenutils import unfreeze
def make_graph(file_name, room_id, file_prefix, limit):
print "Reading lines"
with open(file_name) as f:
lines = f.readlines()
print "Read lines"
events = [FrozenEvent(json.loads(line)) for line in lines]
print "Loaded events."
events.sort(key=lambda e: e.depth)
print "Sorted events"
if limit:
events = events[-int(limit):]
node_map = {}
graph = pydot.Dot(graph_name="Test")
for event in events:
t = datetime.datetime.fromtimestamp(
float(event.origin_server_ts) / 1000
).strftime('%Y-%m-%d %H:%M:%S,%f')
content = json.dumps(unfreeze(event.get_dict()["content"]), indent=4)
content = content.replace("\n", "<br/>\n")
print content
content = []
for key, value in unfreeze(event.get_dict()["content"]).items():
if value is None:
value = "<null>"
elif isinstance(value, basestring):
pass
else:
value = json.dumps(value)
content.append(
"<b>%s</b>: %s," % (
cgi.escape(key, quote=True).encode("ascii", 'xmlcharrefreplace'),
cgi.escape(value, quote=True).encode("ascii", 'xmlcharrefreplace'),
)
)
content = "<br/>\n".join(content)
print content
label = (
"<"
"<b>%(name)s </b><br/>"
"Type: <b>%(type)s </b><br/>"
"State key: <b>%(state_key)s </b><br/>"
"Content: <b>%(content)s </b><br/>"
"Time: <b>%(time)s </b><br/>"
"Depth: <b>%(depth)s </b><br/>"
">"
) % {
"name": event.event_id,
"type": event.type,
"state_key": event.get("state_key", None),
"content": content,
"time": t,
"depth": event.depth,
}
node = pydot.Node(
name=event.event_id,
label=label,
)
node_map[event.event_id] = node
graph.add_node(node)
print "Created Nodes"
for event in events:
for prev_id, _ in event.prev_events:
try:
end_node = node_map[prev_id]
except:
end_node = pydot.Node(
name=prev_id,
label="<<b>%s</b>>" % (prev_id,),
)
node_map[prev_id] = end_node
graph.add_node(end_node)
edge = pydot.Edge(node_map[event.event_id], end_node)
graph.add_edge(edge)
print "Created edges"
graph.write('%s.dot' % file_prefix, format='raw', prog='dot')
print "Created Dot"
graph.write_svg("%s.svg" % file_prefix, prog='dot')
print "Created svg"
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generate a PDU graph for a given room by reading "
"from a file with line deliminated events. \n"
"Requires pydot."
)
parser.add_argument(
"-p", "--prefix", dest="prefix",
help="String to prefix output files with",
default="graph_output"
)
parser.add_argument(
"-l", "--limit",
help="Only retrieve the last N events.",
)
parser.add_argument('event_file')
parser.add_argument('room')
args = parser.parse_args()
make_graph(args.event_file, args.room, args.prefix, args.limit)

View File

@@ -18,8 +18,8 @@ encoding use, e.g.::
This would create an appropriate database named ``synapse`` owned by the
``synapse_user`` user (which must already exist).
Set up client
=============
Set up client in Debian/Ubuntu
==============================
Postgres support depends on the postgres python connector ``psycopg2``. In the
virtual env::
@@ -27,6 +27,19 @@ virtual env::
sudo apt-get install libpq-dev
pip install psycopg2
Set up client in RHEL/CentOS 7
==============================
Make sure you have the appropriate version of postgres-devel installed. For a
postgres 9.4, use the postgres 9.4 packages from
[here](https://wiki.postgresql.org/wiki/YUM_Installation).
As with Debian/Ubuntu, postgres support depends on the postgres python connector
``psycopg2``. In the virtual env::
sudo yum install postgresql-devel libpqxx-devel.x86_64
export PATH=/usr/pgsql-9.4/bin/:$PATH
pip install psycopg2
Synapse config
==============

22
jenkins-flake8.sh Executable file
View File

@@ -0,0 +1,22 @@
#!/bin/bash
set -eux
: ${WORKSPACE:="$(pwd)"}
export PYTHONDONTWRITEBYTECODE=yep
export SYNAPSE_CACHE_FACTOR=1
# Output test results as junit xml
export TRIAL_FLAGS="--reporter=subunit"
export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml"
# Write coverage reports to a separate file for each process
export COVERAGE_OPTS="-p"
export DUMP_COVERAGE_COMMAND="coverage help"
# Output flake8 violations to violations.flake8.log
export PEP8SUFFIX="--output-file=violations.flake8.log"
rm .coverage* || echo "No coverage files to remove"
tox -e packaging -e pep8

61
jenkins-postgres.sh Executable file
View File

@@ -0,0 +1,61 @@
#!/bin/bash
set -eux
: ${WORKSPACE:="$(pwd)"}
export PYTHONDONTWRITEBYTECODE=yep
export SYNAPSE_CACHE_FACTOR=1
# Output test results as junit xml
export TRIAL_FLAGS="--reporter=subunit"
export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml"
# Write coverage reports to a separate file for each process
export COVERAGE_OPTS="-p"
export DUMP_COVERAGE_COMMAND="coverage help"
# Output flake8 violations to violations.flake8.log
# Don't exit with non-0 status code on Jenkins,
# so that the build steps continue and a later step can decide whether to
# UNSTABLE or FAILURE this build.
export PEP8SUFFIX="--output-file=violations.flake8.log || echo flake8 finished with status code \$?"
rm .coverage* || echo "No coverage files to remove"
tox --notest -e py27
TOX_BIN=$WORKSPACE/.tox/py27/bin
$TOX_BIN/pip install psycopg2
: ${GIT_BRANCH:="origin/$(git rev-parse --abbrev-ref HEAD)"}
if [[ ! -e .sytest-base ]]; then
git clone https://github.com/matrix-org/sytest.git .sytest-base --mirror
else
(cd .sytest-base; git fetch -p)
fi
rm -rf sytest
git clone .sytest-base sytest --shared
cd sytest
git checkout "${GIT_BRANCH}" || (echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop" ; git checkout develop)
: ${PORT_BASE:=8000}
./jenkins/prep_sytest_for_postgres.sh
echo >&2 "Running sytest with PostgreSQL";
./jenkins/install_and_run.sh --coverage \
--python $TOX_BIN/python \
--synapse-directory $WORKSPACE \
--port-base $PORT_BASE
cd ..
cp sytest/.coverage.* .
# Combine the coverage reports
echo "Combining:" .coverage.*
$TOX_BIN/python -m coverage combine
# Output coverage to coverage.xml
$TOX_BIN/coverage xml -o coverage.xml

55
jenkins-sqlite.sh Executable file
View File

@@ -0,0 +1,55 @@
#!/bin/bash
set -eux
: ${WORKSPACE:="$(pwd)"}
export PYTHONDONTWRITEBYTECODE=yep
export SYNAPSE_CACHE_FACTOR=1
# Output test results as junit xml
export TRIAL_FLAGS="--reporter=subunit"
export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml"
# Write coverage reports to a separate file for each process
export COVERAGE_OPTS="-p"
export DUMP_COVERAGE_COMMAND="coverage help"
# Output flake8 violations to violations.flake8.log
# Don't exit with non-0 status code on Jenkins,
# so that the build steps continue and a later step can decide whether to
# UNSTABLE or FAILURE this build.
export PEP8SUFFIX="--output-file=violations.flake8.log || echo flake8 finished with status code \$?"
rm .coverage* || echo "No coverage files to remove"
tox --notest -e py27
TOX_BIN=$WORKSPACE/.tox/py27/bin
: ${GIT_BRANCH:="origin/$(git rev-parse --abbrev-ref HEAD)"}
if [[ ! -e .sytest-base ]]; then
git clone https://github.com/matrix-org/sytest.git .sytest-base --mirror
else
(cd .sytest-base; git fetch -p)
fi
rm -rf sytest
git clone .sytest-base sytest --shared
cd sytest
git checkout "${GIT_BRANCH}" || (echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop" ; git checkout develop)
: ${PORT_BASE:=8500}
./jenkins/install_and_run.sh --coverage \
--python $TOX_BIN/python \
--synapse-directory $WORKSPACE \
--port-base $PORT_BASE
cd ..
cp sytest/.coverage.* .
# Combine the coverage reports
echo "Combining:" .coverage.*
$TOX_BIN/python -m coverage combine
# Output coverage to coverage.xml
$TOX_BIN/coverage xml -o coverage.xml

25
jenkins-unittests.sh Executable file
View File

@@ -0,0 +1,25 @@
#!/bin/bash
set -eux
: ${WORKSPACE:="$(pwd)"}
export PYTHONDONTWRITEBYTECODE=yep
export SYNAPSE_CACHE_FACTOR=1
# Output test results as junit xml
export TRIAL_FLAGS="--reporter=subunit"
export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml"
# Write coverage reports to a separate file for each process
export COVERAGE_OPTS="-p"
export DUMP_COVERAGE_COMMAND="coverage help"
# Output flake8 violations to violations.flake8.log
# Don't exit with non-0 status code on Jenkins,
# so that the build steps continue and a later step can decide whether to
# UNSTABLE or FAILURE this build.
export PEP8SUFFIX="--output-file=violations.flake8.log || echo flake8 finished with status code \$?"
rm .coverage* || echo "No coverage files to remove"
tox -e py27

View File

@@ -1,13 +1,18 @@
#!/bin/bash -eu
#!/bin/bash
set -eux
: ${WORKSPACE:="$(pwd)"}
export PYTHONDONTWRITEBYTECODE=yep
export SYNAPSE_CACHE_FACTOR=1
# Output test results as junit xml
export TRIAL_FLAGS="--reporter=subunit"
export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml"
# Output coverage to coverage.xml
export DUMP_COVERAGE_COMMAND="coverage xml -o coverage.xml"
# Write coverage reports to a separate file for each process
export COVERAGE_OPTS="-p"
export DUMP_COVERAGE_COMMAND="coverage help"
# Output flake8 violations to violations.flake8.log
# Don't exit with non-0 status code on Jenkins,
@@ -15,16 +20,22 @@ export DUMP_COVERAGE_COMMAND="coverage xml -o coverage.xml"
# UNSTABLE or FAILURE this build.
export PEP8SUFFIX="--output-file=violations.flake8.log || echo flake8 finished with status code \$?"
rm .coverage* || echo "No coverage files to remove"
tox
: ${GIT_BRANCH:="$(git rev-parse --abbrev-ref HEAD)"}
: ${GIT_BRANCH:="origin/$(git rev-parse --abbrev-ref HEAD)"}
set +u
. .tox/py27/bin/activate
set -u
TOX_BIN=$WORKSPACE/.tox/py27/bin
if [[ ! -e .sytest-base ]]; then
git clone https://github.com/matrix-org/sytest.git .sytest-base --mirror
else
(cd .sytest-base; git fetch -p)
fi
rm -rf sytest
git clone https://github.com/matrix-org/sytest.git sytest
git clone .sytest-base sytest --shared
cd sytest
git checkout "${GIT_BRANCH}" || (echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop" ; git checkout develop)
@@ -36,4 +47,40 @@ export PERL5LIB PERL_MB_OPT PERL_MM_OPT
./install-deps.pl
./run-tests.pl -O tap --synapse-directory .. --all > results.tap
: ${PORT_BASE:=8000}
echo >&2 "Running sytest with SQLite3";
./run-tests.pl --coverage -O tap --synapse-directory $WORKSPACE \
--python $TOX_BIN/python --all --port-base $PORT_BASE > results-sqlite3.tap
RUN_POSTGRES=""
for port in $(($PORT_BASE + 1)) $(($PORT_BASE + 2)); do
if psql synapse_jenkins_$port <<< ""; then
RUN_POSTGRES="$RUN_POSTGRES:$port"
cat > localhost-$port/database.yaml << EOF
name: psycopg2
args:
database: synapse_jenkins_$port
EOF
fi
done
# Run if both postgresql databases exist
if test "$RUN_POSTGRES" = ":$(($PORT_BASE + 1)):$(($PORT_BASE + 2))"; then
echo >&2 "Running sytest with PostgreSQL";
$TOX_BIN/pip install psycopg2
./run-tests.pl --coverage -O tap --synapse-directory $WORKSPACE \
--python $TOX_BIN/python --all --port-base $PORT_BASE > results-postgresql.tap
else
echo >&2 "Skipping running sytest with PostgreSQL, $RUN_POSTGRES"
fi
cd ..
cp sytest/.coverage.* .
# Combine the coverage reports
echo "Combining:" .coverage.*
$TOX_BIN/python -m coverage combine
# Output coverage to coverage.xml
$TOX_BIN/coverage xml -o coverage.xml

View File

@@ -1,5 +1,5 @@
#!/usr/bin/perl -pi
# Copyright 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,7 +14,7 @@
# limitations under the License.
$copyright = <<EOT;
/* Copyright 2015 OpenMarket Ltd
/* Copyright 2016 OpenMarket Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

View File

@@ -1,5 +1,5 @@
#!/usr/bin/perl -pi
# Copyright 2014 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,7 +14,7 @@
# limitations under the License.
$copyright = <<EOT;
# Copyright 2015 OpenMarket Ltd
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -79,16 +79,19 @@ def defined_names(prefix, defs, names):
defined_names(prefix + name + ".", funcs, names)
def used_names(prefix, defs, names):
def used_names(prefix, item, defs, names):
for name, funcs in defs.get('def', {}).items():
used_names(prefix + name + ".", funcs, names)
used_names(prefix + name + ".", name, funcs, names)
for name, funcs in defs.get('class', {}).items():
used_names(prefix + name + ".", funcs, names)
used_names(prefix + name + ".", name, funcs, names)
path = prefix.rstrip('.')
for used in defs.get('uses', ()):
if used in names:
names[used].setdefault('used', []).append(prefix.rstrip('.'))
if item:
names[item].setdefault('uses', []).append(used)
names[used].setdefault('used', {}).setdefault(item, []).append(path)
if __name__ == '__main__':
@@ -109,6 +112,18 @@ if __name__ == '__main__':
"directories", nargs='+', metavar="DIR",
help="Directories to search for definitions"
)
parser.add_argument(
"--referrers", default=0, type=int,
help="Include referrers up to the given depth"
)
parser.add_argument(
"--referred", default=0, type=int,
help="Include referred down to the given depth"
)
parser.add_argument(
"--format", default="yaml",
help="Output format, one of 'yaml' or 'dot'"
)
args = parser.parse_args()
definitions = {}
@@ -124,7 +139,7 @@ if __name__ == '__main__':
defined_names(filepath + ":", defs, names)
for filepath, defs in definitions.items():
used_names(filepath + ":", defs, names)
used_names(filepath + ":", None, defs, names)
patterns = [re.compile(pattern) for pattern in args.pattern or ()]
ignore = [re.compile(pattern) for pattern in args.ignore or ()]
@@ -139,4 +154,43 @@ if __name__ == '__main__':
continue
result[name] = definition
yaml.dump(result, sys.stdout, default_flow_style=False)
referrer_depth = args.referrers
referrers = set()
while referrer_depth:
referrer_depth -= 1
for entry in result.values():
for used_by in entry.get("used", ()):
referrers.add(used_by)
for name, definition in names.items():
if not name in referrers:
continue
if ignore and any(pattern.match(name) for pattern in ignore):
continue
result[name] = definition
referred_depth = args.referred
referred = set()
while referred_depth:
referred_depth -= 1
for entry in result.values():
for uses in entry.get("uses", ()):
referred.add(uses)
for name, definition in names.items():
if not name in referred:
continue
if ignore and any(pattern.match(name) for pattern in ignore):
continue
result[name] = definition
if args.format == 'yaml':
yaml.dump(result, sys.stdout, default_flow_style=False)
elif args.format == 'dot':
print "digraph {"
for name, entry in result.items():
print name
for used_by in entry.get("used", ()):
if used_by in result:
print used_by, "->", name
print "}"
else:
raise ValueError("Unknown format %r" % (args.format))
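A hypothetical invocation of the updated script with the new ``--format`` and
``--referrers`` options (the ``scripts-dev/definitions.py`` path and the
availability of Graphviz's ``dot`` are assumptions, not stated in this diff):
# Emit the definition graph as dot, including one level of referrers, and render it.
python scripts-dev/definitions.py --format dot --referrers 1 synapse | dot -Tsvg -o definitions.svg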

24
scripts-dev/dump_macaroon.py Executable file
View File

@@ -0,0 +1,24 @@
#!/usr/bin/env python2
import pymacaroons
import sys
if len(sys.argv) == 1:
sys.stderr.write("usage: %s macaroon [key]\n" % (sys.argv[0],))
sys.exit(1)
macaroon_string = sys.argv[1]
key = sys.argv[2] if len(sys.argv) > 2 else None
macaroon = pymacaroons.Macaroon.deserialize(macaroon_string)
print macaroon.inspect()
print ""
verifier = pymacaroons.Verifier()
verifier.satisfy_general(lambda c: True)
try:
verifier.verify(macaroon, key)
print "Signature is correct"
except Exception as e:
print e.message

View File

@@ -0,0 +1,62 @@
#! /usr/bin/python
import ast
import argparse
import os
import sys
import yaml
PATTERNS_V1 = []
PATTERNS_V2 = []
RESULT = {
"v1": PATTERNS_V1,
"v2": PATTERNS_V2,
}
class CallVisitor(ast.NodeVisitor):
def visit_Call(self, node):
if isinstance(node.func, ast.Name):
name = node.func.id
else:
return
if name == "client_path_patterns":
PATTERNS_V1.append(node.args[0].s)
elif name == "client_v2_patterns":
PATTERNS_V2.append(node.args[0].s)
def find_patterns_in_code(input_code):
input_ast = ast.parse(input_code)
visitor = CallVisitor()
visitor.visit(input_ast)
def find_patterns_in_file(filepath):
with open(filepath) as f:
find_patterns_in_code(f.read())
parser = argparse.ArgumentParser(description='Find url patterns.')
parser.add_argument(
"directories", nargs='+', metavar="DIR",
help="Directories to search for definitions"
)
args = parser.parse_args()
for directory in args.directories:
for root, dirs, files in os.walk(directory):
for filename in files:
if filename.endswith(".py"):
filepath = os.path.join(root, filename)
find_patterns_in_file(filepath)
PATTERNS_V1.sort()
PATTERNS_V2.sort()
yaml.dump(RESULT, sys.stdout, default_flow_style=False)
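For context, this visitor only records string-literal first arguments passed to client_path_patterns or client_v2_patterns. A hypothetical servlet declaration of the kind it would pick up (the URL patterns here are illustrative, not taken from this changeset):

PATTERNS = client_path_patterns("/rooms/(?P<room_id>[^/]*)/send/(?P<event_type>[^/]*)")
V2_PATTERNS = client_v2_patterns("/sync$")

Any file containing such a call contributes its pattern string to the v1 or v2 list in the YAML dumped above.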


@@ -0,0 +1,67 @@
import requests
import collections
import sys
import time
import json
Entry = collections.namedtuple("Entry", "name position rows")
ROW_TYPES = {}
def row_type_for_columns(name, column_names):
column_names = tuple(column_names)
row_type = ROW_TYPES.get((name, column_names))
if row_type is None:
row_type = collections.namedtuple(name, column_names)
ROW_TYPES[(name, column_names)] = row_type
return row_type
def parse_response(content):
streams = json.loads(content)
result = {}
for name, value in streams.items():
row_type = row_type_for_columns(name, value["field_names"])
position = value["position"]
rows = [row_type(*row) for row in value["rows"]]
result[name] = Entry(name, position, rows)
return result
def replicate(server, streams):
return parse_response(requests.get(
server + "/_synapse/replication",
verify=False,
params=streams
).content)
def main():
server = sys.argv[1]
streams = None
while not streams:
try:
streams = {
row.name: row.position
for row in replicate(server, {"streams":"-1"})["streams"].rows
}
except requests.exceptions.ConnectionError as e:
time.sleep(0.1)
while True:
try:
results = replicate(server, streams)
except:
sys.stdout.write("connection_lost("+ repr(streams) + ")\n")
break
for update in results.values():
for row in update.rows:
sys.stdout.write(repr(row) + "\n")
streams[update.name] = update.position
if __name__=='__main__':
main()
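As a rough sketch of the payload shape parse_response expects, here is a hand-written example; the stream name, field names and row values are invented, and only the field_names/position/rows structure comes from the code above:

sample = json.dumps({
    "events": {
        "field_names": ["position", "event_id", "room_id", "type"],
        "position": 100,
        "rows": [[100, "$abc:example.com", "!room:example.com", "m.room.message"]],
    }
})
parsed = parse_response(sample)
entry = parsed["events"]
print(entry.position)           # 100
print(entry.rows[0].event_id)   # $abc:example.com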

scripts/hash_password Executable file

@@ -0,0 +1,39 @@
#!/usr/bin/env python
import argparse
import bcrypt
import getpass
bcrypt_rounds=12
def prompt_for_pass():
password = getpass.getpass("Password: ")
if not password:
raise Exception("Password cannot be blank.")
confirm_password = getpass.getpass("Confirm password: ")
if password != confirm_password:
raise Exception("Passwords do not match.")
return password
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Calculate the hash of a new password, so that passwords"
" can be reset")
parser.add_argument(
"-p", "--password",
default=None,
help="New password for user. Will prompt if omitted.",
)
args = parser.parse_args()
password = args.password
if not password:
password = prompt_for_pass()
print bcrypt.hashpw(password, bcrypt.gensalt(bcrypt_rounds))
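To check a password against a hash produced this way, re-hashing with the stored hash as the salt is the usual idiom and works with the same bcrypt module this script imports; a minimal sketch:

import bcrypt
hashed = bcrypt.hashpw("s3cret", bcrypt.gensalt(12))
assert bcrypt.hashpw("s3cret", hashed) == hashed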


@@ -1,6 +1,6 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.


@@ -1,6 +1,6 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -309,8 +309,8 @@ class Porter(object):
**self.postgres_config["args"]
)
sqlite_engine = create_engine("sqlite3")
postgres_engine = create_engine("psycopg2")
sqlite_engine = create_engine(FakeConfig(sqlite_config))
postgres_engine = create_engine(FakeConfig(postgres_config))
self.sqlite_store = Store(sqlite_db_pool, sqlite_engine)
self.postgres_store = Store(postgres_db_pool, postgres_engine)
@@ -792,3 +792,8 @@ if __name__ == "__main__":
if end_error_exec_info:
exc_type, exc_value, exc_traceback = end_error_exec_info
traceback.print_exception(exc_type, exc_value, exc_traceback)
class FakeConfig:
def __init__(self, database_config):
self.database_config = database_config


@@ -16,3 +16,4 @@ ignore =
[flake8]
max-line-length = 90
ignore = W503 ; W503 requires that binary operators be at the end, not start, of lines. Erik doesn't like it.


@@ -1,6 +1,6 @@
#!/usr/bin/env python
# Copyright 2014 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.


@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,4 +16,4 @@
""" This is a reference implementation of a Matrix home server.
"""
__version__ = "0.11.0-r2"
__version__ = "0.14.0"


@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.


@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014 - 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -22,8 +22,9 @@ from twisted.internet import defer
from synapse.api.constants import EventTypes, Membership, JoinRules
from synapse.api.errors import AuthError, Codes, SynapseError, EventSizeError
from synapse.types import RoomID, UserID, EventID
from synapse.types import Requester, RoomID, UserID, EventID
from synapse.util.logutils import log_function
from synapse.util.logcontext import preserve_context_over_fn
from unpaddedbase64 import decode_base64
import logging
@@ -207,6 +208,13 @@ class Auth(object):
user_id, room_id
))
if membership == Membership.LEAVE:
forgot = yield self.store.did_forget(user_id, room_id)
if forgot:
raise AuthError(403, "User %s not in room %s" % (
user_id, room_id
))
defer.returnValue(member)
@defer.inlineCallbacks
@@ -426,31 +434,46 @@ class Auth(object):
if event.user_id != invite_event.user_id:
return False
try:
public_key = invite_event.content["public_key"]
if signed["mxid"] != event.state_key:
return False
if signed["token"] != token:
return False
for server, signature_block in signed["signatures"].items():
for key_name, encoded_signature in signature_block.items():
if not key_name.startswith("ed25519:"):
return False
verify_key = decode_verify_key_bytes(
key_name,
decode_base64(public_key)
)
verify_signed_json(signed, server, verify_key)
# We got the public key from the invite, so we know that the
# correct server signed the signed bundle.
# The caller is responsible for checking that the signing
# server has not revoked that public key.
return True
if signed["mxid"] != event.state_key:
return False
except (KeyError, SignatureVerifyException,):
if signed["token"] != token:
return False
for public_key_object in self.get_public_keys(invite_event):
public_key = public_key_object["public_key"]
try:
for server, signature_block in signed["signatures"].items():
for key_name, encoded_signature in signature_block.items():
if not key_name.startswith("ed25519:"):
continue
verify_key = decode_verify_key_bytes(
key_name,
decode_base64(public_key)
)
verify_signed_json(signed, server, verify_key)
# We got the public key from the invite, so we know that the
# correct server signed the signed bundle.
# The caller is responsible for checking that the signing
# server has not revoked that public key.
return True
except (KeyError, SignatureVerifyException,):
continue
return False
def get_public_keys(self, invite_event):
public_keys = []
if "public_key" in invite_event.content:
o = {
"public_key": invite_event.content["public_key"],
}
if "key_validity_url" in invite_event.content:
o["key_validity_url"] = invite_event.content["key_validity_url"]
public_keys.append(o)
public_keys.extend(invite_event.content.get("public_keys", []))
return public_keys
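Condensed, the verification loop above amounts to the following sketch (names are illustrative; it assumes the signedjson and unpaddedbase64 helpers already imported by this module):

from signedjson.key import decode_verify_key_bytes
from signedjson.sign import verify_signed_json, SignatureVerifyException
from unpaddedbase64 import decode_base64

def bundle_is_signed(signed, public_key_b64):
    # Accept the bundle if any ed25519 signature on it verifies against the
    # public key advertised in the invite event.
    for server, signature_block in signed.get("signatures", {}).items():
        for key_name in signature_block:
            if not key_name.startswith("ed25519:"):
                continue
            verify_key = decode_verify_key_bytes(key_name, decode_base64(public_key_b64))
            try:
                verify_signed_json(signed, server, verify_key)
                return True
            except SignatureVerifyException:
                continue
    return False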
def _get_power_level_event(self, auth_events):
key = (EventTypes.PowerLevels, "", )
return auth_events.get(key)
@@ -503,36 +526,15 @@ class Auth(object):
"""
# Can optionally look elsewhere in the request (e.g. headers)
try:
access_token = request.args["access_token"][0]
# Check for application service tokens with a user_id override
try:
app_service = yield self.store.get_app_service_by_token(
access_token
)
if not app_service:
raise KeyError
user_id = app_service.sender
if "user_id" in request.args:
user_id = request.args["user_id"][0]
if not app_service.is_interested_in_user(user_id):
raise AuthError(
403,
"Application service cannot masquerade as this user."
)
if not user_id:
raise KeyError
user_id = yield self._get_appservice_user_id(request.args)
if user_id:
request.authenticated_entity = user_id
defer.returnValue(
Requester(UserID.from_string(user_id), "", False)
)
defer.returnValue((UserID.from_string(user_id), "", False))
return
except KeyError:
pass # normal users won't have the user_id query parameter set.
user_info = yield self._get_user_by_access_token(access_token)
access_token = request.args["access_token"][0]
user_info = yield self.get_user_by_access_token(access_token)
user = user_info["user"]
token_id = user_info["token_id"]
is_guest = user_info["is_guest"]
@@ -543,7 +545,8 @@ class Auth(object):
default=[""]
)[0]
if user and access_token and ip_addr:
self.store.insert_client_ip(
preserve_context_over_fn(
self.store.insert_client_ip,
user=user,
access_token=access_token,
ip=ip_addr,
@@ -557,7 +560,7 @@ class Auth(object):
request.authenticated_entity = user.to_string()
defer.returnValue((user, token_id, is_guest,))
defer.returnValue(Requester(user, token_id, is_guest))
except KeyError:
raise AuthError(
self.TOKEN_NOT_FOUND_HTTP_STATUS, "Missing access token.",
@@ -565,7 +568,34 @@ class Auth(object):
)
@defer.inlineCallbacks
def _get_user_by_access_token(self, token):
def _get_appservice_user_id(self, request_args):
app_service = yield self.store.get_app_service_by_token(
request_args["access_token"][0]
)
if app_service is None:
defer.returnValue(None)
if "user_id" not in request_args:
defer.returnValue(app_service.sender)
user_id = request_args["user_id"][0]
if app_service.sender == user_id:
defer.returnValue(app_service.sender)
if not app_service.is_interested_in_user(user_id):
raise AuthError(
403,
"Application service cannot masquerade as this user."
)
if not (yield self.store.get_user_by_id(user_id)):
raise AuthError(
403,
"Application service has not registered this user"
)
defer.returnValue(user_id)
@defer.inlineCallbacks
def get_user_by_access_token(self, token):
""" Get a registered user's ID.
Args:
@@ -576,7 +606,7 @@ class Auth(object):
AuthError if no user by that token exists or the token is invalid.
"""
try:
ret = yield self._get_user_from_macaroon(token)
ret = yield self.get_user_from_macaroon(token)
except AuthError:
# TODO(daniel): Remove this fallback when all existing access tokens
# have been re-issued as macaroons.
@@ -584,13 +614,10 @@ class Auth(object):
defer.returnValue(ret)
@defer.inlineCallbacks
def _get_user_from_macaroon(self, macaroon_str):
def get_user_from_macaroon(self, macaroon_str):
try:
macaroon = pymacaroons.Macaroon.deserialize(macaroon_str)
self.validate_macaroon(
macaroon, "access",
[lambda c: c.startswith("time < ")]
)
self.validate_macaroon(macaroon, "access", False)
user_prefix = "user_id = "
user = None
@@ -638,22 +665,34 @@ class Auth(object):
errcode=Codes.UNKNOWN_TOKEN
)
def validate_macaroon(self, macaroon, type_string, additional_validation_functions):
def validate_macaroon(self, macaroon, type_string, verify_expiry):
"""
validate that a Macaroon is understood by and was signed by this server.
Args:
macaroon(pymacaroons.Macaroon): The macaroon to validate
type_string(str): The kind of token this is (e.g. "access", "refresh")
verify_expiry(bool): Whether to verify whether the macaroon has expired.
This should really always be True, but no clients currently implement
token refresh, so we can't enforce expiry yet.
"""
v = pymacaroons.Verifier()
v.satisfy_exact("gen = 1")
v.satisfy_exact("type = " + type_string)
v.satisfy_general(lambda c: c.startswith("user_id = "))
v.satisfy_exact("guest = true")
if verify_expiry:
v.satisfy_general(self._verify_expiry)
else:
v.satisfy_general(lambda c: c.startswith("time < "))
for validation_function in additional_validation_functions:
v.satisfy_general(validation_function)
v.verify(macaroon, self.hs.config.macaroon_secret_key)
v = pymacaroons.Verifier()
v.satisfy_general(self._verify_recognizes_caveats)
v.verify(macaroon, self.hs.config.macaroon_secret_key)
def verify_expiry(self, caveat):
def _verify_expiry(self, caveat):
prefix = "time < "
if not caveat.startswith(prefix):
return False
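For reference, a minimal sketch of a macaroon that satisfies the verifier rules in validate_macaroon above (all values are illustrative; the real signing key is hs.config.macaroon_secret_key):

import pymacaroons

macaroon = pymacaroons.Macaroon(
    location="example.com",
    identifier="key",
    key="macaroon_secret_key",
)
macaroon.add_first_party_caveat("gen = 1")
macaroon.add_first_party_caveat("type = access")
macaroon.add_first_party_caveat("user_id = @alice:example.com")
macaroon.add_first_party_caveat("time < 1458000000000")

v = pymacaroons.Verifier()
v.satisfy_exact("gen = 1")
v.satisfy_exact("type = access")
v.satisfy_general(lambda c: c.startswith("user_id = "))
v.satisfy_general(lambda c: c.startswith("time < "))
v.verify(macaroon, "macaroon_secret_key")  # raises if any caveat is unsatisfied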
@@ -674,6 +713,7 @@ class Auth(object):
def _look_up_user_by_access_token(self, token):
ret = yield self.store.get_user_by_access_token(token)
if not ret:
logger.warn("Unrecognised access token - not in store: %s" % (token,))
raise AuthError(
self.TOKEN_NOT_FOUND_HTTP_STATUS, "Unrecognised access token.",
errcode=Codes.UNKNOWN_TOKEN
@@ -691,6 +731,7 @@ class Auth(object):
token = request.args["access_token"][0]
service = yield self.store.get_app_service_by_token(token)
if not service:
logger.warn("Unrecognised appservice access token: %s" % (token,))
raise AuthError(
self.TOKEN_NOT_FOUND_HTTP_STATUS,
"Unrecognised access token.",
@@ -762,7 +803,7 @@ class Auth(object):
if "third_party_invite" in event.content:
key = (
EventTypes.ThirdPartyInvite,
event.content["third_party_invite"]["token"]
event.content["third_party_invite"]["signed"]["token"]
)
third_party_invite = current_state.get(key)
if third_party_invite:
@@ -773,17 +814,16 @@ class Auth(object):
return auth_ids
@log_function
def _can_send_event(self, event, auth_events):
def _get_send_level(self, etype, state_key, auth_events):
key = (EventTypes.PowerLevels, "", )
send_level_event = auth_events.get(key)
send_level = None
if send_level_event:
send_level = send_level_event.content.get("events", {}).get(
event.type
etype
)
if send_level is None:
if hasattr(event, "state_key"):
if state_key is not None:
send_level = send_level_event.content.get(
"state_default", 50
)
@@ -797,6 +837,13 @@ class Auth(object):
else:
send_level = 0
return send_level
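As a worked example of what _get_send_level reads, consider a hypothetical m.room.power_levels content (values made up):

power_levels = {
    "events": {"m.room.name": 100, "m.room.message": 0},
    "state_default": 50,
}
# Sending m.room.name (a state event) requires level 100 from the "events" map;
# a state event not listed there falls back to state_default (50); with no
# power_levels event at all the required level defaults to 0.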
@log_function
def _can_send_event(self, event, auth_events):
send_level = self._get_send_level(
event.type, event.get("state_key", None), auth_events
)
user_level = self._get_user_power_level(event.user_id, auth_events)
if user_level < send_level:
@@ -844,7 +891,7 @@ class Auth(object):
redact_level = self._get_named_level(auth_events, "redact", 50)
if user_level > redact_level:
if user_level >= redact_level:
return False
redacter_domain = EventID.from_string(event.event_id).domain
@@ -941,3 +988,43 @@ class Auth(object):
"You don't have permission to add ops level greater "
"than your own"
)
@defer.inlineCallbacks
def check_can_change_room_list(self, room_id, user):
"""Check if the user is allowed to edit the room's entry in the
published room list.
Args:
room_id (str)
user (UserID)
"""
is_admin = yield self.is_server_admin(user)
if is_admin:
defer.returnValue(True)
user_id = user.to_string()
yield self.check_joined_room(room_id, user_id)
# We currently require the user is a "moderator" in the room. We do this
# by checking if they would (theoretically) be able to change the
# m.room.aliases events
power_level_event = yield self.state.get_current_state(
room_id, EventTypes.PowerLevels, ""
)
auth_events = {}
if power_level_event:
auth_events[(EventTypes.PowerLevels, "")] = power_level_event
send_level = self._get_send_level(
EventTypes.Aliases, "", auth_events
)
user_level = self._get_user_power_level(user_id, auth_events)
if user_level < send_level:
raise AuthError(
403,
"This server requires you to be a moderator in the room to"
" edit its room list entry"
)


@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -32,7 +32,6 @@ class PresenceState(object):
OFFLINE = u"offline"
UNAVAILABLE = u"unavailable"
ONLINE = u"online"
FREE_FOR_CHAT = u"free_for_chat"
class JoinRules(object):


@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -29,6 +29,7 @@ class Codes(object):
USER_IN_USE = "M_USER_IN_USE"
ROOM_IN_USE = "M_ROOM_IN_USE"
BAD_PAGINATION = "M_BAD_PAGINATION"
BAD_STATE = "M_BAD_STATE"
UNKNOWN = "M_UNKNOWN"
NOT_FOUND = "M_NOT_FOUND"
MISSING_TOKEN = "M_MISSING_TOKEN"
@@ -42,6 +43,7 @@ class Codes(object):
EXCLUSIVE = "M_EXCLUSIVE"
THREEPID_AUTH_FAILED = "M_THREEPID_AUTH_FAILED"
THREEPID_IN_USE = "THREEPID_IN_USE"
INVALID_USERNAME = "M_INVALID_USERNAME"
class CodeMessageException(RuntimeError):


@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -15,6 +15,8 @@
from synapse.api.errors import SynapseError
from synapse.types import UserID, RoomID
import ujson as json
class Filtering(object):
@@ -28,14 +30,14 @@ class Filtering(object):
return result
def add_user_filter(self, user_localpart, user_filter):
self._check_valid_filter(user_filter)
self.check_valid_filter(user_filter)
return self.store.add_user_filter(user_localpart, user_filter)
# TODO(paul): surely we should probably add a delete_user_filter or
# replace_user_filter at some point? There's no REST API specified for
# them however
def _check_valid_filter(self, user_filter_json):
def check_valid_filter(self, user_filter_json):
"""Check if the provided filter is valid.
This inspects all definitions contained within the filter.
@@ -50,11 +52,11 @@ class Filtering(object):
# many definitions.
top_level_definitions = [
"presence"
"presence", "account_data"
]
room_level_definitions = [
"state", "timeline", "ephemeral", "private_user_data"
"state", "timeline", "ephemeral", "account_data"
]
for key in top_level_definitions:
@@ -62,10 +64,29 @@ class Filtering(object):
self._check_definition(user_filter_json[key])
if "room" in user_filter_json:
self._check_definition_room_lists(user_filter_json["room"])
for key in room_level_definitions:
if key in user_filter_json["room"]:
self._check_definition(user_filter_json["room"][key])
def _check_definition_room_lists(self, definition):
"""Check that "rooms" and "not_rooms" are lists of room ids if they
are present
Args:
definition(dict): The filter definition
Raises:
SynapseError: If there was a problem with this definition.
"""
# check rooms are valid room IDs
room_id_keys = ["rooms", "not_rooms"]
for key in room_id_keys:
if key in definition:
if type(definition[key]) != list:
raise SynapseError(400, "Expected %s to be a list." % key)
for room_id in definition[key]:
RoomID.from_string(room_id)
def _check_definition(self, definition):
"""Check if the provided definition is valid.
@@ -85,14 +106,7 @@ class Filtering(object):
400, "Expected JSON object, not %s" % (definition,)
)
# check rooms are valid room IDs
room_id_keys = ["rooms", "not_rooms"]
for key in room_id_keys:
if key in definition:
if type(definition[key]) != list:
raise SynapseError(400, "Expected %s to be a list." % key)
for room_id in definition[key]:
RoomID.from_string(room_id)
self._check_definition_room_lists(definition)
# check senders are valid user IDs
user_id_keys = ["senders", "not_senders"]
@@ -117,51 +131,58 @@ class Filtering(object):
class FilterCollection(object):
def __init__(self, filter_json):
self.filter_json = filter_json
self._filter_json = filter_json
self.room_timeline_filter = Filter(
self.filter_json.get("room", {}).get("timeline", {})
room_filter_json = self._filter_json.get("room", {})
self._room_filter = Filter({
k: v for k, v in room_filter_json.items()
if k in ("rooms", "not_rooms")
})
self._room_timeline_filter = Filter(room_filter_json.get("timeline", {}))
self._room_state_filter = Filter(room_filter_json.get("state", {}))
self._room_ephemeral_filter = Filter(room_filter_json.get("ephemeral", {}))
self._room_account_data = Filter(room_filter_json.get("account_data", {}))
self._presence_filter = Filter(filter_json.get("presence", {}))
self._account_data = Filter(filter_json.get("account_data", {}))
self.include_leave = filter_json.get("room", {}).get(
"include_leave", False
)
self.room_state_filter = Filter(
self.filter_json.get("room", {}).get("state", {})
)
def __repr__(self):
return "<FilterCollection %s>" % (json.dumps(self._filter_json),)
self.room_ephemeral_filter = Filter(
self.filter_json.get("room", {}).get("ephemeral", {})
)
self.room_private_user_data = Filter(
self.filter_json.get("room", {}).get("private_user_data", {})
)
self.presence_filter = Filter(
self.filter_json.get("presence", {})
)
def get_filter_json(self):
return self._filter_json
def timeline_limit(self):
return self.room_timeline_filter.limit()
return self._room_timeline_filter.limit()
def presence_limit(self):
return self.presence_filter.limit()
return self._presence_filter.limit()
def ephemeral_limit(self):
return self.room_ephemeral_filter.limit()
return self._room_ephemeral_filter.limit()
def filter_presence(self, events):
return self.presence_filter.filter(events)
return self._presence_filter.filter(events)
def filter_account_data(self, events):
return self._account_data.filter(events)
def filter_room_state(self, events):
return self.room_state_filter.filter(events)
return self._room_state_filter.filter(self._room_filter.filter(events))
def filter_room_timeline(self, events):
return self.room_timeline_filter.filter(events)
return self._room_timeline_filter.filter(self._room_filter.filter(events))
def filter_room_ephemeral(self, events):
return self.room_ephemeral_filter.filter(events)
return self._room_ephemeral_filter.filter(self._room_filter.filter(events))
def filter_room_private_user_data(self, events):
return self.room_private_user_data.filter(events)
def filter_room_account_data(self, events):
return self._room_account_data.filter(self._room_filter.filter(events))
class Filter(object):
@@ -174,18 +195,19 @@ class Filter(object):
Returns:
bool: True if the event matches
"""
if isinstance(event, dict):
return self.check_fields(
event.get("room_id", None),
event.get("sender", None),
event.get("type", None),
)
else:
return self.check_fields(
getattr(event, "room_id", None),
getattr(event, "sender", None),
event.type,
)
sender = event.get("sender", None)
if not sender:
# Presence events have their 'sender' in content.user_id
content = event.get("content")
# account_data has been allowed to have non-dict content, so check type first
if isinstance(content, dict):
sender = content.get("user_id")
return self.check_fields(
event.get("room_id", None),
sender,
event.get("type", None),
)
def check_fields(self, room_id, sender, event_type):
"""Checks whether the filter matches the given event fields.
@@ -245,3 +267,6 @@ def _matches_wildcard(actual_value, filter_value):
return actual_value.startswith(type_prefix)
else:
return actual_value == filter_value
DEFAULT_FILTER_COLLECTION = FilterCollection({})
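Putting this together, a hypothetical filter definition in the shape FilterCollection now consumes (field values are illustrative; the room-level rooms/not_rooms lists feed the new _room_filter, which is applied to timeline, state, ephemeral and account_data alike):

example_filter = FilterCollection({
    "presence": {"limit": 10},
    "account_data": {"limit": 10},
    "room": {
        "rooms": ["!keep:example.com"],
        "not_rooms": ["!drop:example.com"],
        "timeline": {"limit": 20},
        "state": {},
        "ephemeral": {"limit": 10},
        "account_data": {},
        "include_leave": False,
    },
})
print(example_filter.timeline_limit())  # 20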


@@ -1,4 +1,4 @@
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.


@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -23,5 +23,6 @@ WEB_CLIENT_PREFIX = "/_matrix/client"
CONTENT_REPO_PREFIX = "/_matrix/content"
SERVER_KEY_PREFIX = "/_matrix/key/v1"
SERVER_KEY_V2_PREFIX = "/_matrix/key/v2"
MEDIA_PREFIX = "/_matrix/media/v1"
MEDIA_PREFIX = "/_matrix/media/r0"
LEGACY_MEDIA_PREFIX = "/_matrix/media/v1"
APP_SERVICE_PREFIX = "/_matrix/appservice/v1"


@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,3 +12,22 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.dont_write_bytecode = True
from synapse.python_dependencies import (
check_requirements, MissingRequirementError
) # NOQA
try:
check_requirements()
except MissingRequirementError as e:
message = "\n".join([
"Missing Requirement: %s" % (e.message,),
"To install run:",
" pip install --upgrade --force \"%s\"" % (e.dependency,),
"",
])
sys.stderr.writelines(message)
sys.exit(1)


@@ -1,6 +1,6 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,61 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.dont_write_bytecode = True
from synapse.python_dependencies import (
check_requirements, DEPENDENCY_LINKS, MissingRequirementError
)
if __name__ == '__main__':
try:
check_requirements()
except MissingRequirementError as e:
message = "\n".join([
"Missing Requirement: %s" % (e.message,),
"To install run:",
" pip install --upgrade --force \"%s\"" % (e.dependency,),
"",
])
sys.stderr.writelines(message)
sys.exit(1)
from synapse.storage.engines import create_engine, IncorrectDatabaseSetup
from synapse.storage import are_all_users_on_domain
from synapse.storage.prepare_database import UpgradeDatabaseException
from synapse.server import HomeServer
from twisted.internet import reactor, task, defer
from twisted.application import service
from twisted.enterprise import adbapi
from twisted.web.resource import Resource, EncodingResourceWrapper
from twisted.web.static import File
from twisted.web.server import Site, GzipEncoderFactory, Request
from synapse.http.server import JsonResource, RootRedirect
from synapse.rest.media.v0.content_repository import ContentRepoResource
from synapse.rest.media.v1.media_repository import MediaRepositoryResource
from synapse.rest.key.v1.server_key_resource import LocalKey
from synapse.rest.key.v2 import KeyApiV2Resource
from synapse.http.matrixfederationclient import MatrixFederationHttpClient
from synapse.api.urls import (
CLIENT_PREFIX, FEDERATION_PREFIX, WEB_CLIENT_PREFIX, CONTENT_REPO_PREFIX,
SERVER_KEY_PREFIX, MEDIA_PREFIX, CLIENT_V2_ALPHA_PREFIX, STATIC_PREFIX,
SERVER_KEY_V2_PREFIX,
)
from synapse.config.homeserver import HomeServerConfig
from synapse.crypto import context_factory
from synapse.util.logcontext import LoggingContext
from synapse.rest.client.v1 import ClientV1RestResource
from synapse.rest.client.v2_alpha import ClientV2AlphaRestResource
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
from synapse import events
from daemonize import Daemonize
import twisted.manhole.telnet
import synapse
import contextlib
@@ -77,93 +22,95 @@ import os
import re
import resource
import subprocess
import sys
import time
from synapse.config._base import ConfigError
from synapse.python_dependencies import (
check_requirements, DEPENDENCY_LINKS
)
from synapse.rest import ClientRestResource
from synapse.storage.engines import create_engine, IncorrectDatabaseSetup
from synapse.storage import are_all_users_on_domain
from synapse.storage.prepare_database import UpgradeDatabaseException
from synapse.server import HomeServer
from twisted.conch.manhole import ColoredManhole
from twisted.conch.insults import insults
from twisted.conch import manhole_ssh
from twisted.cred import checkers, portal
from twisted.internet import reactor, task, defer
from twisted.application import service
from twisted.web.resource import Resource, EncodingResourceWrapper
from twisted.web.static import File
from twisted.web.server import Site, GzipEncoderFactory, Request
from synapse.http.server import RootRedirect
from synapse.rest.media.v0.content_repository import ContentRepoResource
from synapse.rest.media.v1.media_repository import MediaRepositoryResource
from synapse.rest.key.v1.server_key_resource import LocalKey
from synapse.rest.key.v2 import KeyApiV2Resource
from synapse.api.urls import (
FEDERATION_PREFIX, WEB_CLIENT_PREFIX, CONTENT_REPO_PREFIX,
SERVER_KEY_PREFIX, LEGACY_MEDIA_PREFIX, MEDIA_PREFIX, STATIC_PREFIX,
SERVER_KEY_V2_PREFIX,
)
from synapse.config.homeserver import HomeServerConfig
from synapse.crypto import context_factory
from synapse.util.logcontext import LoggingContext
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
from synapse.replication.resource import ReplicationResource, REPLICATION_PREFIX
from synapse.federation.transport.server import TransportLayerServer
from synapse import events
from daemonize import Daemonize
logger = logging.getLogger("synapse.app.homeserver")
ACCESS_TOKEN_RE = re.compile(r'(\?.*access(_|%5[Ff])token=)[^&]*(.*)$')
def gz_wrap(r):
return EncodingResourceWrapper(r, [GzipEncoderFactory()])
def build_resource_for_web_client(hs):
webclient_path = hs.get_config().web_client_location
if not webclient_path:
try:
import syweb
except ImportError:
quit_with_error(
"Could not find a webclient.\n\n"
"Please either install the matrix-angular-sdk or configure\n"
"the location of the source to serve via the configuration\n"
"option `web_client_location`\n\n"
"To install the `matrix-angular-sdk` via pip, run:\n\n"
" pip install '%(dep)s'\n"
"\n"
"You can also disable hosting of the webclient via the\n"
"configuration option `web_client`\n"
% {"dep": DEPENDENCY_LINKS["matrix-angular-sdk"]}
)
syweb_path = os.path.dirname(syweb.__file__)
webclient_path = os.path.join(syweb_path, "webclient")
# GZip is disabled here due to
# https://twistedmatrix.com/trac/ticket/7678
# (It can stay enabled for the API resources: they call
# write() with the whole body and then finish() straight
# after and so do not trigger the bug.
# GzipFile was removed in commit 184ba09
# return GzipFile(webclient_path) # TODO configurable?
return File(webclient_path) # TODO configurable?
class SynapseHomeServer(HomeServer):
def build_http_client(self):
return MatrixFederationHttpClient(self)
def build_resource_for_client(self):
return ClientV1RestResource(self)
def build_resource_for_client_v2_alpha(self):
return ClientV2AlphaRestResource(self)
def build_resource_for_federation(self):
return JsonResource(self)
def build_resource_for_web_client(self):
webclient_path = self.get_config().web_client_location
if not webclient_path:
try:
import syweb
except ImportError:
quit_with_error(
"Could not find a webclient.\n\n"
"Please either install the matrix-angular-sdk or configure\n"
"the location of the source to serve via the configuration\n"
"option `web_client_location`\n\n"
"To install the `matrix-angular-sdk` via pip, run:\n\n"
" pip install '%(dep)s'\n"
"\n"
"You can also disable hosting of the webclient via the\n"
"configuration option `web_client`\n"
% {"dep": DEPENDENCY_LINKS["matrix-angular-sdk"]}
)
syweb_path = os.path.dirname(syweb.__file__)
webclient_path = os.path.join(syweb_path, "webclient")
# GZip is disabled here due to
# https://twistedmatrix.com/trac/ticket/7678
# (It can stay enabled for the API resources: they call
# write() with the whole body and then finish() straight
# after and so do not trigger the bug.
# GzipFile was removed in commit 184ba09
# return GzipFile(webclient_path) # TODO configurable?
return File(webclient_path) # TODO configurable?
def build_resource_for_static_content(self):
# This is old and should go away: not going to bother adding gzip
return File(
os.path.join(os.path.dirname(synapse.__file__), "static")
)
def build_resource_for_content_repo(self):
return ContentRepoResource(
self, self.config.uploads_path, self.auth, self.content_addr
)
def build_resource_for_media_repository(self):
return MediaRepositoryResource(self)
def build_resource_for_server_key(self):
return LocalKey(self)
def build_resource_for_server_key_v2(self):
return KeyApiV2Resource(self)
def build_resource_for_metrics(self):
if self.get_config().enable_metrics:
return MetricsResource(self)
else:
return None
def build_db_pool(self):
name = self.db_config["name"]
return adbapi.ConnectionPool(
name,
**self.db_config.get("args", {})
)
def _listener_http(self, config, listener_config):
port = listener_config["port"]
bind_address = listener_config.get("bind_address", "")
@@ -173,51 +120,58 @@ class SynapseHomeServer(HomeServer):
if tls and config.no_tls:
return
metrics_resource = self.get_resource_for_metrics()
resources = {}
for res in listener_config["resources"]:
for name in res["names"]:
if name == "client":
client_resource = ClientRestResource(self)
if res["compress"]:
client_v1 = gz_wrap(self.get_resource_for_client())
client_v2 = gz_wrap(self.get_resource_for_client_v2_alpha())
else:
client_v1 = self.get_resource_for_client()
client_v2 = self.get_resource_for_client_v2_alpha()
client_resource = gz_wrap(client_resource)
resources.update({
CLIENT_PREFIX: client_v1,
CLIENT_V2_ALPHA_PREFIX: client_v2,
"/_matrix/client/api/v1": client_resource,
"/_matrix/client/r0": client_resource,
"/_matrix/client/unstable": client_resource,
"/_matrix/client/v2_alpha": client_resource,
"/_matrix/client/versions": client_resource,
})
if name == "federation":
resources.update({
FEDERATION_PREFIX: self.get_resource_for_federation(),
FEDERATION_PREFIX: TransportLayerServer(self),
})
if name in ["static", "client"]:
resources.update({
STATIC_PREFIX: self.get_resource_for_static_content(),
STATIC_PREFIX: File(
os.path.join(os.path.dirname(synapse.__file__), "static")
),
})
if name in ["media", "federation", "client"]:
media_repo = MediaRepositoryResource(self)
resources.update({
MEDIA_PREFIX: self.get_resource_for_media_repository(),
CONTENT_REPO_PREFIX: self.get_resource_for_content_repo(),
MEDIA_PREFIX: media_repo,
LEGACY_MEDIA_PREFIX: media_repo,
CONTENT_REPO_PREFIX: ContentRepoResource(
self, self.config.uploads_path, self.auth, self.content_addr
),
})
if name in ["keys", "federation"]:
resources.update({
SERVER_KEY_PREFIX: self.get_resource_for_server_key(),
SERVER_KEY_V2_PREFIX: self.get_resource_for_server_key_v2(),
SERVER_KEY_PREFIX: LocalKey(self),
SERVER_KEY_V2_PREFIX: KeyApiV2Resource(self),
})
if name == "webclient":
resources[WEB_CLIENT_PREFIX] = self.get_resource_for_web_client()
resources[WEB_CLIENT_PREFIX] = build_resource_for_web_client(self)
if name == "metrics" and metrics_resource:
resources[METRICS_PREFIX] = metrics_resource
if name == "metrics" and self.get_config().enable_metrics:
resources[METRICS_PREFIX] = MetricsResource(self)
if name == "replication":
resources[REPLICATION_PREFIX] = ReplicationResource(self)
root_resource = create_resource_tree(resources)
if tls:
@@ -252,10 +206,21 @@ class SynapseHomeServer(HomeServer):
if listener["type"] == "http":
self._listener_http(config, listener)
elif listener["type"] == "manhole":
f = twisted.manhole.telnet.ShellFactory()
f.username = "matrix"
f.password = "rabbithole"
f.namespace['hs'] = self
checker = checkers.InMemoryUsernamePasswordDatabaseDontUse(
matrix="rabbithole"
)
rlm = manhole_ssh.TerminalRealm()
rlm.chainedProtocolFactory = lambda: insults.ServerProtocol(
ColoredManhole,
{
"__name__": "__console__",
"hs": self,
}
)
f = manhole_ssh.ConchFactory(portal.Portal(rlm, [checker]))
reactor.listenTCP(
listener["port"],
f,
@@ -280,6 +245,18 @@ class SynapseHomeServer(HomeServer):
except IncorrectDatabaseSetup as e:
quit_with_error(e.message)
def get_db_conn(self):
# Any param beginning with cp_ is a parameter for adbapi, and should
# not be passed to the database engine.
db_params = {
k: v for k, v in self.db_config.get("args", {}).items()
if not k.startswith("cp_")
}
db_conn = self.database_engine.module.connect(**db_params)
self.database_engine.on_new_connection(db_conn)
return db_conn
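A small illustration of the cp_* filtering above (the config keys are hypothetical): adbapi connection-pool settings are stripped before the remaining parameters reach the database driver.

db_args = {"database": "homeserver.db", "cp_min": 5, "cp_max": 10}
driver_params = {k: v for k, v in db_args.items() if not k.startswith("cp_")}
# driver_params == {"database": "homeserver.db"}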
def quit_with_error(error_string):
message_lines = error_string.split("\n")
@@ -362,10 +339,13 @@ def change_resource_limit(soft_file_no):
soft_file_no = hard
resource.setrlimit(resource.RLIMIT_NOFILE, (soft_file_no, hard))
logger.info("Set file limit to: %d", soft_file_no)
resource.setrlimit(
resource.RLIMIT_CORE, (resource.RLIM_INFINITY, resource.RLIM_INFINITY)
)
except (ValueError, resource.error) as e:
logger.warn("Failed to set file limit: %s", e)
logger.warn("Failed to set file or core limit: %s", e)
def setup(config_options):
@@ -377,11 +357,20 @@ def setup(config_options):
Returns:
HomeServer
"""
config = HomeServerConfig.load_config(
"Synapse Homeserver",
config_options,
generate_section="Homeserver"
)
try:
config = HomeServerConfig.load_config(
"Synapse Homeserver",
config_options,
generate_section="Homeserver"
)
except ConfigError as e:
sys.stderr.write("\n" + e.message + "\n")
sys.exit(1)
if not config:
# If a config isn't returned, and an exception isn't raised, we're just
# generating config files and shouldn't try to continue.
sys.exit(0)
config.setup_logging()
@@ -397,7 +386,7 @@ def setup(config_options):
tls_server_context_factory = context_factory.ServerContextFactory(config)
database_engine = create_engine(config.database_config["name"])
database_engine = create_engine(config)
config.database_config["args"]["cp_openfun"] = database_engine.on_new_connection
hs = SynapseHomeServer(
@@ -413,13 +402,7 @@ def setup(config_options):
logger.info("Preparing database: %s...", config.database_config['name'])
try:
db_conn = database_engine.module.connect(
**{
k: v for k, v in config.database_config.get("args", {}).items()
if not k.startswith("cp_")
}
)
db_conn = hs.get_db_conn()
database_engine.prepare_database(db_conn)
hs.run_startup_checks(db_conn, database_engine)
@@ -434,13 +417,17 @@ def setup(config_options):
logger.info("Database prepared in %s.", config.database_config['name'])
hs.setup()
hs.start_listening()
hs.get_pusherpool().start()
hs.get_state_handler().start_caching()
hs.get_datastore().start_profiling()
hs.get_datastore().start_doing_background_updates()
hs.get_replication_layer().start_get_pdu_cache()
def start():
hs.get_pusherpool().start()
hs.get_state_handler().start_caching()
hs.get_datastore().start_profiling()
hs.get_datastore().start_doing_background_updates()
hs.get_replication_layer().start_get_pdu_cache()
reactor.callWhenRunning(start)
return hs
@@ -479,9 +466,8 @@ class SynapseRequest(Request):
)
def get_redacted_uri(self):
return re.sub(
r'(\?.*access_token=)[^&]*(.*)$',
r'\1<redacted>\2',
return ACCESS_TOKEN_RE.sub(
r'\1<redacted>\3',
self.uri
)
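A quick sketch of what the precompiled ACCESS_TOKEN_RE does to a logged URI (example request, not from the changeset):

import re
ACCESS_TOKEN_RE = re.compile(r'(\?.*access(_|%5[Ff])token=)[^&]*(.*)$')
uri = "/_matrix/client/r0/sync?since=s72_4&access_token=SECRET&timeout=30000"
print(ACCESS_TOKEN_RE.sub(r'\1<redacted>\3', uri))
# /_matrix/client/r0/sync?since=s72_4&access_token=<redacted>&timeout=30000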
@@ -499,13 +485,28 @@ class SynapseRequest(Request):
self.start_time = int(time.time() * 1000)
def finished_processing(self):
try:
context = LoggingContext.current_context()
ru_utime, ru_stime = context.get_resource_usage()
db_txn_count = context.db_txn_count
db_txn_duration = context.db_txn_duration
except:
ru_utime, ru_stime = (0, 0)
db_txn_count, db_txn_duration = (0, 0)
self.site.access_logger.info(
"%s - %s - {%s}"
" Processed request: %dms %sB %s \"%s %s %s\" \"%s\"",
" Processed request: %dms (%dms, %dms) (%dms/%d)"
" %sB %s \"%s %s %s\" \"%s\"",
self.getClientIP(),
self.site.site_tag,
self.authenticated_entity,
int(time.time() * 1000) - self.start_time,
int(ru_utime * 1000),
int(ru_stime * 1000),
int(db_txn_duration * 1000),
int(db_txn_count),
self.sentLength,
self.code,
self.method,
@@ -642,7 +643,7 @@ def _resource_id(resource, path_seg):
the mapping should looks like _resource_id(A,C) = B.
Args:
resource (Resource): The *parent* Resource
resource (Resource): The *parent* Resourceb
path_seg (str): The name of the child Resource to be attached.
Returns:
str: A unique string which can be a key to the child Resource.
@@ -677,6 +678,7 @@ def run(hs):
@defer.inlineCallbacks
def phone_stats_home():
logger.info("Gathering stats for reporting")
now = int(hs.get_clock().time())
uptime = int(now - start_time)
if uptime < 0:
@@ -688,8 +690,8 @@ def run(hs):
stats["uptime_seconds"] = uptime
stats["total_users"] = yield hs.get_datastore().count_all_users()
all_rooms = yield hs.get_datastore().get_rooms(False)
stats["total_room_count"] = len(all_rooms)
room_count = yield hs.get_datastore().get_room_count()
stats["total_room_count"] = room_count
stats["daily_active_users"] = yield hs.get_datastore().count_daily_users()
daily_messages = yield hs.get_datastore().count_daily_messages()
@@ -707,9 +709,12 @@ def run(hs):
if hs.config.report_stats:
phone_home_task = task.LoopingCall(phone_stats_home)
logger.info("Scheduling stats reporting for 24 hour intervals")
phone_home_task.start(60 * 60 * 24, now=False)
def in_thread():
# Uncomment to enable tracing of log context changes.
# sys.settrace(logcontext_tracer)
with LoggingContext("run"):
change_resource_limit(hs.config.soft_file_limit)
reactor.run()
@@ -717,7 +722,7 @@ def run(hs):
if hs.config.daemonize:
if hs.config.print_pidfile:
print hs.config.pid_file
print (hs.config.pid_file)
daemon = Daemonize(
app="synapse-homeserver",


@@ -1,6 +1,6 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -29,13 +29,13 @@ NORMAL = "\x1b[m"
def start(configfile):
print "Starting ...",
print ("Starting ...")
args = SYNAPSE
args.extend(["--daemonize", "-c", configfile])
try:
subprocess.check_call(args)
print GREEN + "started" + NORMAL
print (GREEN + "started" + NORMAL)
except subprocess.CalledProcessError as e:
print (
RED +
@@ -48,7 +48,7 @@ def stop(pidfile):
if os.path.exists(pidfile):
pid = int(open(pidfile).read())
os.kill(pid, signal.SIGTERM)
print GREEN + "stopped" + NORMAL
print (GREEN + "stopped" + NORMAL)
def main():


@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.


@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -29,7 +29,7 @@ class ApplicationServiceApi(SimpleHttpClient):
pushing.
"""
def __init__(self, hs):
def __init__(self, hs):
super(ApplicationServiceApi, self).__init__(hs)
self.clock = hs.get_clock()


@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.


@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.


@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,6 +12,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.config._base import ConfigError
if __name__ == "__main__":
import sys
@@ -21,9 +22,13 @@ if __name__ == "__main__":
if action == "read":
key = sys.argv[2]
config = HomeServerConfig.load_config("", sys.argv[3:])
try:
config = HomeServerConfig.load_config("", sys.argv[3:])
except ConfigError as e:
sys.stderr.write("\n" + e.message + "\n")
sys.exit(1)
print getattr(config, key)
print (getattr(config, key))
sys.exit(0)
else:
sys.stderr.write("Unknown command %r\n" % (action,))


@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -17,7 +17,6 @@ import argparse
import errno
import os
import yaml
import sys
from textwrap import dedent
@@ -105,7 +104,7 @@ class Config(object):
dir_path = cls.abspath(dir_path)
try:
os.makedirs(dir_path)
except OSError, e:
except OSError as e:
if e.errno != errno.EEXIST:
raise
if not os.path.isdir(dir_path):
@@ -136,13 +135,20 @@ class Config(object):
results.append(getattr(cls, name)(self, *args, **kargs))
return results
def generate_config(self, config_dir_path, server_name, report_stats=None):
def generate_config(
self,
config_dir_path,
server_name,
is_generating_file,
report_stats=None,
):
default_config = "# vim:ft=yaml\n"
default_config += "\n\n".join(dedent(conf) for conf in self.invoke_all(
"default_config",
config_dir_path=config_dir_path,
server_name=server_name,
is_generating_file=is_generating_file,
report_stats=report_stats,
))
@@ -244,8 +250,10 @@ class Config(object):
server_name = config_args.server_name
if not server_name:
print "Must specify a server_name to a generate config for."
sys.exit(1)
raise ConfigError(
"Must specify a server_name to a generate config for."
" Pass -H server.name."
)
if not os.path.exists(config_dir_path):
os.makedirs(config_dir_path)
with open(config_path, "wb") as config_file:
@@ -253,6 +261,7 @@ class Config(object):
config_dir_path=config_dir_path,
server_name=server_name,
report_stats=(config_args.report_stats == "yes"),
is_generating_file=True
)
obj.invoke_all("generate_files", config)
config_file.write(config_bytes)
@@ -266,7 +275,7 @@ class Config(object):
"If this server name is incorrect, you will need to"
" regenerate the SSL certificates"
)
sys.exit(0)
return
else:
print (
"Config file %r already exists. Generating any missing key"
@@ -302,25 +311,25 @@ class Config(object):
specified_config.update(yaml_config)
if "server_name" not in specified_config:
sys.stderr.write("\n" + MISSING_SERVER_NAME + "\n")
sys.exit(1)
raise ConfigError(MISSING_SERVER_NAME)
server_name = specified_config["server_name"]
_, config = obj.generate_config(
config_dir_path=config_dir_path,
server_name=server_name
server_name=server_name,
is_generating_file=False,
)
config.pop("log_config")
config.update(specified_config)
if "report_stats" not in config:
sys.stderr.write(
"\n" + MISSING_REPORT_STATS_CONFIG_INSTRUCTIONS + "\n" +
MISSING_REPORT_STATS_SPIEL + "\n")
sys.exit(1)
raise ConfigError(
MISSING_REPORT_STATS_CONFIG_INSTRUCTIONS + "\n" +
MISSING_REPORT_STATS_SPIEL
)
if generate_keys:
obj.invoke_all("generate_files", config)
sys.exit(0)
return
obj.invoke_all("read_config", config)

synapse/config/api.py Normal file

@@ -0,0 +1,40 @@
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._base import Config
from synapse.api.constants import EventTypes
class ApiConfig(Config):
def read_config(self, config):
self.room_invite_state_types = config.get("room_invite_state_types", [
EventTypes.JoinRules,
EventTypes.CanonicalAlias,
EventTypes.RoomAvatar,
EventTypes.Name,
])
def default_config(cls, **kwargs):
return """\
## API Configuration ##
# A list of event types that will be included in the room_invite_state
room_invite_state_types:
- "{JoinRules}"
- "{CanonicalAlias}"
- "{RoomAvatar}"
- "{Name}"
""".format(**vars(EventTypes))


@@ -1,4 +1,4 @@
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.


@@ -1,4 +1,4 @@
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -29,10 +29,10 @@ class CaptchaConfig(Config):
## Captcha ##
# This Home Server's ReCAPTCHA public key.
recaptcha_private_key: "YOUR_PRIVATE_KEY"
recaptcha_public_key: "YOUR_PUBLIC_KEY"
# This Home Server's ReCAPTCHA private key.
recaptcha_public_key: "YOUR_PUBLIC_KEY"
recaptcha_private_key: "YOUR_PRIVATE_KEY"
# Enables ReCaptcha checks when registering, preventing signup
# unless a captcha is answered. Requires a valid ReCaptcha


@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.


@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.


@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -23,6 +23,7 @@ from .captcha import CaptchaConfig
from .voip import VoipConfig
from .registration import RegistrationConfig
from .metrics import MetricsConfig
from .api import ApiConfig
from .appservice import AppServiceConfig
from .key import KeyConfig
from .saml2 import SAML2Config
@@ -32,7 +33,7 @@ from .password import PasswordConfig
class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig,
RatelimitConfig, ContentRepositoryConfig, CaptchaConfig,
VoipConfig, RegistrationConfig, MetricsConfig,
VoipConfig, RegistrationConfig, MetricsConfig, ApiConfig,
AppServiceConfig, KeyConfig, SAML2Config, CasConfig,
PasswordConfig,):
pass


@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -22,8 +22,14 @@ from signedjson.key import (
read_signing_keys, write_signing_keys, NACL_ED25519
)
from unpaddedbase64 import decode_base64
from synapse.util.stringutils import random_string_with_symbols
import os
import hashlib
import logging
logger = logging.getLogger(__name__)
class KeyConfig(Config):
@@ -40,9 +46,29 @@ class KeyConfig(Config):
config["perspectives"]
)
def default_config(self, config_dir_path, server_name, **kwargs):
self.macaroon_secret_key = config.get(
"macaroon_secret_key", self.registration_shared_secret
)
if not self.macaroon_secret_key:
# Unfortunately, there are people out there that don't have this
# set. Lets just be "nice" and derive one from their secret key.
logger.warn("Config is missing missing macaroon_secret_key")
seed = self.signing_key[0].seed
self.macaroon_secret_key = hashlib.sha256(seed)
def default_config(self, config_dir_path, server_name, is_generating_file=False,
**kwargs):
base_key_name = os.path.join(config_dir_path, server_name)
if is_generating_file:
macaroon_secret_key = random_string_with_symbols(50)
else:
macaroon_secret_key = None
return """\
macaroon_secret_key: "%(macaroon_secret_key)s"
## Signing Keys ##
# Path to the signing key to sign messages with


@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.


@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.


@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.


@@ -1,4 +1,4 @@
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.


@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -23,22 +23,27 @@ from distutils.util import strtobool
class RegistrationConfig(Config):
def read_config(self, config):
self.disable_registration = not bool(
self.enable_registration = bool(
strtobool(str(config["enable_registration"]))
)
if "disable_registration" in config:
self.disable_registration = bool(
self.enable_registration = not bool(
strtobool(str(config["disable_registration"]))
)
self.registration_shared_secret = config.get("registration_shared_secret")
self.macaroon_secret_key = config.get("macaroon_secret_key")
self.bcrypt_rounds = config.get("bcrypt_rounds", 12)
self.trusted_third_party_id_servers = config["trusted_third_party_id_servers"]
self.allow_guest_access = config.get("allow_guest_access", False)
self.invite_3pid_guest = (
self.allow_guest_access and config.get("invite_3pid_guest", False)
)
def default_config(self, **kwargs):
registration_shared_secret = random_string_with_symbols(50)
macaroon_secret_key = random_string_with_symbols(50)
return """\
## Registration ##
@@ -49,8 +54,6 @@ class RegistrationConfig(Config):
# secret, even if registration is otherwise disabled.
registration_shared_secret: "%(registration_shared_secret)s"
macaroon_secret_key: "%(macaroon_secret_key)s"
# Set the number of bcrypt rounds used to generate password hash.
# Larger numbers increase the work factor needed to generate the hash.
# The default number of rounds is 12.
@@ -60,6 +63,12 @@ class RegistrationConfig(Config):
# participate in rooms hosted on this server which have been made
# accessible to anonymous users.
allow_guest_access: False
# The list of identity servers trusted to verify third party
# identifiers by this server.
trusted_third_party_id_servers:
- matrix.org
- vector.im
""" % locals()
def add_arguments(self, parser):
@@ -71,6 +80,6 @@ class RegistrationConfig(Config):
def read_arguments(self, args):
if args.enable_registration is not None:
self.disable_registration = not bool(
self.enable_registration = bool(
strtobool(str(args.enable_registration))
)


@@ -97,4 +97,7 @@ class ContentRepositoryConfig(Config):
- width: 640
height: 480
method: scale
- width: 800
height: 600
method: scale
""" % locals()


@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -133,6 +133,7 @@ class ServerConfig(Config):
# The domain name of the server, with optional explicit port.
# This is used by remote servers to connect to this server,
# e.g. matrix.org, localhost:8080, etc.
# This is also the last part of your UserID.
server_name: "%(server_name)s"
# When running as a daemon, the file to store the pid in
@@ -199,7 +200,7 @@ class ServerConfig(Config):
- names: [federation]
compress: false
# Turn on the twisted telnet manhole service on localhost on the given
# Turn on the twisted ssh manhole service on localhost on the given
# port.
# - port: 9000
# bind_address: 127.0.0.1

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,4 +1,4 @@
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,4 +1,4 @@
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -36,6 +36,7 @@ def fetch_server_key(server_name, ssl_context_factory, path=KEY_API_V1):
factory = SynapseKeyClientFactory()
factory.path = path
factory.host = server_name
endpoint = matrix_federation_endpoint(
reactor, server_name, ssl_context_factory, timeout=30
)
@@ -81,6 +82,8 @@ class SynapseKeyClientProtocol(HTTPClient):
self.host = self.transport.getHost()
logger.debug("Connected to %s", self.host)
self.sendCommand(b"GET", self.path)
if self.host:
self.sendHeader(b"Host", self.host)
self.endHeaders()
self.timer = reactor.callLater(
self.timeout,

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -18,6 +18,10 @@ from synapse.api.errors import SynapseError, Codes
from synapse.util.retryutils import get_retry_limiter
from synapse.util import unwrapFirstError
from synapse.util.async import ObservableDeferred
from synapse.util.logcontext import (
preserve_context_over_deferred, preserve_context_over_fn, PreserveLoggingContext,
preserve_fn
)
from twisted.internet import defer
@@ -142,40 +146,43 @@ class Keyring(object):
for server_name, _ in server_and_json
}
# We want to wait for any previous lookups to complete before
# proceeding.
wait_on_deferred = self.wait_for_previous_lookups(
[server_name for server_name, _ in server_and_json],
server_to_deferred,
)
with PreserveLoggingContext():
# Actually start fetching keys.
wait_on_deferred.addBoth(
lambda _: self.get_server_verify_keys(group_id_to_group, deferreds)
)
# We want to wait for any previous lookups to complete before
# proceeding.
wait_on_deferred = self.wait_for_previous_lookups(
[server_name for server_name, _ in server_and_json],
server_to_deferred,
)
# When we've finished fetching all the keys for a given server_name,
# resolve the deferred passed to `wait_for_previous_lookups` so that
# any lookups waiting will proceed.
server_to_gids = {}
# Actually start fetching keys.
wait_on_deferred.addBoth(
lambda _: self.get_server_verify_keys(group_id_to_group, deferreds)
)
def remove_deferreds(res, server_name, group_id):
server_to_gids[server_name].discard(group_id)
if not server_to_gids[server_name]:
d = server_to_deferred.pop(server_name, None)
if d:
d.callback(None)
return res
# When we've finished fetching all the keys for a given server_name,
# resolve the deferred passed to `wait_for_previous_lookups` so that
# any lookups waiting will proceed.
server_to_gids = {}
for g_id, deferred in deferreds.items():
server_name = group_id_to_group[g_id].server_name
server_to_gids.setdefault(server_name, set()).add(g_id)
deferred.addBoth(remove_deferreds, server_name, g_id)
def remove_deferreds(res, server_name, group_id):
server_to_gids[server_name].discard(group_id)
if not server_to_gids[server_name]:
d = server_to_deferred.pop(server_name, None)
if d:
d.callback(None)
return res
for g_id, deferred in deferreds.items():
server_name = group_id_to_group[g_id].server_name
server_to_gids.setdefault(server_name, set()).add(g_id)
deferred.addBoth(remove_deferreds, server_name, g_id)
# Pass those keys to handle_key_deferred so that the json object
# signatures can be verified
return [
handle_key_deferred(
preserve_context_over_fn(
handle_key_deferred,
group_id_to_group[g_id],
deferreds[g_id],
)
@@ -198,12 +205,13 @@ class Keyring(object):
if server_name in self.key_downloads
]
if wait_on:
yield defer.DeferredList(wait_on)
with PreserveLoggingContext():
yield defer.DeferredList(wait_on)
else:
break
for server_name, deferred in server_to_deferred.items():
d = ObservableDeferred(deferred)
d = ObservableDeferred(preserve_context_over_deferred(deferred))
self.key_downloads[server_name] = d
def rm(r, server_name):
@@ -230,7 +238,9 @@ class Keyring(object):
missing_keys = {}
for group in group_id_to_group.values():
missing_keys.setdefault(group.server_name, set()).union(group.key_ids)
missing_keys.setdefault(group.server_name, set()).update(
group.key_ids
)
for fn in key_fetch_fns:
results = yield fn(missing_keys.items())
@@ -242,12 +252,13 @@ class Keyring(object):
for group in group_id_to_group.values():
for key_id in group.key_ids:
if key_id in merged_results[group.server_name]:
group_id_to_deferred[group.group_id].callback((
group.group_id,
group.server_name,
key_id,
merged_results[group.server_name][key_id],
))
with PreserveLoggingContext():
group_id_to_deferred[group.group_id].callback((
group.group_id,
group.server_name,
key_id,
merged_results[group.server_name][key_id],
))
break
else:
missing_groups.setdefault(
@@ -381,28 +392,24 @@ class Keyring(object):
def get_server_verify_key_v2_indirect(self, server_names_and_key_ids,
perspective_name,
perspective_keys):
limiter = yield get_retry_limiter(
perspective_name, self.clock, self.store
)
with limiter:
# TODO(mark): Set the minimum_valid_until_ts to that needed by
# the events being validated or the current time if validating
# an incoming request.
query_response = yield self.client.post_json(
destination=perspective_name,
path=b"/_matrix/key/v2/query",
data={
u"server_keys": {
server_name: {
key_id: {
u"minimum_valid_until_ts": 0
} for key_id in key_ids
}
for server_name, key_ids in server_names_and_key_ids
# TODO(mark): Set the minimum_valid_until_ts to that needed by
# the events being validated or the current time if validating
# an incoming request.
query_response = yield self.client.post_json(
destination=perspective_name,
path=b"/_matrix/key/v2/query",
data={
u"server_keys": {
server_name: {
key_id: {
u"minimum_valid_until_ts": 0
} for key_id in key_ids
}
},
)
for server_name, key_ids in server_names_and_key_ids
}
},
long_retries=True,
)
keys = {}
@@ -506,7 +513,7 @@ class Keyring(object):
yield defer.gatherResults(
[
self.store_keys(
preserve_fn(self.store_keys)(
server_name=key_server_name,
from_server=server_name,
verify_keys=verify_keys,
@@ -575,7 +582,7 @@ class Keyring(object):
yield defer.gatherResults(
[
self.store.store_server_keys_json(
preserve_fn(self.store.store_server_keys_json)(
server_name=server_name,
key_id=key_id,
from_server=server_name,
@@ -677,7 +684,7 @@ class Keyring(object):
# TODO(markjh): Store whether the keys have expired.
yield defer.gatherResults(
[
self.store.store_server_verify_key(
preserve_fn(self.store.store_server_verify_key)(
server_name, server_name, key.time_added, key
)
for key_id, key in verify_keys.items()
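
The recurring change in this file wraps deferred-returning calls in preserve_fn / PreserveLoggingContext so the caller's logging context survives trips through the reactor. A toy stand-in (not synapse's real helper, which snapshots and restores the LoggingContext) showing only the call-site shape:

import logging

from twisted.internet import defer

logger = logging.getLogger(__name__)

def preserve_fn_sketch(f):
    # Stand-in: the real preserve_fn captures the current LoggingContext so
    # it is restored when the returned Deferred's callbacks eventually run.
    def wrapped(*args, **kwargs):
        return defer.maybeDeferred(f, *args, **kwargs)
    return wrapped

def store_keys(server_name, from_server, verify_keys):
    # hypothetical storage call returning a Deferred
    logger.info("storing %d keys for %s", len(verify_keys), server_name)
    return defer.succeed(None)

@defer.inlineCallbacks
def store_all(results):
    # Mirrors the switch from self.store_keys(...) to
    # preserve_fn(self.store_keys)(...): each parallel call is wrapped.
    yield defer.gatherResults([
        preserve_fn_sketch(store_keys)(
            server_name=name, from_server="perspectives", verify_keys=keys,
        )
        for name, keys in results.items()
    ], consumeErrors=True)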

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,6 +14,7 @@
# limitations under the License.
from synapse.util.frozenutils import freeze
from synapse.util.caches import intern_dict
# Whether we should use frozen_dict in FrozenEvent. Using frozen_dicts prevents
@@ -117,6 +118,15 @@ class EventBase(object):
def __set__(self, instance, value):
raise AttributeError("Unrecognized attribute %s" % (instance,))
def __getitem__(self, field):
return self._event_dict[field]
def __contains__(self, field):
return field in self._event_dict
def items(self):
return self._event_dict.items()
class FrozenEvent(EventBase):
def __init__(self, event_dict, internal_metadata_dict={}, rejected_reason=None):
@@ -131,6 +141,10 @@ class FrozenEvent(EventBase):
unsigned = dict(event_dict.pop("unsigned", {}))
# We intern these strings because they turn up a lot (especially when
# caching).
event_dict = intern_dict(event_dict)
if USE_FROZEN_DICTS:
frozen_dict = freeze(event_dict)
else:
@@ -159,5 +173,7 @@ class FrozenEvent(EventBase):
def __repr__(self):
return "<FrozenEvent event_id='%s', type='%s', state_key='%s'>" % (
self.event_id, self.type, self.get("state_key", None),
self.get("event_id", None),
self.get("type", None),
self.get("state_key", None),
)
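
Two things happen in this file: event dicts are passed through intern_dict so frequently repeated strings are shared, and EventBase grows mapping-style accessors. A rough stand-in (sys.intern is the Python 3 spelling; the original targets Python 2's builtin intern):

from sys import intern

def intern_dict_sketch(d):
    """Rough stand-in for synapse.util.caches.intern_dict: intern string
    keys and values so repeated events share the same objects."""
    def _intern(value):
        return intern(value) if isinstance(value, str) else value
    return {intern(k): _intern(v) for k, v in d.items()}

class EventSketch(object):
    """Illustrates the new __getitem__/__contains__/items accessors."""
    def __init__(self, event_dict):
        self._event_dict = intern_dict_sketch(event_dict)

    def __getitem__(self, field):
        return self._event_dict[field]

    def __contains__(self, field):
        return field in self._event_dict

    def items(self):
        return self._event_dict.items()

ev = EventSketch({"type": "m.room.message", "room_id": "!abc:example.org"})
assert "type" in ev and ev["type"] == "m.room.message"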

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -20,3 +20,4 @@ class EventContext(object):
self.current_state = current_state
self.state_group = None
self.rejected = False
self.push_actions = []

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -100,22 +100,20 @@ def format_event_raw(d):
def format_event_for_client_v1(d):
d["user_id"] = d.pop("sender", None)
d = format_event_for_client_v2(d)
move_keys = (
sender = d.get("sender")
if sender is not None:
d["user_id"] = sender
copy_keys = (
"age", "redacted_because", "replaces_state", "prev_content",
"invite_room_state",
)
for key in move_keys:
for key in copy_keys:
if key in d["unsigned"]:
d[key] = d["unsigned"][key]
drop_keys = (
"auth_events", "prev_events", "hashes", "signatures", "depth",
"unsigned", "origin", "prev_state"
)
for key in drop_keys:
d.pop(key, None)
return d
@@ -129,10 +127,9 @@ def format_event_for_client_v2(d):
return d
def format_event_for_client_v2_without_event_id(d):
def format_event_for_client_v2_without_room_id(d):
d = format_event_for_client_v2(d)
d.pop("room_id", None)
d.pop("event_id", None)
return d
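
In the v1 formatter, sender is now copied into user_id instead of popped, the unsigned fields are copied up rather than moved, and the _without_event_id helper becomes _without_room_id. A condensed, hypothetical restatement plus the observable effect:

def format_event_for_client_v1_sketch(d):
    sender = d.get("sender")
    if sender is not None:
        d["user_id"] = sender
    copy_keys = ("age", "redacted_because", "replaces_state", "prev_content",
                 "invite_room_state")
    for key in copy_keys:
        if key in d.get("unsigned", {}):
            d[key] = d["unsigned"][key]
    for key in ("auth_events", "prev_events", "hashes", "signatures", "depth",
                "unsigned", "origin", "prev_state"):
        d.pop(key, None)
    return d

event = {"sender": "@alice:example.org", "unsigned": {"age": 1234},
         "hashes": {"sha256": "abcd"}}
formatted = format_event_for_client_v1_sketch(dict(event))
assert formatted["user_id"] == "@alice:example.org"   # copied, not moved
assert formatted["age"] == 1234 and "hashes" not in formatted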

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -17,15 +17,10 @@
"""
from .replication import ReplicationLayer
from .transport import TransportLayer
from .transport.client import TransportLayerClient
def initialize_http_replication(homeserver):
transport = TransportLayer(
homeserver,
homeserver.hostname,
server=homeserver.get_resource_for_federation(),
client=homeserver.get_http_client()
)
transport = TransportLayerClient(homeserver)
return ReplicationLayer(homeserver, transport)

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -57,7 +57,7 @@ class FederationClient(FederationBase):
cache_name="get_pdu_cache",
clock=self._clock,
max_len=1000,
expiry_ms=120*1000,
expiry_ms=120 * 1000,
reset_expiry_on_get=False,
)
@@ -114,7 +114,7 @@ class FederationClient(FederationBase):
@log_function
def make_query(self, destination, query_type, args,
retry_on_dns_fail=True):
retry_on_dns_fail=False):
"""Sends a federation Query to a remote homeserver of the given type
and arguments.
@@ -418,6 +418,7 @@ class FederationClient(FederationBase):
"Failed to make_%s via %s: %s",
membership, destination, e.message
)
raise
raise RuntimeError("Failed to send to any server.")
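
The visible changes here are small (PEP8 spacing around expiry_ms, a different retry_on_dns_fail default, and an added re-raise), but the surrounding pattern of walking a list of candidate servers is worth a sketch. This is a generic illustration, not synapse's actual make_membership_event:

import logging

logger = logging.getLogger(__name__)

def try_destinations(destinations, send):
    """Try each destination in turn; give up only once every attempt fails.

    `send` is any callable that raises on failure - a hypothetical stand-in
    for the per-destination federation request above.
    """
    for destination in destinations:
        try:
            return send(destination)
        except Exception as e:
            logger.warning("Failed via %s: %s", destination, e)
            # A real implementation may re-raise here instead of falling
            # through to the next server (the hunk above adds such a raise).
    raise RuntimeError("Failed to send to any server.")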

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -126,10 +126,8 @@ class FederationServer(FederationBase):
results = []
for pdu in pdu_list:
d = self._handle_new_pdu(transaction.origin, pdu)
try:
yield d
yield self._handle_new_pdu(transaction.origin, pdu)
results.append({})
except FederationError as e:
self.send_failure(e, transaction.origin)
@@ -139,8 +137,8 @@ class FederationServer(FederationBase):
logger.exception("Failed to handle PDU")
if hasattr(transaction, "edus"):
for edu in [Edu(**x) for x in transaction.edus]:
self.received_edu(
for edu in (Edu(**x) for x in transaction.edus):
yield self.received_edu(
transaction.origin,
edu.edu_type,
edu.content
@@ -163,11 +161,17 @@ class FederationServer(FederationBase):
)
defer.returnValue((200, response))
@defer.inlineCallbacks
def received_edu(self, origin, edu_type, content):
received_edus_counter.inc()
if edu_type in self.edu_handlers:
self.edu_handlers[edu_type](origin, content)
try:
yield self.edu_handlers[edu_type](origin, content)
except SynapseError as e:
logger.info("Failed to handle edu %r: %r", edu_type, e)
except Exception as e:
logger.exception("Failed to handle edu %r", edu_type, e)
else:
logger.warn("Received EDU of type %s with no handler", edu_type)
@@ -527,7 +531,6 @@ class FederationServer(FederationBase):
yield self.handler.on_receive_pdu(
origin,
pdu,
backfilled=False,
state=state,
auth_chain=auth_chain,
)
@@ -545,8 +548,19 @@ class FederationServer(FederationBase):
return event
@defer.inlineCallbacks
def exchange_third_party_invite(self, invite):
ret = yield self.handler.exchange_third_party_invite(invite)
def exchange_third_party_invite(
self,
sender_user_id,
target_user_id,
room_id,
signed,
):
ret = yield self.handler.exchange_third_party_invite(
sender_user_id,
target_user_id,
room_id,
signed,
)
defer.returnValue(ret)
@defer.inlineCallbacks
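
received_edu now yields its handler and contains any failure, so a broken EDU handler no longer takes the whole transaction down with it. A toy dispatcher in the same shape (not the real FederationServer):

import logging

from twisted.internet import defer

logger = logging.getLogger(__name__)

class EduDispatcherSketch(object):
    """Toy version of the dispatch shown above: handlers are yielded so
    their failures surface here instead of being dropped, and unknown EDU
    types are only logged."""

    def __init__(self):
        self.edu_handlers = {}

    def register(self, edu_type, handler):
        self.edu_handlers[edu_type] = handler

    @defer.inlineCallbacks
    def received_edu(self, origin, edu_type, content):
        if edu_type in self.edu_handlers:
            try:
                yield self.edu_handlers[edu_type](origin, content)
            except Exception:
                logger.exception("Failed to handle edu %r", edu_type)
        else:
            logger.warning("Received EDU of type %s with no handler", edu_type)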

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -54,8 +54,6 @@ class ReplicationLayer(FederationClient, FederationServer):
self.keyring = hs.get_keyring()
self.transport_layer = transport_layer
self.transport_layer.register_received_handler(self)
self.transport_layer.register_request_handler(self)
self.federation_client = self

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -103,7 +103,6 @@ class TransactionQueue(object):
else:
return not destination.startswith("localhost")
@defer.inlineCallbacks
def enqueue_pdu(self, pdu, destinations, order):
# We loop through all destinations to see whether we already have
# a transaction in progress. If we do, stick it in the pending_pdus
@@ -141,8 +140,6 @@ class TransactionQueue(object):
deferreds.append(deferred)
yield defer.DeferredList(deferreds, consumeErrors=True)
# NO inlineCallbacks
def enqueue_edu(self, edu):
destination = edu.destination

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -20,55 +20,3 @@ By default this is done over HTTPS (and all home servers are required to
support HTTPS), however individual pairings of servers may decide to
communicate over a different (albeit still reliable) protocol.
"""
from .server import TransportLayerServer
from .client import TransportLayerClient
from synapse.util.ratelimitutils import FederationRateLimiter
class TransportLayer(TransportLayerServer, TransportLayerClient):
"""This is a basic implementation of the transport layer that translates
transactions and other requests to/from HTTP.
Attributes:
server_name (str): Local home server host
server (synapse.http.server.HttpServer): the http server to
register listeners on
client (synapse.http.client.HttpClient): the http client used to
send requests
request_handler (TransportRequestHandler): The handler to fire when we
receive requests for data.
received_handler (TransportReceivedHandler): The handler to fire when
we receive data.
"""
def __init__(self, homeserver, server_name, server, client):
"""
Args:
server_name (str): Local home server host
server (synapse.protocol.http.HttpServer): the http server to
register listeners on
client (synapse.protocol.http.HttpClient): the http client used to
send requests
"""
self.keyring = homeserver.get_keyring()
self.clock = homeserver.get_clock()
self.server_name = server_name
self.server = server
self.client = client
self.request_handler = None
self.received_handler = None
self.ratelimiter = FederationRateLimiter(
self.clock,
window_size=homeserver.config.federation_rc_window_size,
sleep_limit=homeserver.config.federation_rc_sleep_limit,
sleep_msec=homeserver.config.federation_rc_sleep_delay,
reject_limit=homeserver.config.federation_rc_reject_limit,
concurrent_requests=homeserver.config.federation_rc_concurrent,
)

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -28,6 +28,10 @@ logger = logging.getLogger(__name__)
class TransportLayerClient(object):
"""Sends federation HTTP requests to other servers"""
def __init__(self, hs):
self.server_name = hs.hostname
self.client = hs.get_http_client()
@log_function
def get_room_state(self, destination, room_id, event_id):
""" Requests all state for a given room from the given server at the
@@ -156,6 +160,7 @@ class TransportLayerClient(object):
path=path,
args=args,
retry_on_dns_fail=retry_on_dns_fail,
timeout=10000,
)
defer.returnValue(content)

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -17,7 +17,9 @@ from twisted.internet import defer
from synapse.api.urls import FEDERATION_PREFIX as PREFIX
from synapse.api.errors import Codes, SynapseError
from synapse.util.logutils import log_function
from synapse.http.server import JsonResource
from synapse.http.servlet import parse_json_object_from_request
from synapse.util.ratelimitutils import FederationRateLimiter
import functools
import logging
@@ -28,9 +30,41 @@ import re
logger = logging.getLogger(__name__)
class TransportLayerServer(object):
class TransportLayerServer(JsonResource):
"""Handles incoming federation HTTP requests"""
def __init__(self, hs):
self.hs = hs
self.clock = hs.get_clock()
super(TransportLayerServer, self).__init__(hs)
self.authenticator = Authenticator(hs)
self.ratelimiter = FederationRateLimiter(
self.clock,
window_size=hs.config.federation_rc_window_size,
sleep_limit=hs.config.federation_rc_sleep_limit,
sleep_msec=hs.config.federation_rc_sleep_delay,
reject_limit=hs.config.federation_rc_reject_limit,
concurrent_requests=hs.config.federation_rc_concurrent,
)
self.register_servlets()
def register_servlets(self):
register_servlets(
self.hs,
resource=self,
ratelimiter=self.ratelimiter,
authenticator=self.authenticator,
)
class Authenticator(object):
def __init__(self, hs):
self.keyring = hs.get_keyring()
self.server_name = hs.hostname
# A method just so we can pass 'self' as the authenticator to the Servlets
@defer.inlineCallbacks
def authenticate_request(self, request):
@@ -98,37 +132,9 @@ class TransportLayerServer(object):
defer.returnValue((origin, content))
@log_function
def register_received_handler(self, handler):
""" Register a handler that will be fired when we receive data.
Args:
handler (TransportReceivedHandler)
"""
FederationSendServlet(
handler,
authenticator=self,
ratelimiter=self.ratelimiter,
server_name=self.server_name,
).register(self.server)
@log_function
def register_request_handler(self, handler):
""" Register a handler that will be fired when we get asked for data.
Args:
handler (TransportRequestHandler)
"""
for servletclass in SERVLET_CLASSES:
servletclass(
handler,
authenticator=self,
ratelimiter=self.ratelimiter,
).register(self.server)
class BaseFederationServlet(object):
def __init__(self, handler, authenticator, ratelimiter):
def __init__(self, handler, authenticator, ratelimiter, server_name):
self.handler = handler
self.authenticator = authenticator
self.ratelimiter = ratelimiter
@@ -165,14 +171,16 @@ class BaseFederationServlet(object):
if code is None:
continue
server.register_path(method, pattern, self._wrap(code))
server.register_paths(method, (pattern,), self._wrap(code))
class FederationSendServlet(BaseFederationServlet):
PATH = "/send/([^/]*)/"
PATH = "/send/(?P<transaction_id>[^/]*)/"
def __init__(self, handler, server_name, **kwargs):
super(FederationSendServlet, self).__init__(handler, **kwargs)
super(FederationSendServlet, self).__init__(
handler, server_name=server_name, **kwargs
)
self.server_name = server_name
# This is when someone is trying to send us a bunch of data.
@@ -242,7 +250,7 @@ class FederationPullServlet(BaseFederationServlet):
class FederationEventServlet(BaseFederationServlet):
PATH = "/event/([^/]*)/"
PATH = "/event/(?P<event_id>[^/]*)/"
# This is when someone asks for a data item for a given server data_id pair.
def on_GET(self, origin, content, query, event_id):
@@ -250,7 +258,7 @@ class FederationEventServlet(BaseFederationServlet):
class FederationStateServlet(BaseFederationServlet):
PATH = "/state/([^/]*)/"
PATH = "/state/(?P<context>[^/]*)/"
# This is when someone asks for all data for a given context.
def on_GET(self, origin, content, query, context):
@@ -262,7 +270,7 @@ class FederationStateServlet(BaseFederationServlet):
class FederationBackfillServlet(BaseFederationServlet):
PATH = "/backfill/([^/]*)/"
PATH = "/backfill/(?P<context>[^/]*)/"
def on_GET(self, origin, content, query, context):
versions = query["v"]
@@ -277,7 +285,7 @@ class FederationBackfillServlet(BaseFederationServlet):
class FederationQueryServlet(BaseFederationServlet):
PATH = "/query/([^/]*)"
PATH = "/query/(?P<query_type>[^/]*)"
# This is when we receive a server-server Query
def on_GET(self, origin, content, query, query_type):
@@ -288,7 +296,7 @@ class FederationQueryServlet(BaseFederationServlet):
class FederationMakeJoinServlet(BaseFederationServlet):
PATH = "/make_join/([^/]*)/([^/]*)"
PATH = "/make_join/(?P<context>[^/]*)/(?P<user_id>[^/]*)"
@defer.inlineCallbacks
def on_GET(self, origin, content, query, context, user_id):
@@ -297,7 +305,7 @@ class FederationMakeJoinServlet(BaseFederationServlet):
class FederationMakeLeaveServlet(BaseFederationServlet):
PATH = "/make_leave/([^/]*)/([^/]*)"
PATH = "/make_leave/(?P<context>[^/]*)/(?P<user_id>[^/]*)"
@defer.inlineCallbacks
def on_GET(self, origin, content, query, context, user_id):
@@ -306,7 +314,7 @@ class FederationMakeLeaveServlet(BaseFederationServlet):
class FederationSendLeaveServlet(BaseFederationServlet):
PATH = "/send_leave/([^/]*)/([^/]*)"
PATH = "/send_leave/(?P<room_id>[^/]*)/(?P<txid>[^/]*)"
@defer.inlineCallbacks
def on_PUT(self, origin, content, query, room_id, txid):
@@ -315,14 +323,14 @@ class FederationSendLeaveServlet(BaseFederationServlet):
class FederationEventAuthServlet(BaseFederationServlet):
PATH = "/event_auth/([^/]*)/([^/]*)"
PATH = "/event_auth(?P<context>[^/]*)/(?P<event_id>[^/]*)"
def on_GET(self, origin, content, query, context, event_id):
return self.handler.on_event_auth(origin, context, event_id)
class FederationSendJoinServlet(BaseFederationServlet):
PATH = "/send_join/([^/]*)/([^/]*)"
PATH = "/send_join/(?P<context>[^/]*)/(?P<event_id>[^/]*)"
@defer.inlineCallbacks
def on_PUT(self, origin, content, query, context, event_id):
@@ -333,7 +341,7 @@ class FederationSendJoinServlet(BaseFederationServlet):
class FederationInviteServlet(BaseFederationServlet):
PATH = "/invite/([^/]*)/([^/]*)"
PATH = "/invite/(?P<context>[^/]*)/(?P<event_id>[^/]*)"
@defer.inlineCallbacks
def on_PUT(self, origin, content, query, context, event_id):
@@ -344,7 +352,7 @@ class FederationInviteServlet(BaseFederationServlet):
class FederationThirdPartyInviteExchangeServlet(BaseFederationServlet):
PATH = "/exchange_third_party_invite/([^/]*)"
PATH = "/exchange_third_party_invite/(?P<room_id>[^/]*)"
@defer.inlineCallbacks
def on_PUT(self, origin, content, query, room_id):
@@ -373,7 +381,7 @@ class FederationClientKeysClaimServlet(BaseFederationServlet):
class FederationQueryAuthServlet(BaseFederationServlet):
PATH = "/query_auth/([^/]*)/([^/]*)"
PATH = "/query_auth/(?P<context>[^/]*)/(?P<event_id>[^/]*)"
@defer.inlineCallbacks
def on_POST(self, origin, content, query, context, event_id):
@@ -386,7 +394,7 @@ class FederationQueryAuthServlet(BaseFederationServlet):
class FederationGetMissingEventsServlet(BaseFederationServlet):
# TODO(paul): Why does this path alone end with "/?" optional?
PATH = "/get_missing_events/([^/]*)/?"
PATH = "/get_missing_events/(?P<room_id>[^/]*)/?"
@defer.inlineCallbacks
def on_POST(self, origin, content, query, room_id):
@@ -412,13 +420,22 @@ class On3pidBindServlet(BaseFederationServlet):
@defer.inlineCallbacks
def on_POST(self, request):
content_bytes = request.content.read()
content = json.loads(content_bytes)
content = parse_json_object_from_request(request)
if "invites" in content:
last_exception = None
for invite in content["invites"]:
try:
yield self.handler.exchange_third_party_invite(invite)
if "signed" not in invite or "token" not in invite["signed"]:
message = ("Rejecting received notification of third-"
"party invite without signed: %s" % (invite,))
logger.info(message)
raise SynapseError(400, message)
yield self.handler.exchange_third_party_invite(
invite["sender"],
invite["mxid"],
invite["room_id"],
invite["signed"],
)
except Exception as e:
last_exception = e
if last_exception:
@@ -432,6 +449,7 @@ class On3pidBindServlet(BaseFederationServlet):
SERVLET_CLASSES = (
FederationSendServlet,
FederationPullServlet,
FederationEventServlet,
FederationStateServlet,
@@ -451,3 +469,13 @@ SERVLET_CLASSES = (
FederationThirdPartyInviteExchangeServlet,
On3pidBindServlet,
)
def register_servlets(hs, resource, authenticator, ratelimiter):
for servletclass in SERVLET_CLASSES:
servletclass(
handler=hs.get_replication_layer(),
authenticator=authenticator,
ratelimiter=ratelimiter,
server_name=hs.hostname,
).register(resource)
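
The servlet paths switch from anonymous groups to named groups, and the servlets are now registered against a JsonResource via register_servlets. A hypothetical mini-router showing why named groups help (keyword arguments fall straight out of groupdict); SketchResource merely stands in for JsonResource and is not synapse's API:

import re

class SketchServlet(object):
    """Hypothetical mini-servlet: matched path segments arrive as kwargs."""
    PATH = "/send/(?P<transaction_id>[^/]*)/"

    def on_PUT(self, origin, content, query, transaction_id):
        return 200, {"txn": transaction_id}

class SketchResource(object):
    """Stand-in for JsonResource.register_paths."""
    def __init__(self):
        self.routes = []

    def register_paths(self, method, patterns, callback):
        self.routes.append((method, [re.compile(p) for p in patterns], callback))

    def dispatch(self, method, path, origin=None, content=None, query=None):
        for m, patterns, callback in self.routes:
            if m != method:
                continue
            for pattern in patterns:
                match = pattern.match(path)
                if match:
                    return callback(origin, content, query, **match.groupdict())
        raise KeyError("no route for %s %s" % (method, path))

resource = SketchResource()
servlet = SketchServlet()
resource.register_paths("PUT", (servlet.PATH,), servlet.on_PUT)
print(resource.dispatch("PUT", "/send/txn123/"))  # (200, {'txn': 'txn123'})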

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014 - 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -18,7 +18,8 @@ from twisted.internet import defer
from synapse.api.errors import LimitExceededError, SynapseError, AuthError
from synapse.crypto.event_signing import add_hashes_and_signatures
from synapse.api.constants import Membership, EventTypes
from synapse.types import UserID, RoomAlias
from synapse.types import UserID, RoomAlias, Requester
from synapse.push.action_generator import ActionGenerator
from synapse.util.logcontext import PreserveLoggingContext
@@ -28,6 +29,14 @@ import logging
logger = logging.getLogger(__name__)
VISIBILITY_PRIORITY = (
"world_readable",
"shared",
"invited",
"joined",
)
class BaseHandler(object):
"""
Common base class for the event handlers.
@@ -52,88 +61,146 @@ class BaseHandler(object):
self.event_builder_factory = hs.get_event_builder_factory()
@defer.inlineCallbacks
def _filter_events_for_client(self, user_id, events, is_guest=False,
require_all_visible_for_guests=True):
# Assumes that user has at some point joined the room if not is_guest.
def filter_events_for_clients(self, user_tuples, events, event_id_to_state):
""" Returns dict of user_id -> list of events that user is allowed to
see.
def allowed(event, membership, visibility):
if visibility == "world_readable":
return True
if is_guest:
return False
if membership == Membership.JOIN:
return True
if event.type == EventTypes.RoomHistoryVisibility:
return not is_guest
if visibility == "shared":
return True
elif visibility == "joined":
return membership == Membership.JOIN
elif visibility == "invited":
return membership == Membership.INVITE
return True
event_id_to_state = yield self.store.get_state_for_events(
frozenset(e.event_id for e in events),
types=(
(EventTypes.RoomHistoryVisibility, ""),
(EventTypes.Member, user_id),
:param (str, bool) user_tuples: (user id, is_peeking) for each
user to be checked. is_peeking should be true if:
* the user is not currently a member of the room, and:
* the user has not been a member of the room since the given
events
"""
forgotten = yield defer.gatherResults([
self.store.who_forgot_in_room(
room_id,
)
for room_id in frozenset(e.room_id for e in events)
], consumeErrors=True)
# Set of membership event_ids that have been forgotten
event_id_forgotten = frozenset(
row["event_id"] for rows in forgotten for row in rows
)
events_to_return = []
for event in events:
def allowed(event, user_id, is_peeking):
state = event_id_to_state[event.event_id]
membership_event = state.get((EventTypes.Member, user_id), None)
if membership_event:
membership = membership_event.membership
else:
membership = None
# get the room_visibility at the time of the event.
visibility_event = state.get((EventTypes.RoomHistoryVisibility, ""), None)
if visibility_event:
visibility = visibility_event.content.get("history_visibility", "shared")
else:
visibility = "shared"
should_include = allowed(event, membership, visibility)
if should_include:
events_to_return.append(event)
if visibility not in VISIBILITY_PRIORITY:
visibility = "shared"
if (require_all_visible_for_guests
and is_guest
and len(events_to_return) < len(events)):
# This indicates that some events in the requested range were not
# visible to guest users. To be safe, we reject the entire request,
# so that we don't have to worry about interpreting visibility
# boundaries.
raise AuthError(403, "User %s does not have permission" % (
user_id
))
# if it was world_readable, it's easy: everyone can read it
if visibility == "world_readable":
return True
defer.returnValue(events_to_return)
# Always allow history visibility events on boundaries. This is done
# by setting the effective visibility to the least restrictive
# of the old vs new.
if event.type == EventTypes.RoomHistoryVisibility:
prev_content = event.unsigned.get("prev_content", {})
prev_visibility = prev_content.get("history_visibility", None)
def ratelimit(self, user_id):
if prev_visibility not in VISIBILITY_PRIORITY:
prev_visibility = "shared"
new_priority = VISIBILITY_PRIORITY.index(visibility)
old_priority = VISIBILITY_PRIORITY.index(prev_visibility)
if old_priority < new_priority:
visibility = prev_visibility
# get the user's membership at the time of the event. (or rather,
# just *after* the event. Which means that people can see their
# own join events, but not (currently) their own leave events.)
membership_event = state.get((EventTypes.Member, user_id), None)
if membership_event:
if membership_event.event_id in event_id_forgotten:
membership = None
else:
membership = membership_event.membership
else:
membership = None
# if the user was a member of the room at the time of the event,
# they can see it.
if membership == Membership.JOIN:
return True
if visibility == "joined":
# we weren't a member at the time of the event, so we can't
# see this event.
return False
elif visibility == "invited":
# user can also see the event if they were *invited* at the time
# of the event.
return membership == Membership.INVITE
else:
# visibility is shared: user can also see the event if they have
# become a member since the event
#
# XXX: if the user has subsequently joined and then left again,
# ideally we would share history up to the point they left. But
# we don't know when they left.
return not is_peeking
defer.returnValue({
user_id: [
event
for event in events
if allowed(event, user_id, is_peeking)
]
for user_id, is_peeking in user_tuples
})
@defer.inlineCallbacks
def _filter_events_for_client(self, user_id, events, is_peeking=False):
"""
Check which events a user is allowed to see
:param str user_id: user id to be checked
:param [synapse.events.EventBase] events: list of events to be checked
:param bool is_peeking should be True if:
* the user is not currently a member of the room, and:
* the user has not been a member of the room since the given
events
:rtype [synapse.events.EventBase]
"""
types = (
(EventTypes.RoomHistoryVisibility, ""),
(EventTypes.Member, user_id),
)
event_id_to_state = yield self.store.get_state_for_events(
frozenset(e.event_id for e in events),
types=types
)
res = yield self.filter_events_for_clients(
[(user_id, is_peeking)], events, event_id_to_state
)
defer.returnValue(res.get(user_id, []))
def ratelimit(self, requester):
time_now = self.clock.time()
allowed, time_allowed = self.ratelimiter.send_message(
user_id, time_now,
requester.user.to_string(), time_now,
msg_rate_hz=self.hs.config.rc_messages_per_second,
burst_count=self.hs.config.rc_message_burst_count,
)
if not allowed:
raise LimitExceededError(
retry_after_ms=int(1000*(time_allowed - time_now)),
retry_after_ms=int(1000 * (time_allowed - time_now)),
)
@defer.inlineCallbacks
def _create_new_client_event(self, builder):
latest_ret = yield self.store.get_latest_events_in_room(
latest_ret = yield self.store.get_latest_event_ids_and_hashes_in_room(
builder.room_id,
)
@@ -142,7 +209,10 @@ class BaseHandler(object):
else:
depth = 1
prev_events = [(e, h) for e, h, _ in latest_ret]
prev_events = [
(event_id, prev_hashes)
for event_id, prev_hashes, _ in latest_ret
]
builder.prev_events = prev_events
builder.depth = depth
@@ -151,6 +221,50 @@ class BaseHandler(object):
context = yield state_handler.compute_event_context(builder)
# If we've received an invite over federation, there are no latest
# events in the room, because we don't know enough about the graph
# fragment we received to treat it like a graph, so the above returned
# no relevant events. It may have returned some events (if we have
# joined and left the room), but not useful ones, like the invite.
if (
not self.is_host_in_room(context.current_state) and
builder.type == EventTypes.Member
):
prev_member_event = yield self.store.get_room_member(
builder.sender, builder.room_id
)
# The prev_member_event may already be in context.current_state,
# despite us not being present in the room; in particular, if
# inviting user, and all other local users, have already left.
#
# In that case, we have all the information we need, and we don't
# want to drop "context" - not least because we may need to handle
# the invite locally, which will require us to have the whole
# context (not just prev_member_event) to auth it.
#
context_event_ids = (
e.event_id for e in context.current_state.values()
)
if (
prev_member_event and
prev_member_event.event_id not in context_event_ids
):
# The prev_member_event is missing from context, so it must
# have arrived over federation and is an outlier. We forcibly
# set our context to the invite we received over federation
builder.prev_events = (
prev_member_event.event_id,
prev_member_event.prev_events
)
context = yield state_handler.compute_event_context(
builder,
old_state=(prev_member_event,),
outlier=True
)
if builder.is_state():
builder.prev_state = yield self.store.add_event_hashes(
context.prev_state_events
@@ -173,13 +287,41 @@ class BaseHandler(object):
(event, context,)
)
def is_host_in_room(self, current_state):
room_members = [
(state_key, event.membership)
for ((event_type, state_key), event) in current_state.items()
if event_type == EventTypes.Member
]
if len(room_members) == 0:
# Have we just created the room, and is this about to be the very
# first member event?
create_event = current_state.get(("m.room.create", ""))
if create_event:
return True
for (state_key, membership) in room_members:
if (
UserID.from_string(state_key).domain == self.hs.hostname
and membership == Membership.JOIN
):
return True
return False
@defer.inlineCallbacks
def handle_new_client_event(self, event, context, extra_destinations=[],
extra_users=[], suppress_auth=False):
def handle_new_client_event(
self,
requester,
event,
context,
ratelimit=True,
extra_users=[]
):
# We now need to go and hit out to wherever we need to hit out to.
if not suppress_auth:
self.auth.check(event, auth_events=context.current_state)
if ratelimit:
self.ratelimit(requester)
self.auth.check(event, auth_events=context.current_state)
yield self.maybe_kick_guest_users(event, context.current_state.values())
@@ -203,6 +345,12 @@ class BaseHandler(object):
if event.type == EventTypes.Member:
if event.content["membership"] == Membership.INVITE:
def is_inviter_member_event(e):
return (
e.type == EventTypes.Member and
e.sender == event.sender
)
event.unsigned["invite_room_state"] = [
{
"type": e.type,
@@ -211,12 +359,8 @@ class BaseHandler(object):
"sender": e.sender,
}
for k, e in context.current_state.items()
if e.type in (
EventTypes.JoinRules,
EventTypes.CanonicalAlias,
EventTypes.RoomAvatar,
EventTypes.Name,
)
if e.type in self.hs.config.room_invite_state_types
or is_inviter_member_event(e)
]
invitee = UserID.from_string(event.state_key)
@@ -252,11 +396,22 @@ class BaseHandler(object):
"You don't have permission to redact events"
)
if event.type == EventTypes.Create and context.current_state:
raise AuthError(
403,
"Changing the room create event is forbidden",
)
action_generator = ActionGenerator(self.hs)
yield action_generator.handle_push_actions_for_event(
event, context, self
)
(event_stream_id, max_stream_id) = yield self.store.persist_event(
event, context=context
)
destinations = set(extra_destinations)
destinations = set()
for k, s in context.current_state.items():
try:
if k[0] == EventTypes.Member:
@@ -271,19 +426,11 @@ class BaseHandler(object):
with PreserveLoggingContext():
# Don't block waiting on waking up all the listeners.
notify_d = self.notifier.on_new_room_event(
self.notifier.on_new_room_event(
event, event_stream_id, max_stream_id,
extra_users=extra_users
)
def log_failure(f):
logger.warn(
"Failed to notify about %s: %s",
event.event_id, f.value
)
notify_d.addErrback(log_failure)
# If invite, remove room_state from unsigned before sending.
event.unsigned.pop("invite_room_state", None)
@@ -307,7 +454,8 @@ class BaseHandler(object):
if member_event.type != EventTypes.Member:
continue
if not self.hs.is_mine(UserID.from_string(member_event.state_key)):
target_user = UserID.from_string(member_event.state_key)
if not self.hs.is_mine(target_user):
continue
if member_event.content["membership"] not in {
@@ -329,18 +477,13 @@ class BaseHandler(object):
# and having homeservers have their own users leave keeps more
# of that decision-making and control local to the guest-having
# homeserver.
message_handler = self.hs.get_handlers().message_handler
yield message_handler.create_and_send_event(
{
"type": EventTypes.Member,
"state_key": member_event.state_key,
"content": {
"membership": Membership.LEAVE,
"kind": "guest"
},
"room_id": member_event.room_id,
"sender": member_event.state_key
},
requester = Requester(target_user, "", True)
handler = self.hs.get_handlers().room_member_handler
yield handler.update_membership(
requester,
target_user,
member_event.room_id,
"leave",
ratelimit=False,
)
except Exception as e:
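
The visibility logic above is reworked around a VISIBILITY_PRIORITY ladder and a per-user is_peeking flag. A condensed restatement of the core allowed() decision (the boundary handling for history-visibility changes and the forgotten-membership check are omitted here):

VISIBILITY_PRIORITY = ("world_readable", "shared", "invited", "joined")

def allowed_sketch(visibility, membership, is_peeking):
    """Condensed restatement of allowed(): can this user see the event?"""
    if visibility not in VISIBILITY_PRIORITY:
        visibility = "shared"
    if visibility == "world_readable":
        return True
    if membership == "join":
        return True
    if visibility == "joined":
        # not a member at the time of the event, so not visible
        return False
    if visibility == "invited":
        return membership == "invite"
    # "shared": users who joined later may still see it, peekers may not
    return not is_peeking

assert allowed_sketch("world_readable", None, is_peeking=True)
assert not allowed_sketch("joined", "invite", is_peeking=False)
assert allowed_sketch("shared", None, is_peeking=False)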

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,22 +16,23 @@
from twisted.internet import defer
class PrivateUserDataEventSource(object):
class AccountDataEventSource(object):
def __init__(self, hs):
self.store = hs.get_datastore()
def get_current_key(self, direction='f'):
return self.store.get_max_private_user_data_stream_id()
return self.store.get_max_account_data_stream_id()
@defer.inlineCallbacks
def get_new_events(self, user, from_key, **kwargs):
user_id = user.to_string()
last_stream_id = from_key
current_stream_id = yield self.store.get_max_private_user_data_stream_id()
tags = yield self.store.get_updated_tags(user_id, last_stream_id)
current_stream_id = yield self.store.get_max_account_data_stream_id()
results = []
tags = yield self.store.get_updated_tags(user_id, last_stream_id)
for room_id, room_tags in tags.items():
results.append({
"type": "m.tag",
@@ -39,6 +40,24 @@ class PrivateUserDataEventSource(object):
"room_id": room_id,
})
account_data, room_account_data = (
yield self.store.get_updated_account_data_for_user(user_id, last_stream_id)
)
for account_data_type, content in account_data.items():
results.append({
"type": account_data_type,
"content": content,
})
for room_id, account_data in room_account_data.items():
for account_data_type, content in account_data.items():
results.append({
"type": account_data_type,
"content": content,
"room_id": room_id,
})
defer.returnValue((results, current_stream_id))
@defer.inlineCallbacks
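
The renamed AccountDataEventSource now merges three sources (room tags, global account data, per-room account data) into one flat list of pseudo-events. A plain-function sketch of that shaping step (the m.tag content shape is assumed from the elided line above):

def build_account_data_events_sketch(tags, account_data, room_account_data):
    """Shape the three inputs into one flat event list."""
    results = []
    for room_id, room_tags in tags.items():
        results.append({
            "type": "m.tag",
            "content": {"tags": room_tags},  # assumed content shape
            "room_id": room_id,
        })
    for account_data_type, content in account_data.items():
        results.append({"type": account_data_type, "content": content})
    for room_id, data in room_account_data.items():
        for account_data_type, content in data.items():
            results.append({
                "type": account_data_type,
                "content": content,
                "room_id": room_id,
            })
    return results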

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -30,34 +30,27 @@ class AdminHandler(BaseHandler):
@defer.inlineCallbacks
def get_whois(self, user):
res = yield self.store.get_user_ip_and_agents(user)
connections = []
d = {}
for r in res:
# Note that device_id is always None
device = d.setdefault(r["device_id"], {})
session = device.setdefault(r["access_token"], [])
session.append({
"ip": r["ip"],
"user_agent": r["user_agent"],
"last_seen": r["last_seen"],
sessions = yield self.store.get_user_ip_and_agents(user)
for session in sessions:
connections.append({
"ip": session["ip"],
"last_seen": session["last_seen"],
"user_agent": session["user_agent"],
})
ret = {
"user_id": user.to_string(),
"devices": [
{
"device_id": k,
"devices": {
"": {
"sessions": [
{
# "access_token": x, TODO (erikj)
"connections": y,
"connections": connections,
}
for x, y in v.items()
]
}
for k, v in d.items()
],
},
},
}
defer.returnValue(ret)
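
get_whois now returns a single anonymous device ("") with one session containing every connection. A small sketch of the reshaped response (the builder function itself is invented for illustration):

def build_whois_sketch(user_id, sessions):
    """Rough sketch of the reshaped /whois response."""
    connections = [
        {
            "ip": s["ip"],
            "last_seen": s["last_seen"],
            "user_agent": s["user_agent"],
        }
        for s in sessions
    ]
    return {
        "user_id": user_id,
        "devices": {
            "": {
                "sessions": [
                    {"connections": connections},
                ],
            },
        },
    }

print(build_whois_sketch(
    "@admin:example.org",
    [{"ip": "10.0.0.1", "last_seen": 0, "user_agent": "curl/7"}],
))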

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014 - 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -35,6 +35,7 @@ logger = logging.getLogger(__name__)
class AuthHandler(BaseHandler):
SESSION_EXPIRE_MS = 48 * 60 * 60 * 1000
def __init__(self, hs):
super(AuthHandler, self).__init__(hs)
@@ -66,15 +67,18 @@ class AuthHandler(BaseHandler):
'auth' key: this method prompts for auth if none is sent.
clientip (str): The IP address of the client.
Returns:
A tuple of (authed, dict, dict) where authed is true if the client
has successfully completed an auth flow. If it is true, the first
dict contains the authenticated credentials of each stage.
A tuple of (authed, dict, dict, session_id) where authed is true if
the client has successfully completed an auth flow. If it is true
the first dict contains the authenticated credentials of each stage.
If authed is false, the first dictionary is the server response to
the login request and should be passed back to the client.
In either case, the second dict contains the parameters for this
request (which may have been given only in a previous call).
session_id is the ID of this session, either passed in by the client
or assigned by the call to check_auth
"""
authdict = None
@@ -103,7 +107,10 @@ class AuthHandler(BaseHandler):
if not authdict:
defer.returnValue(
(False, self._auth_dict_for_flows(flows, session), clientdict)
(
False, self._auth_dict_for_flows(flows, session),
clientdict, session['id']
)
)
if 'creds' not in session:
@@ -122,12 +129,11 @@ class AuthHandler(BaseHandler):
for f in flows:
if len(set(f) - set(creds.keys())) == 0:
logger.info("Auth completed with creds: %r", creds)
self._remove_session(session)
defer.returnValue((True, creds, clientdict))
defer.returnValue((True, creds, clientdict, session['id']))
ret = self._auth_dict_for_flows(flows, session)
ret['completed'] = creds.keys()
defer.returnValue((False, ret, clientdict))
defer.returnValue((False, ret, clientdict, session['id']))
@defer.inlineCallbacks
def add_oob_auth(self, stagetype, authdict, clientip):
@@ -154,6 +160,43 @@ class AuthHandler(BaseHandler):
defer.returnValue(True)
defer.returnValue(False)
def get_session_id(self, clientdict):
"""
Gets the session ID for a client given the client dictionary
:param clientdict: The dictionary sent by the client in the request
:return: The string session ID the client sent. If the client did not
send a session ID, returns None.
"""
sid = None
if clientdict and 'auth' in clientdict:
authdict = clientdict['auth']
if 'session' in authdict:
sid = authdict['session']
return sid
def set_session_data(self, session_id, key, value):
"""
Store a key-value pair into the sessions data associated with this
request. This data is stored server-side and cannot be modified by
the client.
:param session_id: (string) The ID of this session as returned from check_auth
:param key: (string) The key to store the data under
:param value: (any) The data to store
"""
sess = self._get_session_info(session_id)
sess.setdefault('serverdict', {})[key] = value
self._save_session(sess)
def get_session_data(self, session_id, key, default=None):
"""
Retrieve data stored with set_session_data
:param session_id: (string) The ID of this session as returned from check_auth
:param key: (string) The key to store the data under
:param default: (any) Value to return if the key has not been set
"""
sess = self._get_session_info(session_id)
return sess.setdefault('serverdict', {}).get(key, default)
@defer.inlineCallbacks
def _check_password_auth(self, authdict, _):
if "user" not in authdict or "password" not in authdict:
@@ -407,8 +450,8 @@ class AuthHandler(BaseHandler):
try:
macaroon = pymacaroons.Macaroon.deserialize(login_token)
auth_api = self.hs.get_auth()
auth_api.validate_macaroon(macaroon, "login", [auth_api.verify_expiry])
return self._get_user_from_macaroon(macaroon)
auth_api.validate_macaroon(macaroon, "login", True)
return self.get_user_from_macaroon(macaroon)
except (pymacaroons.exceptions.MacaroonException, TypeError, ValueError):
raise AuthError(401, "Invalid token", errcode=Codes.UNKNOWN_TOKEN)
@@ -421,7 +464,7 @@ class AuthHandler(BaseHandler):
macaroon.add_first_party_caveat("user_id = %s" % (user_id,))
return macaroon
def _get_user_from_macaroon(self, macaroon):
def get_user_from_macaroon(self, macaroon):
user_prefix = "user_id = "
for caveat in macaroon.caveats:
if caveat.caveat_id.startswith(user_prefix):
@@ -432,13 +475,18 @@ class AuthHandler(BaseHandler):
)
@defer.inlineCallbacks
def set_password(self, user_id, newpassword):
def set_password(self, user_id, newpassword, requester=None):
password_hash = self.hash(newpassword)
except_access_token_ids = [requester.access_token_id] if requester else []
yield self.store.user_set_password_hash(user_id, password_hash)
yield self.store.user_delete_access_tokens(user_id)
yield self.hs.get_pusherpool().remove_pushers_by_user(user_id)
yield self.store.flush_user(user_id)
yield self.store.user_delete_access_tokens(
user_id, except_access_token_ids
)
yield self.hs.get_pusherpool().remove_pushers_by_user(
user_id, except_access_token_ids
)
@defer.inlineCallbacks
def add_threepid(self, user_id, medium, address, validated_at):
@@ -450,11 +498,18 @@ class AuthHandler(BaseHandler):
def _save_session(self, session):
# TODO: Persistent storage
logger.debug("Saving session %s", session)
session["last_used"] = self.hs.get_clock().time_msec()
self.sessions[session["id"]] = session
self._prune_sessions()
def _remove_session(self, session):
logger.debug("Removing session %s", session)
del self.sessions[session["id"]]
def _prune_sessions(self):
for sid, sess in self.sessions.items():
last_used = 0
if 'last_used' in sess:
last_used = sess['last_used']
now = self.hs.get_clock().time_msec()
if last_used < now - AuthHandler.SESSION_EXPIRE_MS:
del self.sessions[sid]
def hash(self, password):
"""Computes a secure hash of password.
@@ -477,4 +532,4 @@ class AuthHandler(BaseHandler):
Returns:
Whether self.hash(password) == stored_hash (bool).
"""
return bcrypt.checkpw(password, stored_hash)
return bcrypt.hashpw(password, stored_hash) == stored_hash
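
check_auth now hands back the session ID, and the new get_session_id / set_session_data / get_session_data helpers let the server stash data against that session between UI-auth round trips. A minimal in-memory sketch of just those helpers (session expiry and pruning, shown above, are omitted):

class SessionStoreSketch(object):
    """Minimal in-memory version of the server-side session data helpers."""

    def __init__(self):
        self.sessions = {}

    def _get_session_info(self, session_id):
        return self.sessions.setdefault(session_id, {"id": session_id})

    def get_session_id(self, clientdict):
        # The client echoes the session id inside its 'auth' dict, if any.
        if clientdict and "auth" in clientdict:
            return clientdict["auth"].get("session")
        return None

    def set_session_data(self, session_id, key, value):
        sess = self._get_session_info(session_id)
        sess.setdefault("serverdict", {})[key] = value

    def get_session_data(self, session_id, key, default=None):
        sess = self._get_session_info(session_id)
        return sess.setdefault("serverdict", {}).get(key, default)

store = SessionStoreSketch()
sid = store.get_session_id({"auth": {"session": "abc123"}})
store.set_session_data(sid, "registered_user_id", "@bob:example.org")
assert store.get_session_data(sid, "registered_user_id") == "@bob:example.org"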

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -17,9 +17,9 @@
from twisted.internet import defer
from ._base import BaseHandler
from synapse.api.errors import SynapseError, Codes, CodeMessageException
from synapse.api.errors import SynapseError, Codes, CodeMessageException, AuthError
from synapse.api.constants import EventTypes
from synapse.types import RoomAlias
from synapse.types import RoomAlias, UserID
import logging
import string
@@ -32,13 +32,15 @@ class DirectoryHandler(BaseHandler):
def __init__(self, hs):
super(DirectoryHandler, self).__init__(hs)
self.state = hs.get_state_handler()
self.federation = hs.get_replication_layer()
self.federation.register_query_handler(
"directory", self.on_directory_query
)
@defer.inlineCallbacks
def _create_association(self, room_alias, room_id, servers=None):
def _create_association(self, room_alias, room_id, servers=None, creator=None):
# general association creation for both human users and app services
for wchar in string.whitespace:
@@ -60,7 +62,8 @@ class DirectoryHandler(BaseHandler):
yield self.store.create_room_alias_association(
room_alias,
room_id,
servers
servers,
creator=creator,
)
@defer.inlineCallbacks
@@ -77,7 +80,7 @@ class DirectoryHandler(BaseHandler):
400, "This alias is reserved by an application service.",
errcode=Codes.EXCLUSIVE
)
yield self._create_association(room_alias, room_id, servers)
yield self._create_association(room_alias, room_id, servers, creator=user_id)
@defer.inlineCallbacks
def create_appservice_association(self, service, room_alias, room_id,
@@ -92,10 +95,14 @@ class DirectoryHandler(BaseHandler):
yield self._create_association(room_alias, room_id, servers)
@defer.inlineCallbacks
def delete_association(self, user_id, room_alias):
def delete_association(self, requester, user_id, room_alias):
# association deletion for human users
# TODO Check if server admin
can_delete = yield self._user_can_delete_alias(room_alias, user_id)
if not can_delete:
raise AuthError(
403, "You don't have permission to delete the alias.",
)
can_delete = yield self.can_modify_alias(
room_alias,
@@ -107,7 +114,25 @@ class DirectoryHandler(BaseHandler):
errcode=Codes.EXCLUSIVE
)
yield self._delete_association(room_alias)
room_id = yield self._delete_association(room_alias)
try:
yield self.send_room_alias_update_event(
requester,
requester.user.to_string(),
room_id
)
yield self._update_canonical_alias(
requester,
requester.user.to_string(),
room_id,
room_alias,
)
except AuthError as e:
logger.info("Failed to update alias events: %s", e)
defer.returnValue(room_id)
@defer.inlineCallbacks
def delete_appservice_association(self, service, room_alias):
@@ -124,11 +149,9 @@ class DirectoryHandler(BaseHandler):
if not self.hs.is_mine(room_alias):
raise SynapseError(400, "Room alias must be local")
yield self.store.delete_room_alias(room_alias)
room_id = yield self.store.delete_room_alias(room_alias)
# TODO - Looks like _update_room_alias_event has never been implemented
# if room_id:
# yield self._update_room_alias_events(user_id, room_id)
defer.returnValue(room_id)
@defer.inlineCallbacks
def get_association(self, room_alias):
@@ -175,8 +198,8 @@ class DirectoryHandler(BaseHandler):
# If this server is in the list of servers, return it first.
if self.server_name in servers:
servers = (
[self.server_name]
+ [s for s in servers if s != self.server_name]
[self.server_name] +
[s for s in servers if s != self.server_name]
)
else:
servers = list(servers)
@@ -212,17 +235,44 @@ class DirectoryHandler(BaseHandler):
)
@defer.inlineCallbacks
def send_room_alias_update_event(self, user_id, room_id):
def send_room_alias_update_event(self, requester, user_id, room_id):
aliases = yield self.store.get_aliases_for_room(room_id)
msg_handler = self.hs.get_handlers().message_handler
yield msg_handler.create_and_send_event({
"type": EventTypes.Aliases,
"state_key": self.hs.hostname,
"room_id": room_id,
"sender": user_id,
"content": {"aliases": aliases},
}, ratelimit=False)
yield msg_handler.create_and_send_nonmember_event(
requester,
{
"type": EventTypes.Aliases,
"state_key": self.hs.hostname,
"room_id": room_id,
"sender": user_id,
"content": {"aliases": aliases},
},
ratelimit=False
)
@defer.inlineCallbacks
def _update_canonical_alias(self, requester, user_id, room_id, room_alias):
alias_event = yield self.state.get_current_state(
room_id, EventTypes.CanonicalAlias, ""
)
alias_str = room_alias.to_string()
if not alias_event or alias_event.content.get("alias", "") != alias_str:
return
msg_handler = self.hs.get_handlers().message_handler
yield msg_handler.create_and_send_nonmember_event(
requester,
{
"type": EventTypes.CanonicalAlias,
"state_key": "",
"room_id": room_id,
"sender": user_id,
"content": {},
},
ratelimit=False
)
@defer.inlineCallbacks
def get_association_from_room_alias(self, room_alias):
@@ -257,3 +307,35 @@ class DirectoryHandler(BaseHandler):
return
# either no interested services, or no service with an exclusive lock
defer.returnValue(True)
@defer.inlineCallbacks
def _user_can_delete_alias(self, alias, user_id):
creator = yield self.store.get_room_alias_creator(alias.to_string())
if creator and creator == user_id:
defer.returnValue(True)
is_admin = yield self.auth.is_server_admin(UserID.from_string(user_id))
defer.returnValue(is_admin)
@defer.inlineCallbacks
def edit_published_room_list(self, requester, room_id, visibility):
"""Edit the entry of the room in the published room list.
requester
room_id (str)
visibility (str): "public" or "private"
"""
if requester.is_guest:
raise AuthError(403, "Guests cannot edit the published room list")
if visibility not in ["public", "private"]:
raise SynapseError(400, "Invalid visibility setting")
room = yield self.store.get_room(room_id)
if room is None:
raise SynapseError(400, "Unknown room")
yield self.auth.check_can_change_room_list(room_id, requester.user)
yield self.store.set_room_is_public(room_id, visibility == "public")

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -18,6 +18,8 @@ from twisted.internet import defer
from synapse.util.logutils import log_function
from synapse.types import UserID
from synapse.events.utils import serialize_event
from synapse.api.constants import Membership, EventTypes
from synapse.events import EventBase
from ._base import BaseHandler
@@ -46,90 +48,64 @@ class EventStreamHandler(BaseHandler):
self.notifier = hs.get_notifier()
@defer.inlineCallbacks
def started_stream(self, user):
"""Tells the presence handler that we have started an eventstream for
the user:
Args:
user (User): The user who started a stream.
Returns:
A deferred that completes once their presence has been updated.
"""
if user not in self._streams_per_user:
self._streams_per_user[user] = 0
if user in self._stop_timer_per_user:
try:
self.clock.cancel_call_later(
self._stop_timer_per_user.pop(user)
)
except:
logger.exception("Failed to cancel event timer")
else:
yield self.distributor.fire("started_user_eventstream", user)
self._streams_per_user[user] += 1
def stopped_stream(self, user):
"""If there are no streams for a user this starts a timer that will
notify the presence handler that we haven't got an event stream for
the user unless the user starts a new stream in 30 seconds.
Args:
user (User): The user who stopped a stream.
"""
self._streams_per_user[user] -= 1
if not self._streams_per_user[user]:
del self._streams_per_user[user]
# 30 seconds of grace to allow the client to reconnect again
# before we think they're gone
def _later():
logger.debug("_later stopped_user_eventstream %s", user)
self._stop_timer_per_user.pop(user, None)
return self.distributor.fire("stopped_user_eventstream", user)
logger.debug("Scheduling _later: for %s", user)
self._stop_timer_per_user[user] = (
self.clock.call_later(30, _later)
)
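The grace period above (cancel the pending "user went away" callback if they reconnect within 30 seconds, otherwise fire it) can be sketched standalone; threading.Timer stands in for the Twisted clock.call_later used by the real handler, and all names below are illustrative:

import threading

class StreamTracker:
    GRACE_SECONDS = 30

    def __init__(self, on_user_gone):
        self._streams_per_user = {}     # user -> number of open streams
        self._stop_timer_per_user = {}  # user -> pending grace timer
        self._on_user_gone = on_user_gone

    def started(self, user):
        timer = self._stop_timer_per_user.pop(user, None)
        if timer:
            timer.cancel()  # came back within the grace period
        self._streams_per_user[user] = self._streams_per_user.get(user, 0) + 1

    def stopped(self, user):
        self._streams_per_user[user] -= 1
        if self._streams_per_user[user]:
            return  # other streams for this user are still open
        del self._streams_per_user[user]

        def _later():
            self._stop_timer_per_user.pop(user, None)
            self._on_user_gone(user)

        timer = threading.Timer(self.GRACE_SECONDS, _later)
        self._stop_timer_per_user[user] = timer
        timer.start()

tracker = StreamTracker(on_user_gone=lambda u: print("%s went offline" % u))
tracker.started("@alice:example.org")
tracker.stopped("@alice:example.org")  # fires ~30s later unless started() again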
@defer.inlineCallbacks
@log_function
def get_stream(self, auth_user_id, pagin_config, timeout=0,
as_client_event=True, affect_presence=True,
only_room_events=False, room_id=None, is_guest=False):
only_keys=None, room_id=None, is_guest=False):
"""Fetches the events stream for a given user.
If `only_room_events` is `True` only room events will be returned.
If `only_keys` is not None, events from keys will be sent down.
"""
auth_user = UserID.from_string(auth_user_id)
presence_handler = self.hs.get_handlers().presence_handler
try:
if affect_presence:
yield self.started_stream(auth_user)
context = yield presence_handler.user_syncing(
auth_user_id, affect_presence=affect_presence,
)
with context:
if timeout:
# If they've set a timeout set a minimum limit.
timeout = max(timeout, 500)
# Add some randomness to this value to try and mitigate against
# thundering herds on restart.
timeout = random.randint(int(timeout*0.9), int(timeout*1.1))
if is_guest:
yield self.distributor.fire(
"user_joined_room", user=auth_user, room_id=room_id
)
timeout = random.randint(int(timeout * 0.9), int(timeout * 1.1))
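The minimum-and-jitter applied to the long-poll timeout keeps restarting clients from all timing out (and reconnecting) at the same instant; a self-contained sketch of the same calculation, with times in milliseconds:

import random

def jittered_timeout(timeout_ms, minimum_ms=500):
    # Enforce a floor, then spread the value over +/- 10% to avoid a
    # thundering herd of simultaneous reconnects after a restart.
    if timeout_ms:
        timeout_ms = max(timeout_ms, minimum_ms)
    return random.randint(int(timeout_ms * 0.9), int(timeout_ms * 1.1))

print(jittered_timeout(30000))  # somewhere between 27000 and 33000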
events, tokens = yield self.notifier.get_events_for(
auth_user, pagin_config, timeout,
only_room_events=only_room_events,
is_guest=is_guest, guest_room_id=room_id
only_keys=only_keys,
is_guest=is_guest, explicit_room_id=room_id
)
# When the user joins a new room, or another user joins a currently
# joined room, we need to send down presence for those users.
to_add = []
for event in events:
if not isinstance(event, EventBase):
continue
if event.type == EventTypes.Member:
if event.membership != Membership.JOIN:
continue
# Send down presence.
if event.state_key == auth_user_id:
# Send down presence for everyone in the room.
users = yield self.store.get_users_in_room(event.room_id)
states = yield presence_handler.get_states(
users,
as_event=True,
)
to_add.extend(states)
else:
ev = yield presence_handler.get_state(
UserID.from_string(event.state_key),
as_event=True,
)
to_add.append(ev)
events.extend(to_add)
time_now = self.clock.time_msec()
chunks = [
@@ -144,10 +120,6 @@ class EventStreamHandler(BaseHandler):
defer.returnValue(chunk)
finally:
if affect_presence:
self.stopped_stream(auth_user)
class EventHandler(BaseHandler):

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,6 +14,9 @@
# limitations under the License.
"""Contains handlers for federation events."""
from signedjson.key import decode_verify_key_bytes
from signedjson.sign import verify_signed_json
from unpaddedbase64 import decode_base64
from ._base import BaseHandler
@@ -36,6 +39,8 @@ from synapse.events.utils import prune_event
from synapse.util.retryutils import NotRetryingDestination
from synapse.push.action_generator import ActionGenerator
from twisted.internet import defer
import itertools
@@ -44,6 +49,10 @@ import logging
logger = logging.getLogger(__name__)
def user_joined_room(distributor, user, room_id):
return distributor.fire("user_joined_room", user, room_id)
class FederationHandler(BaseHandler):
"""Handles events that originated from federation.
Responsible for:
@@ -60,10 +69,7 @@ class FederationHandler(BaseHandler):
self.hs = hs
self.distributor.observe(
"user_joined_room",
self._on_user_joined
)
self.distributor.observe("user_joined_room", self.user_joined_room)
self.waiting_for_join_list = {}
@@ -96,7 +102,7 @@ class FederationHandler(BaseHandler):
@log_function
@defer.inlineCallbacks
def on_receive_pdu(self, origin, pdu, backfilled, state=None,
def on_receive_pdu(self, origin, pdu, state=None,
auth_chain=None):
""" Called by the ReplicationLayer when we have a new pdu. We need to
do auth checks and put it through the StateHandler.
@@ -117,7 +123,6 @@ class FederationHandler(BaseHandler):
# FIXME (erikj): Awful hack to make the case where we are not currently
# in the room work
current_state = None
is_in_room = yield self.auth.check_host_in_room(
event.room_id,
self.server_name
@@ -176,12 +181,10 @@ class FederationHandler(BaseHandler):
)
try:
_, event_stream_id, max_stream_id = yield self._handle_new_event(
context, event_stream_id, max_stream_id = yield self._handle_new_event(
origin,
event,
state=state,
backfilled=backfilled,
current_state=current_state,
)
except AuthError as e:
raise FederationError(
@@ -210,33 +213,27 @@ class FederationHandler(BaseHandler):
except StoreError:
logger.exception("Failed to store room.")
if not backfilled:
extra_users = []
if event.type == EventTypes.Member:
target_user_id = event.state_key
target_user = UserID.from_string(target_user_id)
extra_users.append(target_user)
extra_users = []
if event.type == EventTypes.Member:
target_user_id = event.state_key
target_user = UserID.from_string(target_user_id)
extra_users.append(target_user)
with PreserveLoggingContext():
d = self.notifier.on_new_room_event(
event, event_stream_id, max_stream_id,
extra_users=extra_users
)
def log_failure(f):
logger.warn(
"Failed to notify about %s: %s",
event.event_id, f.value
)
d.addErrback(log_failure)
with PreserveLoggingContext():
self.notifier.on_new_room_event(
event, event_stream_id, max_stream_id,
extra_users=extra_users
)
if event.type == EventTypes.Member:
if event.membership == Membership.JOIN:
user = UserID.from_string(event.state_key)
yield self.distributor.fire(
"user_joined_room", user=user, room_id=event.room_id
)
prev_state = context.current_state.get((event.type, event.state_key))
if not prev_state or prev_state.membership != Membership.JOIN:
# Only fire user_joined_room if the user has actually
# joined the room. Don't bother if the user is just
# changing their profile info.
user = UserID.from_string(event.state_key)
yield user_joined_room(self.distributor, user, event.room_id)
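The prev_state comparison above amounts to "only count this as a join if the previous membership was not already join"; a minimal sketch of that transition check with plain dicts:

def is_new_join(prev_member_event, new_member_event):
    # A member event is a genuine join only when the membership becomes
    # "join" and it was not "join" already (e.g. a profile-only update).
    if new_member_event.get("membership") != "join":
        return False
    if prev_member_event is None:
        return True
    return prev_member_event.get("membership") != "join"

assert not is_new_join({"membership": "join"}, {"membership": "join"})   # displayname change
assert is_new_join({"membership": "invite"}, {"membership": "join"})     # accepting an invite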
@defer.inlineCallbacks
def _filter_events_for_server(self, server_name, room_id, events):
@@ -471,7 +468,7 @@ class FederationHandler(BaseHandler):
limit=100,
extremities=[e for e in extremities.keys()]
)
except SynapseError:
except SynapseError as e:
logger.info(
"Failed to backfill from %s because %s",
dom, e,
@@ -592,7 +589,7 @@ class FederationHandler(BaseHandler):
handled_events = set()
try:
new_event = self._sign_event(event)
event = self._sign_event(event)
# Try the host we successfully got a response to /make_join/
# request first.
try:
@@ -600,7 +597,7 @@ class FederationHandler(BaseHandler):
target_hosts.insert(0, origin)
except ValueError:
pass
ret = yield self.replication_layer.send_join(target_hosts, new_event)
ret = yield self.replication_layer.send_join(target_hosts, event)
origin = ret["origin"]
state = ret["state"]
@@ -609,12 +606,12 @@ class FederationHandler(BaseHandler):
handled_events.update([s.event_id for s in state])
handled_events.update([a.event_id for a in auth_chain])
handled_events.add(new_event.event_id)
handled_events.add(event.event_id)
logger.debug("do_invite_join auth_chain: %s", auth_chain)
logger.debug("do_invite_join state: %s", state)
logger.debug("do_invite_join event: %s", new_event)
logger.debug("do_invite_join event: %s", event)
try:
yield self.store.store_room(
@@ -631,19 +628,11 @@ class FederationHandler(BaseHandler):
)
with PreserveLoggingContext():
d = self.notifier.on_new_room_event(
new_event, event_stream_id, max_stream_id,
self.notifier.on_new_room_event(
event, event_stream_id, max_stream_id,
extra_users=[joinee]
)
def log_failure(f):
logger.warn(
"Failed to notify about %s: %s",
new_event.event_id, f.value
)
d.addErrback(log_failure)
logger.debug("Finished joining %s to %s", joinee, room_id)
finally:
room_queue = self.room_queues[room_id]
@@ -654,7 +643,7 @@ class FederationHandler(BaseHandler):
continue
try:
self.on_receive_pdu(origin, p, backfilled=False)
self.on_receive_pdu(origin, p)
except:
logger.exception("Couldn't handle pdu")
@@ -718,24 +707,14 @@ class FederationHandler(BaseHandler):
extra_users.append(target_user)
with PreserveLoggingContext():
d = self.notifier.on_new_room_event(
self.notifier.on_new_room_event(
event, event_stream_id, max_stream_id, extra_users=extra_users
)
def log_failure(f):
logger.warn(
"Failed to notify about %s: %s",
event.event_id, f.value
)
d.addErrback(log_failure)
if event.type == EventTypes.Member:
if event.content["membership"] == Membership.JOIN:
user = UserID.from_string(event.state_key)
yield self.distributor.fire(
"user_joined_room", user=user, room_id=event.room_id
)
yield user_joined_room(self.distributor, user, event.room_id)
new_pdu = event
@@ -796,24 +775,15 @@ class FederationHandler(BaseHandler):
event_stream_id, max_stream_id = yield self.store.persist_event(
event,
context=context,
backfilled=False,
)
target_user = UserID.from_string(event.state_key)
with PreserveLoggingContext():
d = self.notifier.on_new_room_event(
self.notifier.on_new_room_event(
event, event_stream_id, max_stream_id,
extra_users=[target_user],
)
def log_failure(f):
logger.warn(
"Failed to notify about %s: %s",
event.event_id, f.value
)
d.addErrback(log_failure)
defer.returnValue(event)
@defer.inlineCallbacks
@@ -838,7 +808,21 @@ class FederationHandler(BaseHandler):
target_hosts,
signed_event
)
defer.returnValue(None)
context = yield self.state_handler.compute_event_context(event)
event_stream_id, max_stream_id = yield self.store.persist_event(
event,
context=context,
)
target_user = UserID.from_string(event.state_key)
self.notifier.on_new_room_event(
event, event_stream_id, max_stream_id,
extra_users=[target_user],
)
defer.returnValue(event)
@defer.inlineCallbacks
def _make_and_verify_event(self, target_hosts, room_id, user_id, membership,
@@ -938,18 +922,10 @@ class FederationHandler(BaseHandler):
extra_users.append(target_user)
with PreserveLoggingContext():
d = self.notifier.on_new_room_event(
self.notifier.on_new_room_event(
event, event_stream_id, max_stream_id, extra_users=extra_users
)
def log_failure(f):
logger.warn(
"Failed to notify about %s: %s",
event.event_id, f.value
)
d.addErrback(log_failure)
new_pdu = event
destinations = set()
@@ -1082,7 +1058,7 @@ class FederationHandler(BaseHandler):
return self.store.get_min_depth(context)
@log_function
def _on_user_joined(self, user, room_id):
def user_joined_room(self, user, room_id):
waiters = self.waiting_for_join_list.get(
(user.to_string(), room_id),
[]
@@ -1092,8 +1068,7 @@ class FederationHandler(BaseHandler):
@defer.inlineCallbacks
@log_function
def _handle_new_event(self, origin, event, state=None, backfilled=False,
current_state=None, auth_events=None):
def _handle_new_event(self, origin, event, state=None, auth_events=None):
outlier = event.internal_metadata.is_outlier()
@@ -1103,12 +1078,16 @@ class FederationHandler(BaseHandler):
auth_events=auth_events,
)
if not event.internal_metadata.is_outlier():
action_generator = ActionGenerator(self.hs)
yield action_generator.handle_push_actions_for_event(
event, context, self
)
event_stream_id, max_stream_id = yield self.store.persist_event(
event,
context=context,
backfilled=backfilled,
is_new_state=(not outlier and not backfilled),
current_state=current_state,
is_new_state=not outlier,
)
defer.returnValue((context, event_stream_id, max_stream_id))
@@ -1176,7 +1155,13 @@ class FederationHandler(BaseHandler):
try:
self.auth.check(e, auth_events=auth_for_e)
except AuthError as err:
except SynapseError as err:
# we may get SynapseErrors here as well as AuthErrors. For
# instance, there are a couple of (ancient) events in some
# rooms whose senders do not have the correct sigil; these
# cause SynapseErrors in auth.check. We don't want to give up
# the attempt to federate altogether in such cases.
logger.warn(
"Rejecting %s because %s",
e.event_id, err.msg
@@ -1200,7 +1185,6 @@ class FederationHandler(BaseHandler):
event_stream_id, max_stream_id = yield self.store.persist_event(
event, new_event_context,
backfilled=False,
is_new_state=True,
current_state=state,
)
@@ -1644,31 +1628,43 @@ class FederationHandler(BaseHandler):
@defer.inlineCallbacks
@log_function
def exchange_third_party_invite(self, invite):
sender = invite["sender"]
room_id = invite["room_id"]
def exchange_third_party_invite(
self,
sender_user_id,
target_user_id,
room_id,
signed,
):
third_party_invite = {
"signed": signed,
}
event_dict = {
"type": EventTypes.Member,
"content": {
"membership": Membership.INVITE,
"third_party_invite": invite,
"third_party_invite": third_party_invite,
},
"room_id": room_id,
"sender": sender,
"state_key": invite["mxid"],
"sender": sender_user_id,
"state_key": target_user_id,
}
if (yield self.auth.check_host_in_room(room_id, self.hs.hostname)):
builder = self.event_builder_factory.new(event_dict)
EventValidator().validate_new(builder)
event, context = yield self._create_new_client_event(builder=builder)
event, context = yield self.add_display_name_to_third_party_invite(
event_dict, event, context
)
self.auth.check(event, context.current_state)
yield self._validate_keyserver(event, auth_events=context.current_state)
yield self._check_signature(event, auth_events=context.current_state)
member_handler = self.hs.get_handlers().room_member_handler
yield member_handler.change_membership(event, context)
yield member_handler.send_membership_event(None, event, context)
else:
destinations = set([x.split(":", 1)[-1] for x in (sender, room_id)])
destinations = set(x.split(":", 1)[-1] for x in (sender_user_id, room_id))
yield self.replication_layer.forward_third_party_invite(
destinations,
room_id,
@@ -1684,27 +1680,104 @@ class FederationHandler(BaseHandler):
builder=builder,
)
event, context = yield self.add_display_name_to_third_party_invite(
event_dict, event, context
)
self.auth.check(event, auth_events=context.current_state)
yield self._validate_keyserver(event, auth_events=context.current_state)
yield self._check_signature(event, auth_events=context.current_state)
returned_invite = yield self.send_invite(origin, event)
# TODO: Make sure the signatures actually are correct.
event.signatures.update(returned_invite.signatures)
member_handler = self.hs.get_handlers().room_member_handler
yield member_handler.change_membership(event, context)
yield member_handler.send_membership_event(None, event, context)
@defer.inlineCallbacks
def _validate_keyserver(self, event, auth_events):
token = event.content["third_party_invite"]["signed"]["token"]
def add_display_name_to_third_party_invite(self, event_dict, event, context):
key = (
EventTypes.ThirdPartyInvite,
event.content["third_party_invite"]["signed"]["token"]
)
original_invite = context.current_state.get(key)
if not original_invite:
logger.info(
"Could not find invite event for third_party_invite - "
"discarding: %s" % (event_dict,)
)
return
display_name = original_invite.content["display_name"]
event_dict["content"]["third_party_invite"]["display_name"] = display_name
builder = self.event_builder_factory.new(event_dict)
EventValidator().validate_new(builder)
event, context = yield self._create_new_client_event(builder=builder)
defer.returnValue((event, context))
@defer.inlineCallbacks
def _check_signature(self, event, auth_events):
"""
Checks that the signature in the event is consistent with its invite.
:param event (Event): The m.room.member event to check
:param auth_events (dict<(event type, state_key), event>)
:raises
AuthError if signature didn't match any keys, or key has been
revoked,
SynapseError if a transient error meant a key couldn't be checked
for revocation.
"""
signed = event.content["third_party_invite"]["signed"]
token = signed["token"]
invite_event = auth_events.get(
(EventTypes.ThirdPartyInvite, token,)
)
if not invite_event:
raise AuthError(403, "Could not find invite")
last_exception = None
for public_key_object in self.hs.get_auth().get_public_keys(invite_event):
try:
for server, signature_block in signed["signatures"].items():
for key_name, encoded_signature in signature_block.items():
if not key_name.startswith("ed25519:"):
continue
public_key = public_key_object["public_key"]
verify_key = decode_verify_key_bytes(
key_name,
decode_base64(public_key)
)
verify_signed_json(signed, server, verify_key)
if "key_validity_url" in public_key_object:
yield self._check_key_revocation(
public_key,
public_key_object["key_validity_url"]
)
return
except Exception as e:
last_exception = e
raise last_exception
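The inner verification can be exercised on its own with the same signedjson and unpaddedbase64 helpers this module imports; the server name, key version and payload below are made-up test data, and the real handler additionally consults key_validity_url for revocation:

from signedjson.key import decode_verify_key_bytes, generate_signing_key, get_verify_key
from signedjson.sign import sign_json, verify_signed_json
from unpaddedbase64 import decode_base64, encode_base64

# An identity server signs the third-party-invite blob with its ed25519 key.
signing_key = generate_signing_key("1")
signed = sign_json({"mxid": "@alice:example.org", "token": "sometoken"},
                   "id.example.org", signing_key)

# The invite event carries the server's public key, base64-encoded.
public_key_base64 = encode_base64(get_verify_key(signing_key).encode())

# Verification, as in _check_signature: decode the key and check every
# ed25519 signature block; verify_signed_json raises if a signature is bad.
for server, signature_block in signed["signatures"].items():
    for key_name in signature_block:
        if not key_name.startswith("ed25519:"):
            continue
        verify_key = decode_verify_key_bytes(key_name,
                                             decode_base64(public_key_base64))
        verify_signed_json(signed, server, verify_key)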
@defer.inlineCallbacks
def _check_key_revocation(self, public_key, url):
"""
Checks whether public_key has been revoked.
:param public_key (str): base-64 encoded public key.
:param url (str): Key revocation URL.
:raises
AuthError if the key has been revoked.
SynapseError if a transient error meant a key couldn't be checked
for revocation.
"""
try:
response = yield self.hs.get_simple_http_client().get_json(
invite_event.content["key_validity_url"],
{"public_key": invite_event.content["public_key"]}
url,
{"public_key": public_key}
)
except Exception:
raise SynapseError(

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -20,7 +20,6 @@ from synapse.api.errors import (
CodeMessageException
)
from ._base import BaseHandler
from synapse.http.client import SimpleHttpClient
from synapse.util.async import run_on_reactor
from synapse.api.errors import SynapseError
@@ -35,17 +34,17 @@ class IdentityHandler(BaseHandler):
def __init__(self, hs):
super(IdentityHandler, self).__init__(hs)
self.http_client = hs.get_simple_http_client()
self.trusted_id_servers = set(hs.config.trusted_third_party_id_servers)
self.trust_any_id_server_just_for_testing_do_not_use = (
hs.config.use_insecure_ssl_client_just_for_testing_do_not_use
)
@defer.inlineCallbacks
def threepid_from_creds(self, creds):
yield run_on_reactor()
# TODO: get this from the homeserver rather than creating a new one for
# each request
http_client = SimpleHttpClient(self.hs)
# XXX: make this configurable!
# trustedIdServers = ['matrix.org', 'localhost:8090']
trustedIdServers = ['matrix.org', 'vector.im']
if 'id_server' in creds:
id_server = creds['id_server']
elif 'idServer' in creds:
@@ -60,14 +59,23 @@ class IdentityHandler(BaseHandler):
else:
raise SynapseError(400, "No client_secret in creds")
if id_server not in trustedIdServers:
logger.warn('%s is not a trusted ID server: rejecting 3pid ' +
'credentials', id_server)
defer.returnValue(None)
if id_server not in self.trusted_id_servers:
if self.trust_any_id_server_just_for_testing_do_not_use:
logger.warn(
"Trusting untrustworthy ID server %r even though it isn't"
" in the trusted id list for testing because"
" 'use_insecure_ssl_client_just_for_testing_do_not_use'"
" is set in the config",
id_server,
)
else:
logger.warn('%s is not a trusted ID server: rejecting 3pid ' +
'credentials', id_server)
defer.returnValue(None)
data = {}
try:
data = yield http_client.get_json(
data = yield self.http_client.get_json(
"https://%s%s" % (
id_server,
"/_matrix/identity/api/v1/3pid/getValidated3pid"
@@ -85,7 +93,6 @@ class IdentityHandler(BaseHandler):
def bind_threepid(self, creds, mxid):
yield run_on_reactor()
logger.debug("binding threepid %r to %s", creds, mxid)
http_client = SimpleHttpClient(self.hs)
data = None
if 'id_server' in creds:
@@ -103,7 +110,7 @@ class IdentityHandler(BaseHandler):
raise SynapseError(400, "No client_secret in creds")
try:
data = yield http_client.post_urlencoded_get_json(
data = yield self.http_client.post_urlencoded_get_json(
"https://%s%s" % (
id_server, "/_matrix/identity/api/v1/3pid/bind"
),
@@ -121,7 +128,6 @@ class IdentityHandler(BaseHandler):
@defer.inlineCallbacks
def requestEmailToken(self, id_server, email, client_secret, send_attempt, **kwargs):
yield run_on_reactor()
http_client = SimpleHttpClient(self.hs)
params = {
'email': email,
@@ -131,7 +137,7 @@ class IdentityHandler(BaseHandler):
params.update(kwargs)
try:
data = yield http_client.post_urlencoded_get_json(
data = yield self.http_client.post_urlencoded_get_json(
"https://%s%s" % (
id_server,
"/_matrix/identity/api/v1/validate/email/requestToken"

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014 - 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,21 +16,27 @@
from twisted.internet import defer
from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import SynapseError, AuthError, Codes
from synapse.api.errors import AuthError, Codes, SynapseError
from synapse.streams.config import PaginationConfig
from synapse.events.utils import serialize_event
from synapse.events.validator import EventValidator
from synapse.util import unwrapFirstError
from synapse.util.logcontext import PreserveLoggingContext
from synapse.util.caches.snapshot_cache import SnapshotCache
from synapse.types import UserID, RoomStreamToken, StreamToken
from ._base import BaseHandler
from canonicaljson import encode_canonical_json
import logging
logger = logging.getLogger(__name__)
def collect_presencelike_data(distributor, user, content):
return distributor.fire("collect_presencelike_data", user, content)
class MessageHandler(BaseHandler):
def __init__(self, hs):
@@ -39,6 +45,7 @@ class MessageHandler(BaseHandler):
self.state = hs.get_state_handler()
self.clock = hs.get_clock()
self.validator = EventValidator()
self.snapshot_cache = SnapshotCache()
@defer.inlineCallbacks
def get_message(self, msg_id=None, room_id=None, sender_id=None,
@@ -70,21 +77,20 @@ class MessageHandler(BaseHandler):
defer.returnValue(None)
@defer.inlineCallbacks
def get_messages(self, user_id=None, room_id=None, pagin_config=None,
as_client_event=True, is_guest=False):
def get_messages(self, requester, room_id=None, pagin_config=None,
as_client_event=True):
"""Get messages in a room.
Args:
user_id (str): The user requesting messages.
requester (Requester): The user requesting messages.
room_id (str): The room they want messages from.
pagin_config (synapse.api.streams.PaginationConfig): The pagination
config rules to apply, if any.
as_client_event (bool): True to get events in client-server format.
is_guest (bool): Whether the requesting user is a guest (as opposed
to a fully registered user).
Returns:
dict: Pagination API results
"""
user_id = requester.user.to_string()
data_source = self.hs.get_event_sources().sources["room"]
if pagin_config.from_token:
@@ -98,8 +104,6 @@ class MessageHandler(BaseHandler):
room_token = pagin_config.from_token.room_key
room_token = RoomStreamToken.parse(room_token)
if room_token.topological is None:
raise SynapseError(400, "Invalid token")
pagin_config.from_token = pagin_config.from_token.copy_and_replace(
"room_key", str(room_token)
@@ -107,36 +111,37 @@ class MessageHandler(BaseHandler):
source_config = pagin_config.get_source_config("room")
if not is_guest:
member_event = yield self.auth.check_user_was_in_room(room_id, user_id)
if member_event.membership == Membership.LEAVE:
# If they have left the room then clamp the token to be before
# they left the room.
# If they're a guest, we'll just 403 them if they're asking for
# events they can't see.
leave_token = yield self.store.get_topological_token_for_event(
member_event.event_id
)
leave_token = RoomStreamToken.parse(leave_token)
if leave_token.topological < room_token.topological:
source_config.from_key = str(leave_token)
if source_config.direction == "f":
if source_config.to_key is None:
source_config.to_key = str(leave_token)
else:
to_token = RoomStreamToken.parse(source_config.to_key)
if leave_token.topological < to_token.topological:
source_config.to_key = str(leave_token)
yield self.hs.get_handlers().federation_handler.maybe_backfill(
room_id, room_token.topological
membership, member_event_id = yield self._check_in_room_or_world_readable(
room_id, user_id
)
user = UserID.from_string(user_id)
if source_config.direction == 'b':
# if we're going backwards, we might need to backfill. This
# requires that we have a topo token.
if room_token.topological:
max_topo = room_token.topological
else:
max_topo = yield self.store.get_max_topological_token_for_stream_and_room(
room_id, room_token.stream
)
if membership == Membership.LEAVE:
# If they have left the room then clamp the token to be before
# they left the room, to save the effort of loading from the
# database.
leave_token = yield self.store.get_topological_token_for_event(
member_event_id
)
leave_token = RoomStreamToken.parse(leave_token)
if leave_token.topological < max_topo:
source_config.from_key = str(leave_token)
yield self.hs.get_handlers().federation_handler.maybe_backfill(
room_id, max_topo
)
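The leave-token clamp above only ever moves the pagination start earlier (in topological order), never later; a trivial standalone illustration of that rule:

def clamp_to_leave(from_topo, leave_topo):
    # Never paginate past the point at which the user left the room.
    return leave_topo if leave_topo < from_topo else from_topo

assert clamp_to_leave(from_topo=120, leave_topo=80) == 80   # clamp applies
assert clamp_to_leave(from_topo=50, leave_topo=80) == 50    # already earlier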
events, next_key = yield data_source.get_pagination_rows(
user, source_config, room_id
requester.user, source_config, room_id
)
next_token = pagin_config.from_token.copy_and_replace(
@@ -150,7 +155,11 @@ class MessageHandler(BaseHandler):
"end": next_token.to_string(),
})
events = yield self._filter_events_for_client(user_id, events, is_guest=is_guest)
events = yield self._filter_events_for_client(
user_id,
events,
is_peeking=(member_event_id is None),
)
time_now = self.clock.time_msec()
@@ -166,40 +175,46 @@ class MessageHandler(BaseHandler):
defer.returnValue(chunk)
@defer.inlineCallbacks
def create_and_send_event(self, event_dict, ratelimit=True,
token_id=None, txn_id=None, is_guest=False):
""" Given a dict from a client, create and handle a new event.
def create_event(self, event_dict, token_id=None, txn_id=None):
"""
Given a dict from a client, create a new event.
Creates a FrozenEvent object, filling out auth_events, prev_events,
etc.
Adds display names to Join membership events.
Persists and notifies local clients and federation.
Args:
event_dict (dict): An entire event
Returns:
Tuple of created event (FrozenEvent), Context
"""
builder = self.event_builder_factory.new(event_dict)
self.validator.validate_new(builder)
if ratelimit:
self.ratelimit(builder.user_id)
# TODO(paul): Why does 'event' not have a 'user' object?
user = UserID.from_string(builder.user_id)
assert self.hs.is_mine(user), "User must be our own: %s" % (user,)
if builder.type == EventTypes.Member:
membership = builder.content.get("membership", None)
target = UserID.from_string(builder.state_key)
if membership == Membership.JOIN:
joinee = UserID.from_string(builder.state_key)
# If event doesn't include a display name, add one.
yield self.distributor.fire(
"collect_presencelike_data",
joinee,
builder.content
yield collect_presencelike_data(
self.distributor, target, builder.content
)
elif membership == Membership.INVITE:
profile = self.hs.get_handlers().profile_handler
content = builder.content
try:
content["displayname"] = yield profile.get_displayname(target)
content["avatar_url"] = yield profile.get_avatar_url(target)
except Exception as e:
logger.info(
"Failed to get profile information for %r: %s",
target, e
)
if token_id is not None:
builder.internal_metadata.token_id = token_id
@@ -210,21 +225,84 @@ class MessageHandler(BaseHandler):
event, context = yield self._create_new_client_event(
builder=builder,
)
defer.returnValue((event, context))
@defer.inlineCallbacks
def send_nonmember_event(self, requester, event, context, ratelimit=True):
"""
Persists and notifies local clients and federation of an event.
Args:
event (FrozenEvent) the event to send.
context (Context) the context of the event.
ratelimit (bool): Whether to rate limit this send.
"""
if event.type == EventTypes.Member:
member_handler = self.hs.get_handlers().room_member_handler
yield member_handler.change_membership(event, context, is_guest=is_guest)
else:
yield self.handle_new_client_event(
event=event,
context=context,
raise SynapseError(
500,
"Tried to send member event through non-member codepath"
)
user = UserID.from_string(event.sender)
assert self.hs.is_mine(user), "User must be our own: %s" % (user,)
if event.is_state():
prev_state = self.deduplicate_state_event(event, context)
if prev_state is not None:
defer.returnValue(prev_state)
yield self.handle_new_client_event(
requester=requester,
event=event,
context=context,
ratelimit=ratelimit,
)
if event.type == EventTypes.Message:
presence = self.hs.get_handlers().presence_handler
with PreserveLoggingContext():
presence.bump_presence_active_time(user)
yield presence.bump_presence_active_time(user)
def deduplicate_state_event(self, event, context):
"""
Checks whether event is in the latest resolved state in context.
If so, returns the version of the event in context.
Otherwise, returns None.
"""
prev_event = context.current_state.get((event.type, event.state_key))
if prev_event and event.user_id == prev_event.user_id:
prev_content = encode_canonical_json(prev_event.content)
next_content = encode_canonical_json(event.content)
if prev_content == next_content:
return prev_event
return None
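deduplicate_state_event leans on canonical JSON so that content comparison ignores key order and formatting; the canonicaljson import above can be tried standalone:

from canonicaljson import encode_canonical_json

prev_content = {"membership": "join", "displayname": "Alice"}
next_content = {"displayname": "Alice", "membership": "join"}  # same data, reordered
assert encode_canonical_json(prev_content) == encode_canonical_json(next_content)

changed = {"membership": "join", "displayname": "Alice (away)"}
assert encode_canonical_json(prev_content) != encode_canonical_json(changed)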
@defer.inlineCallbacks
def create_and_send_nonmember_event(
self,
requester,
event_dict,
ratelimit=True,
txn_id=None
):
"""
Creates an event, then sends it.
See self.create_event and self.send_nonmember_event.
"""
event, context = yield self.create_event(
event_dict,
token_id=requester.access_token_id,
txn_id=txn_id
)
yield self.send_nonmember_event(
requester,
event,
context,
ratelimit=ratelimit,
)
defer.returnValue(event)
@defer.inlineCallbacks
@@ -240,7 +318,7 @@ class MessageHandler(BaseHandler):
SynapseError if something went wrong.
"""
membership, membership_event_id = yield self._check_in_room_or_world_readable(
room_id, user_id, is_guest
room_id, user_id
)
if membership == Membership.JOIN:
@@ -257,7 +335,7 @@ class MessageHandler(BaseHandler):
defer.returnValue(data)
@defer.inlineCallbacks
def _check_in_room_or_world_readable(self, room_id, user_id, is_guest):
def _check_in_room_or_world_readable(self, room_id, user_id):
try:
# check_user_was_in_room will return the most recent membership
# event for the user if:
@@ -267,7 +345,7 @@ class MessageHandler(BaseHandler):
member_event = yield self.auth.check_user_was_in_room(room_id, user_id)
defer.returnValue((member_event.membership, member_event.event_id))
return
except AuthError, auth_error:
except AuthError:
visibility = yield self.state_handler.get_current_state(
room_id, EventTypes.RoomHistoryVisibility, ""
)
@@ -277,8 +355,6 @@ class MessageHandler(BaseHandler):
):
defer.returnValue((Membership.JOIN, None))
return
if not is_guest:
raise auth_error
raise AuthError(
403, "Guest access not allowed", errcode=Codes.GUEST_ACCESS_FORBIDDEN
)
@@ -296,7 +372,7 @@ class MessageHandler(BaseHandler):
A list of dicts representing state events. [{}, {}, {}]
"""
membership, membership_event_id = yield self._check_in_room_or_world_readable(
room_id, user_id, is_guest
room_id, user_id
)
if membership == Membership.JOIN:
@@ -312,7 +388,6 @@ class MessageHandler(BaseHandler):
[serialize_event(c, now) for c in room_state.values()]
)
@defer.inlineCallbacks
def snapshot_all_rooms(self, user_id=None, pagin_config=None,
as_client_event=True, include_archived=False):
"""Retrieve a snapshot of all rooms the user is invited or has joined.
@@ -332,6 +407,28 @@ class MessageHandler(BaseHandler):
is joined on, may return a "messages" key with messages, depending
on the specified PaginationConfig.
"""
key = (
user_id,
pagin_config.from_token,
pagin_config.to_token,
pagin_config.direction,
pagin_config.limit,
as_client_event,
include_archived,
)
now_ms = self.clock.time_msec()
result = self.snapshot_cache.get(now_ms, key)
if result is not None:
return result
return self.snapshot_cache.set(now_ms, key, self._snapshot_all_rooms(
user_id, pagin_config, as_client_event, include_archived
))
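The cache key is the full request shape, so identical /initialSync requests made within the cache window can share one result. A rough in-memory stand-in (plain values rather than deferreds, and not Synapse's SnapshotCache) showing the get/set pattern with millisecond timestamps:

import time

class TinySnapshotCache:
    def __init__(self, ttl_ms=5 * 60 * 1000):
        self._ttl_ms = ttl_ms
        self._entries = {}  # key -> (expiry_ms, value)

    def get(self, now_ms, key):
        entry = self._entries.get(key)
        if entry and entry[0] > now_ms:
            return entry[1]
        self._entries.pop(key, None)  # expired or missing
        return None

    def set(self, now_ms, key, value):
        self._entries[key] = (now_ms + self._ttl_ms, value)
        return value

cache = TinySnapshotCache()
key = ("@alice:example.org", None, None, "f", 10, True, False)
now_ms = int(time.time() * 1000)
assert cache.get(now_ms, key) is None
cache.set(now_ms, key, {"rooms": []})
assert cache.get(now_ms, key) == {"rooms": []}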
@defer.inlineCallbacks
def _snapshot_all_rooms(self, user_id=None, pagin_config=None,
as_client_event=True, include_archived=False):
memberships = [Membership.INVITE, Membership.JOIN]
if include_archived:
memberships.append(Membership.LEAVE)
@@ -359,6 +456,10 @@ class MessageHandler(BaseHandler):
tags_by_room = yield self.store.get_tags_for_user(user_id)
account_data, account_data_by_room = (
yield self.store.get_account_data_for_user(user_id)
)
public_room_ids = yield self.store.get_public_room_ids()
limit = pagin_config.limit
@@ -436,14 +537,22 @@ class MessageHandler(BaseHandler):
for c in current_state.values()
]
private_user_data = []
account_data_events = []
tags = tags_by_room.get(event.room_id)
if tags:
private_user_data.append({
account_data_events.append({
"type": "m.tag",
"content": {"tags": tags},
})
d["private_user_data"] = private_user_data
account_data = account_data_by_room.get(event.room_id, {})
for account_data_type, content in account_data.items():
account_data_events.append({
"type": account_data_type,
"content": content,
})
d["account_data"] = account_data_events
except:
logger.exception("Failed to get snapshot")
@@ -456,9 +565,17 @@ class MessageHandler(BaseHandler):
consumeErrors=True
).addErrback(unwrapFirstError)
account_data_events = []
for account_data_type, content in account_data.items():
account_data_events.append({
"type": account_data_type,
"content": content,
})
ret = {
"rooms": rooms_ret,
"presence": presence,
"account_data": account_data_events,
"receipts": receipt,
"end": now_token.to_string(),
}
@@ -466,13 +583,13 @@ class MessageHandler(BaseHandler):
defer.returnValue(ret)
@defer.inlineCallbacks
def room_initial_sync(self, user_id, room_id, pagin_config=None, is_guest=False):
def room_initial_sync(self, requester, room_id, pagin_config=None):
"""Capture the a snapshot of a room. If user is currently a member of
the room this will be what is currently in the room. If the user left
the room this will be what was in the room when they left.
Args:
user_id(str): The user to get a snapshot for.
requester(Requester): The user to get a snapshot for.
room_id(str): The room to get a snapshot of.
pagin_config(synapse.streams.config.PaginationConfig):
The pagination config used to determine how many messages to
@@ -483,35 +600,44 @@ class MessageHandler(BaseHandler):
A JSON serialisable dict with the snapshot of the room.
"""
user_id = requester.user.to_string()
membership, member_event_id = yield self._check_in_room_or_world_readable(
room_id,
user_id,
is_guest
room_id, user_id,
)
is_peeking = member_event_id is None
if membership == Membership.JOIN:
result = yield self._room_initial_sync_joined(
user_id, room_id, pagin_config, membership, is_guest
user_id, room_id, pagin_config, membership, is_peeking
)
elif membership == Membership.LEAVE:
result = yield self._room_initial_sync_parted(
user_id, room_id, pagin_config, membership, member_event_id, is_guest
user_id, room_id, pagin_config, membership, member_event_id, is_peeking
)
private_user_data = []
account_data_events = []
tags = yield self.store.get_tags_for_room(user_id, room_id)
if tags:
private_user_data.append({
account_data_events.append({
"type": "m.tag",
"content": {"tags": tags},
})
result["private_user_data"] = private_user_data
account_data = yield self.store.get_account_data_for_room(user_id, room_id)
for account_data_type, content in account_data.items():
account_data_events.append({
"type": account_data_type,
"content": content,
})
result["account_data"] = account_data_events
defer.returnValue(result)
@defer.inlineCallbacks
def _room_initial_sync_parted(self, user_id, room_id, pagin_config,
membership, member_event_id, is_guest):
membership, member_event_id, is_peeking):
room_state = yield self.store.get_state_for_events(
[member_event_id], None
)
@@ -533,11 +659,11 @@ class MessageHandler(BaseHandler):
)
messages = yield self._filter_events_for_client(
user_id, messages, is_guest=is_guest
user_id, messages, is_peeking=is_peeking
)
start_token = StreamToken(token[0], 0, 0, 0, 0)
end_token = StreamToken(token[1], 0, 0, 0, 0)
start_token = StreamToken.START.copy_and_replace("room_key", token[0])
end_token = StreamToken.START.copy_and_replace("room_key", token[1])
time_now = self.clock.time_msec()
@@ -556,15 +682,11 @@ class MessageHandler(BaseHandler):
@defer.inlineCallbacks
def _room_initial_sync_joined(self, user_id, room_id, pagin_config,
membership, is_guest):
membership, is_peeking):
current_state = yield self.state.get_current_state(
room_id=room_id,
)
# TODO(paul): I wish I was called with user objects not user_id
# strings...
auth_user = UserID.from_string(user_id)
# TODO: These concurrently
time_now = self.clock.time_msec()
state = [
@@ -588,23 +710,26 @@ class MessageHandler(BaseHandler):
@defer.inlineCallbacks
def get_presence():
states = {}
if not is_guest:
states = yield presence_handler.get_states(
target_users=[UserID.from_string(m.user_id) for m in room_members],
auth_user=auth_user,
as_event=True,
check_auth=False,
)
states = yield presence_handler.get_states(
[m.user_id for m in room_members],
as_event=True,
)
defer.returnValue(states.values())
defer.returnValue(states)
receipts_handler = self.hs.get_handlers().receipts_handler
@defer.inlineCallbacks
def get_receipts():
receipts_handler = self.hs.get_handlers().receipts_handler
receipts = yield receipts_handler.get_receipts_for_room(
room_id,
now_token.receipt_key
)
defer.returnValue(receipts)
presence, receipts, (messages, token) = yield defer.gatherResults(
[
get_presence(),
receipts_handler.get_receipts_for_room(room_id, now_token.receipt_key),
get_receipts(),
self.store.get_recent_events_for_room(
room_id,
limit=limit,
@@ -615,7 +740,7 @@ class MessageHandler(BaseHandler):
).addErrback(unwrapFirstError)
messages = yield self._filter_events_for_client(
user_id, messages, is_guest=is_guest, require_all_visible_for_guests=False
user_id, messages, is_peeking=is_peeking,
)
start_token = now_token.copy_and_replace("room_key", token[0])
@@ -634,7 +759,7 @@ class MessageHandler(BaseHandler):
"presence": presence,
"receipts": receipts,
}
if not is_guest:
if not is_peeking:
ret["membership"] = membership
defer.returnValue(ret)

File diff suppressed because it is too large.
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,8 +16,7 @@
from twisted.internet import defer
from synapse.api.errors import SynapseError, AuthError, CodeMessageException
from synapse.api.constants import EventTypes, Membership
from synapse.types import UserID
from synapse.types import UserID, Requester
from synapse.util import unwrapFirstError
from ._base import BaseHandler
@@ -28,6 +27,14 @@ import logging
logger = logging.getLogger(__name__)
def changed_presencelike_data(distributor, user, state):
return distributor.fire("changed_presencelike_data", user, state)
def collect_presencelike_data(distributor, user, content):
return distributor.fire("collect_presencelike_data", user, content)
class ProfileHandler(BaseHandler):
def __init__(self, hs):
@@ -41,6 +48,9 @@ class ProfileHandler(BaseHandler):
distributor = hs.get_distributor()
self.distributor = distributor
distributor.declare("collect_presencelike_data")
distributor.declare("changed_presencelike_data")
distributor.observe("registered_user", self.registered_user)
distributor.observe(
@@ -79,13 +89,13 @@ class ProfileHandler(BaseHandler):
defer.returnValue(result["displayname"])
@defer.inlineCallbacks
def set_displayname(self, target_user, auth_user, new_displayname):
def set_displayname(self, target_user, requester, new_displayname):
"""target_user is the user whose displayname is to be changed;
auth_user is the user attempting to make this change."""
if not self.hs.is_mine(target_user):
raise SynapseError(400, "User is not hosted on this Home Server")
if target_user != auth_user:
if target_user != requester.user:
raise AuthError(400, "Cannot set another user's displayname")
if new_displayname == '':
@@ -95,13 +105,11 @@ class ProfileHandler(BaseHandler):
target_user.localpart, new_displayname
)
yield self.distributor.fire(
"changed_presencelike_data", target_user, {
"displayname": new_displayname,
}
)
yield changed_presencelike_data(self.distributor, target_user, {
"displayname": new_displayname,
})
yield self._update_join_states(target_user)
yield self._update_join_states(requester)
@defer.inlineCallbacks
def get_avatar_url(self, target_user):
@@ -131,26 +139,24 @@ class ProfileHandler(BaseHandler):
defer.returnValue(result["avatar_url"])
@defer.inlineCallbacks
def set_avatar_url(self, target_user, auth_user, new_avatar_url):
def set_avatar_url(self, target_user, requester, new_avatar_url):
"""target_user is the user whose avatar_url is to be changed;
auth_user is the user attempting to make this change."""
if not self.hs.is_mine(target_user):
raise SynapseError(400, "User is not hosted on this Home Server")
if target_user != auth_user:
if target_user != requester.user:
raise AuthError(400, "Cannot set another user's avatar_url")
yield self.store.set_profile_avatar_url(
target_user.localpart, new_avatar_url
)
yield self.distributor.fire(
"changed_presencelike_data", target_user, {
"avatar_url": new_avatar_url,
}
)
yield changed_presencelike_data(self.distributor, target_user, {
"avatar_url": new_avatar_url,
})
yield self._update_join_states(target_user)
yield self._update_join_states(requester)
@defer.inlineCallbacks
def collect_presencelike_data(self, user, state):
@@ -193,34 +199,30 @@ class ProfileHandler(BaseHandler):
defer.returnValue(response)
@defer.inlineCallbacks
def _update_join_states(self, user):
def _update_join_states(self, requester):
user = requester.user
if not self.hs.is_mine(user):
return
self.ratelimit(user.to_string())
self.ratelimit(requester)
joins = yield self.store.get_rooms_for_user(
user.to_string(),
)
for j in joins:
content = {
"membership": Membership.JOIN,
}
yield self.distributor.fire(
"collect_presencelike_data", user, content
)
msg_handler = self.hs.get_handlers().message_handler
handler = self.hs.get_handlers().room_member_handler
try:
yield msg_handler.create_and_send_event({
"type": EventTypes.Member,
"room_id": j.room_id,
"state_key": user.to_string(),
"content": content,
"sender": user.to_string()
}, ratelimit=False)
# Assume the user isn't a guest because we don't let guests set
# profile or avatar data.
requester = Requester(user, "", False)
yield handler.update_membership(
requester,
user,
j.room_id,
"join", # We treat a profile update like a join.
ratelimit=False, # Try to hide that these events aren't atomic.
)
except Exception as e:
logger.warn(
"Failed to update join event for room %s - %s",

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -36,8 +36,6 @@ class ReceiptsHandler(BaseHandler):
)
self.clock = self.hs.get_clock()
self._receipt_cache = None
@defer.inlineCallbacks
def received_client_receipt(self, room_id, receipt_type, user_id,
event_id):

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014 - 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -21,7 +21,6 @@ from synapse.api.errors import (
AuthError, Codes, SynapseError, RegistrationError, InvalidCaptchaError
)
from ._base import BaseHandler
import synapse.util.stringutils as stringutils
from synapse.util.async import run_on_reactor
from synapse.http.client import CaptchaServerHttpClient
@@ -31,45 +30,79 @@ import urllib
logger = logging.getLogger(__name__)
def registered_user(distributor, user):
return distributor.fire("registered_user", user)
class RegistrationHandler(BaseHandler):
def __init__(self, hs):
super(RegistrationHandler, self).__init__(hs)
self.auth = hs.get_auth()
self.distributor = hs.get_distributor()
self.distributor.declare("registered_user")
self.captcha_client = CaptchaServerHttpClient(hs)
self._next_generated_user_id = None
@defer.inlineCallbacks
def check_username(self, localpart):
def check_username(self, localpart, guest_access_token=None,
assigned_user_id=None):
yield run_on_reactor()
if urllib.quote(localpart) != localpart:
if urllib.quote(localpart.encode('utf-8')) != localpart:
raise SynapseError(
400,
"User ID must only contain characters which do not"
" require URL encoding."
"User ID can only contain characters a-z, 0-9, or '_-./'",
Codes.INVALID_USERNAME
)
user = UserID(localpart, self.hs.hostname)
user_id = user.to_string()
yield self.check_user_id_is_valid(user_id)
if assigned_user_id:
if user_id == assigned_user_id:
return
else:
raise SynapseError(
400,
"A different user ID has already been registered for this session",
)
yield self.check_user_id_not_appservice_exclusive(user_id)
users = yield self.store.get_users_by_id_case_insensitive(user_id)
if users:
raise SynapseError(
400,
"User ID already taken.",
errcode=Codes.USER_IN_USE,
)
if not guest_access_token:
raise SynapseError(
400,
"User ID already taken.",
errcode=Codes.USER_IN_USE,
)
user_data = yield self.auth.get_user_from_macaroon(guest_access_token)
if not user_data["is_guest"] or user_data["user"].localpart != localpart:
raise AuthError(
403,
"Cannot register taken user ID without valid guest "
"credentials for that user.",
errcode=Codes.FORBIDDEN,
)
@defer.inlineCallbacks
def register(self, localpart=None, password=None, generate_token=True):
def register(
self,
localpart=None,
password=None,
generate_token=True,
guest_access_token=None,
make_guest=False
):
"""Registers a new client on the server.
Args:
localpart : The local part of the user ID to register. If None,
one will be randomly generated.
one will be generated.
password (str) : The password to assign to this user so they can
login again. This can be None which means they cannot login again
via a password (e.g. the user is an application service user).
@@ -84,7 +117,19 @@ class RegistrationHandler(BaseHandler):
password_hash = self.auth_handler().hash(password)
if localpart:
yield self.check_username(localpart)
yield self.check_username(localpart, guest_access_token=guest_access_token)
was_guest = guest_access_token is not None
if not was_guest:
try:
int(localpart)
raise RegistrationError(
400,
"Numeric user IDs are reserved for guest users."
)
except ValueError:
pass
user = UserID(localpart, self.hs.hostname)
user_id = user.to_string()
@@ -95,57 +140,42 @@ class RegistrationHandler(BaseHandler):
yield self.store.register(
user_id=user_id,
token=token,
password_hash=password_hash
password_hash=password_hash,
was_guest=was_guest,
make_guest=make_guest,
)
yield self.distributor.fire("registered_user", user)
yield registered_user(self.distributor, user)
else:
# autogen a random user ID
# autogen a sequential user ID
attempts = 0
user_id = None
token = None
while not user_id:
user = None
while not user:
localpart = yield self._generate_user_id(attempts > 0)
user = UserID(localpart, self.hs.hostname)
user_id = user.to_string()
yield self.check_user_id_not_appservice_exclusive(user_id)
if generate_token:
token = self.auth_handler().generate_access_token(user_id)
try:
localpart = self._generate_user_id()
user = UserID(localpart, self.hs.hostname)
user_id = user.to_string()
yield self.check_user_id_is_valid(user_id)
if generate_token:
token = self.auth_handler().generate_access_token(user_id)
yield self.store.register(
user_id=user_id,
token=token,
password_hash=password_hash)
self.distributor.fire("registered_user", user)
password_hash=password_hash,
make_guest=make_guest
)
except SynapseError:
# if user id is taken, just generate another
user = None
user_id = None
token = None
attempts += 1
if attempts > 5:
raise RegistrationError(
500, "Cannot generate user ID.")
yield registered_user(self.distributor, user)
# create a default avatar for the user
# XXX: ideally clients would explicitly specify one, but given they don't
# and we want consistent and pretty identicons for random users, we'll
# do it here.
try:
auth_user = UserID.from_string(user_id)
media_repository = self.hs.get_resource_for_media_repository()
identicon_resource = media_repository.getChildWithDefault("identicon", None)
upload_resource = media_repository.getChildWithDefault("upload", None)
identicon_bytes = identicon_resource.generate_identicon(user_id, 320, 320)
content_uri = yield upload_resource.create_content(
"image/png", None, identicon_bytes, len(identicon_bytes), auth_user
)
profile_handler = self.hs.get_handlers().profile_handler
profile_handler.set_avatar_url(
auth_user, auth_user, ("%s#auto" % (content_uri,))
)
except NotImplementedError:
pass # make tests pass without messing around creating default avatars
# We used to generate default identicons here, but nowadays
# we want clients to generate their own as part of their branding
# rather than there being consistent matrix-wide ones, so we don't.
defer.returnValue((user_id, token))
@@ -161,13 +191,21 @@ class RegistrationHandler(BaseHandler):
400, "Invalid user localpart for this application service.",
errcode=Codes.EXCLUSIVE
)
service_id = service.id if service.is_exclusive_user(user_id) else None
yield self.check_user_id_not_appservice_exclusive(
user_id, allowed_appservice=service
)
token = self.auth_handler().generate_access_token(user_id)
yield self.store.register(
user_id=user_id,
token=token,
password_hash=""
password_hash="",
appservice_id=service_id,
)
self.distributor.fire("registered_user", user)
yield registered_user(self.distributor, user)
defer.returnValue((user_id, token))
@defer.inlineCallbacks
@@ -203,11 +241,11 @@ class RegistrationHandler(BaseHandler):
400,
"User ID must only contain characters which do not"
" require URL encoding."
)
)
user = UserID(localpart, self.hs.hostname)
user_id = user.to_string()
yield self.check_user_id_is_valid(user_id)
yield self.check_user_id_not_appservice_exclusive(user_id)
token = self.auth_handler().generate_access_token(user_id)
try:
yield self.store.register(
@@ -215,8 +253,8 @@ class RegistrationHandler(BaseHandler):
token=token,
password_hash=None
)
yield self.distributor.fire("registered_user", user)
except Exception, e:
yield registered_user(self.distributor, user)
except Exception as e:
yield self.store.add_access_token_to_user(user_id, token)
# Ignore Registration errors
logger.exception(e)
@@ -259,12 +297,14 @@ class RegistrationHandler(BaseHandler):
yield identity_handler.bind_threepid(c, user_id)
@defer.inlineCallbacks
def check_user_id_is_valid(self, user_id):
def check_user_id_not_appservice_exclusive(self, user_id, allowed_appservice=None):
# valid user IDs must not clash with any user ID namespaces claimed by
# application services.
services = yield self.store.get_app_services()
interested_services = [
s for s in services if s.is_interested_in_user(user_id)
s for s in services
if s.is_interested_in_user(user_id)
and s != allowed_appservice
]
for service in interested_services:
if service.is_exclusive_user(user_id):
@@ -273,8 +313,16 @@ class RegistrationHandler(BaseHandler):
errcode=Codes.EXCLUSIVE
)
def _generate_user_id(self):
return "-" + stringutils.random_string(18)
@defer.inlineCallbacks
def _generate_user_id(self, reseed=False):
if reseed or self._next_generated_user_id is None:
self._next_generated_user_id = (
yield self.store.find_next_generated_user_id_localpart()
)
id = self._next_generated_user_id
self._next_generated_user_id += 1
defer.returnValue(str(id))
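Generated localparts are now sequential integers rather than random strings; a simplified in-memory sketch of the counter (the real code seeds and reseeds it from the database via find_next_generated_user_id_localpart):

class UserIdAllocator:
    def __init__(self, start):
        self._next = start  # in the real code this comes from the database

    def generate(self, reseed_to=None):
        if reseed_to is not None:
            self._next = reseed_to  # e.g. after a collision, re-read from storage
        localpart = str(self._next)
        self._next += 1
        return localpart

alloc = UserIdAllocator(start=1000)
assert alloc.generate() == "1000"
assert alloc.generate() == "1001"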
@defer.inlineCallbacks
def _validate_captcha(self, ip_addr, private_key, challenge, response):
@@ -302,10 +350,7 @@ class RegistrationHandler(BaseHandler):
"""
Used only by c/s api v1
"""
# TODO: get this from the homeserver rather than creating a new one for
# each request
client = CaptchaServerHttpClient(self.hs)
data = yield client.post_urlencoded_get_raw(
data = yield self.captcha_client.post_urlencoded_get_raw(
"http://www.google.com:80/recaptcha/api/verify",
args={
'privatekey': private_key,
@@ -318,3 +363,18 @@ class RegistrationHandler(BaseHandler):
def auth_handler(self):
return self.hs.get_handlers().auth_handler
@defer.inlineCallbacks
def guest_access_token_for(self, medium, address, inviter_user_id):
access_token = yield self.store.get_3pid_guest_access_token(medium, address)
if access_token:
defer.returnValue(access_token)
_, access_token = yield self.register(
generate_token=True,
make_guest=True
)
access_token = yield self.store.save_or_get_3pid_guest_access_token(
medium, address, access_token, inviter_user_id
)
defer.returnValue(access_token)

File diff suppressed because it is too large.
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -17,13 +17,14 @@ from twisted.internet import defer
from ._base import BaseHandler
from synapse.api.constants import Membership
from synapse.api.constants import Membership, EventTypes
from synapse.api.filtering import Filter
from synapse.api.errors import SynapseError
from synapse.events.utils import serialize_event
from unpaddedbase64 import decode_base64, encode_base64
import itertools
import logging
@@ -79,6 +80,9 @@ class SearchHandler(BaseHandler):
# What to order results by (impacts whether pagination can be done)
order_by = room_cat.get("order_by", "rank")
# Return the current state of the rooms?
include_state = room_cat.get("include_state", False)
# Include context around each event?
event_context = room_cat.get(
"event_context", None
@@ -96,6 +100,10 @@ class SearchHandler(BaseHandler):
after_limit = int(event_context.get(
"after_limit", 5
))
# Return the historic display name and avatar for the senders
# of the events?
include_profile = bool(event_context.get("include_profile", False))
except KeyError:
raise SynapseError(400, "Invalid search query")
@@ -123,6 +131,17 @@ class SearchHandler(BaseHandler):
if batch_group == "room_id":
room_ids.intersection_update({batch_group_key})
if not room_ids:
defer.returnValue({
"search_categories": {
"room_events": {
"results": [],
"count": 0,
"highlights": [],
}
}
})
rank_map = {} # event_id -> rank of event
allowed_events = []
room_groups = {} # Holds result of grouping by room, if applicable
@@ -131,11 +150,22 @@ class SearchHandler(BaseHandler):
# Holds the next_batch for the entire result set if one of those exists
global_next_batch = None
highlights = set()
count = None
if order_by == "rank":
results = yield self.store.search_msgs(
search_result = yield self.store.search_msgs(
room_ids, search_term, keys
)
count = search_result["count"]
if search_result["highlights"]:
highlights.update(search_result["highlights"])
results = search_result["results"]
results_map = {r["event"].event_id: r for r in results}
rank_map.update({r["event"].event_id: r["rank"] for r in results})
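The dict shape store.search_msgs is assumed to return here is inferred only from the keys the handler reads (count, highlights, results, and per-result event/rank); a sketch with a namedtuple standing in for real event objects, followed by the same two lookup maps the handler builds:

    import collections

    FakeEvent = collections.namedtuple("FakeEvent", ["event_id"])  # stand-in for a real event

    search_result = {
        "count": 2,                       # total matches, not just this page
        "highlights": ["piano"],          # terms for the client to highlight
        "results": [
            {"event": FakeEvent("$a:hs"), "rank": 1.7},
            {"event": FakeEvent("$b:hs"), "rank": 0.9},
        ],
    }

    results_map = {r["event"].event_id: r for r in search_result["results"]}
    rank_map = {r["event"].event_id: r["rank"] for r in search_result["results"]}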
@@ -163,80 +193,78 @@ class SearchHandler(BaseHandler):
s["results"].append(e.event_id)
elif order_by == "recent":
# In this case we specifically loop through each room as the given
# limit applies to each room, rather than a global list.
# This is not necessarily a good idea.
for room_id in room_ids:
room_events = []
if batch_group == "room_id" and batch_group_key == room_id:
pagination_token = batch_token
else:
room_events = []
i = 0
pagination_token = batch_token
# We keep looping and we keep filtering until we reach the limit
# or we run out of things.
# But only go around 5 times since otherwise synapse will be sad.
while len(room_events) < search_filter.limit() and i < 5:
i += 1
search_result = yield self.store.search_rooms(
room_ids, search_term, keys, search_filter.limit() * 2,
pagination_token=pagination_token,
)
if search_result["highlights"]:
highlights.update(search_result["highlights"])
count = search_result["count"]
results = search_result["results"]
results_map = {r["event"].event_id: r for r in results}
rank_map.update({r["event"].event_id: r["rank"] for r in results})
filtered_events = search_filter.filter([
r["event"] for r in results
])
events = yield self._filter_events_for_client(
user.to_string(), filtered_events
)
room_events.extend(events)
room_events = room_events[:search_filter.limit()]
if len(results) < search_filter.limit() * 2:
pagination_token = None
i = 0
# We keep looping and we keep filtering until we reach the limit
# or we run out of things.
# But only go around 5 times since otherwise synapse will be sad.
while len(room_events) < search_filter.limit() and i < 5:
i += 1
results = yield self.store.search_room(
room_id, search_term, keys, search_filter.limit() * 2,
pagination_token=pagination_token,
)
results_map = {r["event"].event_id: r for r in results}
rank_map.update({r["event"].event_id: r["rank"] for r in results})
filtered_events = search_filter.filter([
r["event"] for r in results
])
events = yield self._filter_events_for_client(
user.to_string(), filtered_events
)
room_events.extend(events)
room_events = room_events[:search_filter.limit()]
if len(results) < search_filter.limit() * 2:
pagination_token = None
break
else:
pagination_token = results[-1]["pagination_token"]
if room_events:
res = results_map[room_events[-1].event_id]
pagination_token = res["pagination_token"]
group = room_groups.setdefault(room_id, {})
if pagination_token:
next_batch = encode_base64("%s\n%s\n%s" % (
"room_id", room_id, pagination_token
))
group["next_batch"] = next_batch
if batch_token:
global_next_batch = next_batch
group["results"] = [e.event_id for e in room_events]
group["order"] = max(
e.origin_server_ts/1000 for e in room_events
if hasattr(e, "origin_server_ts")
)
allowed_events.extend(room_events)
# Normalize the group orders
if room_groups:
if len(room_groups) > 1:
mx = max(g["order"] for g in room_groups.values())
mn = min(g["order"] for g in room_groups.values())
for g in room_groups.values():
g["order"] = (g["order"] - mn) * 1.0 / (mx - mn)
break
else:
room_groups.values()[0]["order"] = 1
pagination_token = results[-1]["pagination_token"]
for event in room_events:
group = room_groups.setdefault(event.room_id, {
"results": [],
})
group["results"].append(event.event_id)
if room_events and len(room_events) >= search_filter.limit():
last_event_id = room_events[-1].event_id
pagination_token = results_map[last_event_id]["pagination_token"]
# We want to respect the given batch group and group keys so
# that if people blindly use the top level `next_batch` token
# it returns more from the same group (if applicable) rather
# than reverting to searching all results again.
if batch_group and batch_group_key:
global_next_batch = encode_base64("%s\n%s\n%s" % (
batch_group, batch_group_key, pagination_token
))
else:
global_next_batch = encode_base64("%s\n%s\n%s" % (
"all", "", pagination_token
))
for room_id, group in room_groups.items():
group["next_batch"] = encode_base64("%s\n%s\n%s" % (
"room_id", room_id, pagination_token
))
allowed_events.extend(room_events)
else:
# We should never get here due to the guard earlier.
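The next_batch tokens built in the "recent" branch above are three newline-separated fields (group name, group key, storage pagination token) run through unpadded base64, using the encode_base64/decode_base64 helpers imported at the top of this file. A small sketch of encoding and decoding such a token; the room id and pagination token are invented, and the string handling mirrors the handler's own Python 2 usage:

    from unpaddedbase64 import decode_base64, encode_base64

    def make_batch_token(batch_group, batch_group_key, pagination_token):
        # e.g. ("room_id", "!abc:hs", "t123-456") or ("all", "", "t123-456")
        return encode_base64("%s\n%s\n%s" % (batch_group, batch_group_key, pagination_token))

    def parse_batch_token(token):
        batch_group, batch_group_key, pagination_token = decode_base64(token).split("\n")
        return batch_group, batch_group_key, pagination_token

    token = make_batch_token("room_id", "!abc:hs", "t123-456")
    assert parse_batch_token(token) == ("room_id", "!abc:hs", "t123-456")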
@@ -269,6 +297,33 @@ class SearchHandler(BaseHandler):
"room_key", res["end"]
).to_string()
if include_profile:
senders = set(
ev.sender
for ev in itertools.chain(
res["events_before"], [event], res["events_after"]
)
)
if res["events_after"]:
last_event_id = res["events_after"][-1].event_id
else:
last_event_id = event.event_id
state = yield self.store.get_state_for_event(
last_event_id,
types=[(EventTypes.Member, sender) for sender in senders]
)
res["profile_info"] = {
s.state_key: {
"displayname": s.content.get("displayname", None),
"avatar_url": s.content.get("avatar_url", None),
}
for s in state.values()
if s.type == EventTypes.Member and s.state_key in senders
}
contexts[event.event_id] = res
else:
contexts = {}
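With include_profile set, each context gains a profile_info map from sender to the displayname and avatar taken from that sender's m.room.member state as of the last event in the context window. Roughly, with invented values:

    context = {"events_before": [], "events_after": []}   # as built earlier for each hit
    context["profile_info"] = {
        "@alice:example.com": {
            "displayname": "Alice",
            "avatar_url": "mxc://example.com/abc123",
        },
        "@bob:example.com": {
            "displayname": None,   # member event carried no displayname
            "avatar_url": None,
        },
    }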
@@ -287,22 +342,39 @@ class SearchHandler(BaseHandler):
for e in context["events_after"]
]
results = {
e.event_id: {
state_results = {}
if include_state:
rooms = set(e.room_id for e in allowed_events)
for room_id in rooms:
state = yield self.state_handler.get_current_state(room_id)
state_results[room_id] = state.values()
state_results.values()
# We're now about to serialize the events. We should not make any
# blocking calls after this. Otherwise the 'age' will be wrong
results = [
{
"rank": rank_map[e.event_id],
"result": serialize_event(e, time_now),
"context": contexts.get(e.event_id, {}),
}
for e in allowed_events
}
logger.info("Found %d results", len(results))
]
rooms_cat_res = {
"results": results,
"count": len(results)
"count": count,
"highlights": list(highlights),
}
if state_results:
rooms_cat_res["state"] = {
room_id: [serialize_event(e, time_now) for e in state]
for room_id, state in state_results.items()
}
if room_groups and "room_id" in group_keys:
rooms_cat_res.setdefault("groups", {})["room_id"] = room_groups

Some files were not shown because too many files have changed in this diff