
Compare commits


111 Commits

Author SHA1 Message Date
Erik Johnston 86090eadb0 Don't send outlier events to ASes 2018-04-06 10:21:17 +01:00
Erik Johnston edbeed06ca Fix MRO for replication stores 2018-04-06 10:20:50 +01:00
Erik Johnston 277d2c506d Add cache for if ASes have users in a room 2018-04-05 17:28:27 +01:00
Jan Christian Grünhage 7d0f712348 Merge pull request #3063 from matrix-org/jcgruenhage/cache_settings_stats
phone home cache size configurations
2018-04-04 17:24:40 +01:00
Jan Christian Grünhage e4570c53dd phone home cache size configurations 2018-04-04 16:46:58 +01:00
Richard van der Hoff 9cd3f06ab7 Merge pull request #3062 from matrix-org/revert-3053-speedup-mxid-check
Revert "improve mxid check performance"
2018-04-04 12:09:17 +01:00
Richard van der Hoff f92963f5db Revert "improve mxid check performance" 2018-04-04 12:08:29 +01:00
Richard van der Hoff 725a72ec5a Merge pull request #3000 from NotAFile/change-except-style
Replace old style error catching with 'as' keyword
2018-04-04 10:45:22 +01:00
Richard van der Hoff a89f9f830c Merge pull request #3044 from matrix-org/michaelk/performance_stats
Add basic performance statistics to phone home
2018-04-04 10:37:25 +01:00
Richard van der Hoff 39ce38b024 Merge pull request #3053 from NotAFile/speedup-mxid-check
improve mxid check performance
2018-04-04 10:19:52 +01:00
Richard van der Hoff 8da39ad98f Merge pull request #3049 from matrix-org/rav/use_staticjson
Use static JSONEncoders
2018-04-03 15:18:32 +01:00
Richard van der Hoff 3ee4ad09eb Fix json encoding bug in replication
json encoders have an encode method, not a dumps method.
2018-04-03 15:09:48 +01:00
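A minimal sketch of the distinction this fix relies on, using the stdlib json module (simplejson's JSONEncoder behaves the same way):

    import json

    encoder = json.JSONEncoder()
    print(encoder.encode({"token": 5}))   # JSONEncoder exposes encode()
    # encoder.dumps({"token": 5}) would raise AttributeError: dumps() is
    # the module-level function, not a method on the encoder object.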
Richard van der Hoff 0ca5c4d2af Merge pull request #3048 from matrix-org/rav/use_simplejson
Use simplejson throughout
2018-04-03 14:39:57 +01:00
Adrian Tschira 11597ddea5 improve mxid check performance ~4x
Signed-off-by: Adrian Tschira <nota@notafile.com>
2018-03-31 02:00:22 +02:00
Richard van der Hoff 05630758f2 Use static JSONEncoders
using json.dumps with custom options requires us to create a new JSONEncoder on
each call. It's more efficient to create one upfront and reuse it.
2018-03-29 23:13:33 +01:00
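A minimal sketch of the optimisation described above, shown with the stdlib json module (the Synapse change applies the same idea to simplejson):

    import json

    # Built once at import time and reused; json.dumps() with custom options
    # constructs a fresh JSONEncoder on every call.
    _encoder = json.JSONEncoder(separators=(',', ':'), sort_keys=True)

    def encode_fast(obj):
        return _encoder.encode(obj)

    def encode_slow(obj):
        # Equivalent output, but pays the encoder-construction cost each time.
        return json.dumps(obj, separators=(',', ':'), sort_keys=True)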
Richard van der Hoff fcfe7f6ad3 Use simplejson throughout
Let's use simplejson rather than json, for consistency.
2018-03-29 22:45:52 +01:00
Erik Johnston 88cc9cc69e Merge pull request #3043 from matrix-org/erikj/measure_state_group_creation
Measure time it takes to calculate state group ID
2018-03-28 17:40:14 +01:00
Richard van der Hoff 9a0db062af Merge pull request #3034 from matrix-org/rav/fix_key_claim_errors
Fix error when claiming e2e keys from offline servers
2018-03-28 14:50:38 +01:00
Michael Kaye 33f6195d9a Handle review comments 2018-03-28 14:25:25 +01:00
Erik Johnston e9e4cb25fc Merge pull request #3042 from matrix-org/fix_locally_failing_tests
fix tests/storage/test_user_directory.py
2018-03-28 13:10:37 +01:00
Michael Kaye 4ceaa7433a As daemonizing will make a new process, defer call to init. 2018-03-28 12:19:01 +01:00
Neil Johnson 545001b9e4 Fix search_user_dir multiple sqlite versions do different things 2018-03-28 11:19:45 +01:00
Erik Johnston 01ccc9e6f2 Measure time it takes to calculate state group ID 2018-03-28 11:03:52 +01:00
Neil Johnson a9cb1a35c8 fix tests/storage/test_user_directory.py 2018-03-28 10:57:27 +01:00
Erik Johnston f879127aaa Merge pull request #3029 from matrix-org/erikj/linearize_generate_user_id
Linearize calls to _generate_user_id
2018-03-28 10:00:31 +01:00
Erik Johnston e6d87c93f3 Merge pull request #3030 from matrix-org/erikj/no_ujson
Remove last usage of ujson
2018-03-28 10:00:06 +01:00
Erik Johnston 004cc8a328 Merge pull request #3033 from matrix-org/erikj/calculate_state_metrics
Add counter metrics for calculating state delta
2018-03-28 09:59:42 +01:00
Michael Kaye ef520d8d0e Include coarse CPU and Memory use in stats callbacks.
This requires the psutil module, and is still opt-in based on the report_stats
config option.
2018-03-27 17:56:03 +01:00
Richard van der Hoff a134c572a6 Stringify exceptions for keys/{query,claim}
Make sure we stringify any exceptions we return from keys/query and keys/claim,
to avoid a 'not JSON serializable' error later

Fixes #3010
2018-03-27 17:15:06 +01:00
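The catch-all branch of the fix, as it appears in the e2e_keys diff further down (this is the fallback case; the full helper also special-cases CodeMessageException, NotRetryingDestination and FederationDeniedError):

    def _exception_to_failure(e):
        # Some exceptions (notably twisted's ResponseFailed) carry a
        # non-string e.message, which simplejson then fails to serialise,
        # so force it to a string before it goes into the response body.
        return {
            "status": 503, "message": str(e.message),
        }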
Richard van der Hoff c2a5cf2fe3 factor out exception handling for keys/claim and keys/query
this stuff is badly c&p'ed
2018-03-27 17:11:23 +01:00
Erik Johnston 800cfd5774 Comment 2018-03-27 13:30:39 +01:00
Erik Johnston 152c2ac19e Fix indent 2018-03-27 13:13:46 +01:00
Erik Johnston e70287cff3 Comment 2018-03-27 13:13:38 +01:00
Erik Johnston 03a26e28d9 Merge pull request #3017 from matrix-org/erikj/add_cache_control_headers
Add Cache-Control headers to all JSON APIs
2018-03-27 13:10:38 +01:00
Erik Johnston 3e0c0660b3 Also do check inside linearizer 2018-03-27 13:01:34 +01:00
Erik Johnston 3f49e131d9 Add counter metrics for calculating state delta
This will allow us to measure how often we calculate state deltas in
event persistence that we would have been able to calculate at the same
time we calculated the state for the event.
2018-03-27 10:57:35 +01:00
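A sketch of the kind of counters this adds, using prometheus_client for illustration; the counter names and the helper are hypothetical, and Synapse registers its metrics through its own metrics module rather than prometheus_client directly:

    from prometheus_client import Counter

    state_delta_counter = Counter(
        "state_delta", "state deltas calculated during event persistence")
    state_delta_reuse_counter = Counter(
        "state_delta_reuse",
        "state deltas already available from the event's own state calculation")

    def on_state_delta(already_available):
        state_delta_counter.inc()
        if already_available:
            state_delta_reuse_counter.inc()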
Erik Johnston 9b8c0fb162 Merge branch 'master' of github.com:matrix-org/synapse into develop 2018-03-26 21:41:55 +01:00
Erik Johnston 691f8492fb Merge branch 'release-v0.27.0' of github.com:matrix-org/synapse 2018-03-26 16:38:23 +01:00
Erik Johnston a9d7d98d3f Bump version and changelog 2018-03-26 16:36:53 +01:00
Erik Johnston bdbb1eec65 Merge branch 'erikj/simplejson_replication' of github.com:matrix-org/synapse into release-v0.27.0 2018-03-26 16:35:55 +01:00
Erik Johnston 01f72e2fc7 Fix date 2018-03-26 16:21:26 +01:00
Erik Johnston 9187862002 Bump version and changelog 2018-03-26 16:20:24 +01:00
Neil Johnson aa3587fdd1 Merge branch 'release-v0.27.0' of https://github.com/matrix-org/synapse 2018-03-26 14:51:11 +01:00
Neil Johnson 51406dab96 version bump 2018-03-26 14:48:19 +01:00
Erik Johnston fecb45e0c3 Remove last usage of ujson 2018-03-26 13:32:29 +01:00
Erik Johnston 44cd6e1358 PEP8 2018-03-26 12:06:48 +01:00
Erik Johnston 8d6dc106d1 Don't use _cursor_to_dict in find_next_generated_user_id_localpart 2018-03-26 12:02:44 +01:00
Erik Johnston a052aa42e7 Linearize calls to _generate_user_id 2018-03-26 12:02:20 +01:00
Matthew Hodgson 8efe773ef1 fix typo 2018-03-23 21:38:42 +00:00
Matthew Hodgson b7e7b52452 Merge pull request #3022 from matrix-org/matthew/noresource
404 correctly on missing paths via NoResource
2018-03-23 11:10:32 +00:00
Matthew Hodgson 8cbbfaefc1 404 correctly on missing paths via NoResource
fixes https://github.com/matrix-org/synapse/issues/2043 and https://github.com/matrix-org/synapse/issues/2029
2018-03-23 10:32:50 +00:00
Erik Johnston 84b5cc69f5 Merge pull request #3006 from matrix-org/erikj/state_iter
Use .iter* to avoid copies in StateHandler
2018-03-22 11:51:13 +00:00
Erik Johnston fde8e8f09f Fix s/iteriterms/itervalues 2018-03-22 11:42:16 +00:00
Erik Johnston eb9fc021e3 Merge branch 'release-v0.27.0' of github.com:matrix-org/synapse into develop 2018-03-22 10:19:53 +00:00
Erik Johnston 1c41b05c8c Add Cache-Control headers to all JSON APIs
It is especially important that sync requests don't get cached, as if a
sync returns the same token given then the client will call sync with
the same parameters again. If the previous response was cached it will
get reused, resulting in the client tight looping making the same
request and never making any progress.

In general, clients will expect to get up to date data when requesting
APIs, and so it's safer to do a blanket no-cache policy than only
whitelisting APIs that we know will break things if they get cached.
2018-03-21 17:46:26 +00:00
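A standalone Twisted sketch of the header being added; the one-line change itself is visible in a hunk below, where respond_with_json_bytes gains the Cache-Control header:

    from twisted.internet import reactor
    from twisted.web import resource, server

    class JsonPing(resource.Resource):
        isLeaf = True

        def render_GET(self, request):
            request.setHeader(b"Content-Type", b"application/json")
            # Blanket no-cache policy: a cached /sync response would leave
            # clients looping on the same since-token forever.
            request.setHeader(
                b"Cache-Control", b"no-cache, no-store, must-revalidate")
            return b'{"pong": true}'

    reactor.listenTCP(8080, server.Site(JsonPing()))
    reactor.run()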
Erik Johnston 5bdb57cb66 Merge pull request #3015 from matrix-org/erikj/simplejson_replication
Fix replication after switch to simplejson
2018-03-20 15:06:33 +00:00
Neil Johnson f5aa027c2f Update CHANGES.rst
rearrange ordering of releases to match chronology
2018-03-20 15:06:22 +00:00
Neil Johnson e66fbcbb02 fix merge conflicts 2018-03-20 14:25:31 +00:00
Erik Johnston 9aa5a0af51 Explicitly use simplejson 2018-03-20 09:58:13 +00:00
Erik Johnston 610accbb7f Fix replication after switch to simplejson
Turns out that simplejson serialises namedtuples as dictionaries rather
than tuples by default.
2018-03-19 16:12:48 +00:00
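A short demonstration of the behaviour in question (Row is an illustrative namedtuple; the corresponding encoder appears in the replication commands hunk below):

    import collections
    import simplejson

    Row = collections.namedtuple("Row", ("token", "name"))

    # Default behaviour: namedtuples are serialised as JSON objects...
    simplejson.dumps(Row(5, "events"))     # '{"token": 5, "name": "events"}'

    # ...while the replication protocol expects plain arrays, hence:
    encoder = simplejson.JSONEncoder(namedtuple_as_object=False)
    encoder.encode(Row(5, "events"))       # '[5, "events"]'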
Neil Johnson c384705ee8 Update __init__.py
bump version
2018-03-19 15:11:58 +00:00
Neil Johnson 1a3aa957ca Update CHANGES.rst 2018-03-19 15:11:00 +00:00
Erik Johnston 3f961e638a Merge pull request #3005 from matrix-org/erikj/fix_cache_size
Fix bug where state cache used lots of memory
2018-03-19 11:44:26 +00:00
Erik Johnston fa72803490 Merge branch 'master' of github.com:matrix-org/synapse into develop 2018-03-19 11:41:01 +00:00
Erik Johnston 9a0d783c11 Add comments 2018-03-19 11:35:53 +00:00
Matthew Hodgson 38f952b9bc spell out not to massively increase bcrypt rounds 2018-03-19 09:27:36 +00:00
Erik Johnston a8ce159be4 Replace some ujson with simplejson to make it work 2018-03-16 00:27:09 +00:00
Erik Johnston f609acc109 Merge branch 'hotfixes-v0.26.1' of github.com:matrix-org/synapse 2018-03-16 00:13:51 +00:00
Erik Johnston 0092cf38ae Newline 2018-03-16 00:11:58 +00:00
Erik Johnston 5b631ff41a Remove wrong comment 2018-03-16 00:07:08 +00:00
Erik Johnston ba48755d56 Bump version and changelog 2018-03-15 23:57:26 +00:00
Erik Johnston 926ba76e23 Replace ujson with simplejson 2018-03-15 23:43:31 +00:00
Erik Johnston 9cf519769b Use .iter* to avoid copies in StateHandler 2018-03-15 17:50:26 +00:00
Erik Johnston 7c7706f42b Fix bug where state cache used lots of memory
The state cache bases its size on the sum of the size of entries. The
size of the entry is calculated once on insertion, so it is important
that the size of entries does not change.

The DictionaryCache modified the entries size, which caused the state
cache to incorrectly think it was smaller than it actually was.
2018-03-15 15:46:54 +00:00
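A hypothetical size-tracked cache (toy code, not Synapse's) showing why in-place mutation of entries breaks the accounting:

    class SizedCache(object):
        """Toy cache that, like the state cache, sizes entries on insertion."""

        def __init__(self):
            self.entries = {}
            self.total_size = 0

        def insert(self, key, value):
            self.entries[key] = value
            self.total_size += len(value)   # measured once, here

    cache = SizedCache()
    entry = {("m.room.member", "@alice:hs"): "$event_a"}
    cache.insert("group_1", entry)

    # If another layer (here, the DictionaryCache) mutates the entry in
    # place, total_size is now an undercount, so eviction never kicks in
    # and memory usage balloons.
    entry[("m.room.name", "")] = "$event_b"
    assert cache.total_size == 1
    assert len(cache.entries["group_1"]) == 2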
NotAFile 2cc9f76bc3 replace old style error catching with 'as' keyword
This is both easier to read and compatible with python3 (not that that
matters)

Signed-off-by: Adrian Tschira <nota@notafile.com>
2018-03-15 16:11:17 +01:00
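The spelling change in miniature (the synctl and device handler hunks below show the real instances):

    try:
        open("/no/such/path")
    # Python 2-only spelling removed by this PR:  except IOError, err:
    except IOError as err:   # works on both Python 2 and 3
        print("open failed: %s" % (err,))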
Erik Johnston ddb00efc1d Bump version number 2018-03-15 14:41:30 +00:00
Erik Johnston 2a376579f3 Merge pull request #3003 from matrix-org/rav/fix_contributing
CONTRIBUTING.rst: fix CI info
2018-03-15 13:31:59 +00:00
Erik Johnston 873aea7168 Merge pull request #3002 from matrix-org/rav/purge_doc
Update purge_history_api.rst
2018-03-15 13:31:54 +00:00
Erik Johnston bf7ee93cb6 Merge pull request #2997 from turt2live/patch-2
Doc: Make the event_creator routes regex a code block
2018-03-15 13:12:44 +00:00
Richard van der Hoff 5ea624b0f5 CONTRIBUTING.rst: fix CI info 2018-03-15 11:48:35 +00:00
Richard van der Hoff 0ad5125814 Update purge_history_api.rst
clarify that `purge_history` will not purge state
2018-03-15 11:05:42 +00:00
Richard van der Hoff 068c21ab10 CHANGES.rst: reword metric deprecation 2018-03-15 10:36:31 +00:00
Neil Johnson b29d1abab6 Update CHANGES.rst 2018-03-15 10:34:15 +00:00
Neil Johnson 7367a4a823 Update CHANGES.rst 2018-03-15 10:33:52 +00:00
Neil Johnson 7d26591048 Update CHANGES.rst 2018-03-15 10:33:24 +00:00
Erik Johnston 2059b8573f Update CHANGES.rst 2018-03-14 18:11:21 +00:00
Erik Johnston 10fdcf561d Fix up rst formatting 2018-03-14 17:30:17 +00:00
Neil Johnson 5ccb57d3ff Update CHANGES.rst 2018-03-14 17:12:58 +00:00
Travis Ralston c33c1ceddd OCD: Make the event_creator routes regex a code block
All the others are code blocks, so this one should be too (currently it is a blockquote).

Signed-off-by: Travis Ralston <travpc@gmail.com>
2018-03-14 11:09:08 -06:00
Neil Johnson fb647164f2 Update CHANGES.rst 2018-03-14 16:20:36 +00:00
Neil Johnson a492b17fe2 Update CHANGES.rst
clean formatting
2018-03-14 16:18:09 +00:00
Neil Johnson cb2c7c0669 Update CHANGES.rst
WIP, need to add most recent PRs
2018-03-14 16:09:20 +00:00
Erik Johnston 3959754de3 Merge pull request #2995 from matrix-org/erikj/enable_membership_worker
Register membership/state servlets in event_creator
2018-03-14 14:37:33 +00:00
Erik Johnston 4f28018c83 Register membership/state servlets in event_creator 2018-03-14 14:30:06 +00:00
Erik Johnston 57db62e554 Merge pull request #2992 from matrix-org/erikj/implement_member_workre
Implement RoomMemberWorkerHandler
2018-03-14 14:29:33 +00:00
Erik Johnston 0011ede3b0 Fix imports 2018-03-14 14:19:23 +00:00
Erik Johnston 62ad701326 s/join/joined/ in notify_user_membership_change 2018-03-14 14:17:43 +00:00
Erik Johnston 3f0f06cb31 Split RoomMemberWorkerHandler to separate file 2018-03-14 11:41:45 +00:00
Erik Johnston 3e839e0548 Merge pull request #2989 from matrix-org/erikj/profile_cache_master
Only update remote profile cache on master
2018-03-14 09:42:27 +00:00
Erik Johnston ebd0127999 Merge pull request #2988 from matrix-org/erikj/split_profile_store
Split up ProfileStore
2018-03-14 09:41:06 +00:00
Erik Johnston cfe75a9fb6 Merge pull request #2991 from matrix-org/erikj/fixup_rm
_remote_join and co take a requester
2018-03-14 09:40:57 +00:00
Erik Johnston f51565e023 Merge pull request #2993 from matrix-org/erikj/is_blocked
Add is_blocked to worker store
2018-03-14 09:39:18 +00:00
Matthew Hodgson d144ed6ffb fix bug #2926 (loading all state for a given type from the DB if the state_key is None) (#2990)
Fixes a regression that had crept in where the caching layer upholds requests for loading state which is filtered by type (but not by state_key), but the DB layer itself would interpret a missing state_key as a request to filter by null state_key rather than returning all state_keys.
2018-03-13 22:36:04 +00:00
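Illustrative only (not Synapse's real API): a state_key of None in the filter must mean "every state_key of this type", never "state_key is null" — the regression was the DB layer treating it as the latter:

    def filter_state(state, types):
        return {
            (typ, key): event_id
            for (typ, key), event_id in state.items()
            for (want_typ, want_key) in types
            if typ == want_typ and want_key in (None, key)
        }

    state = {
        ("m.room.member", "@a:hs"): "$1",
        ("m.room.member", "@b:hs"): "$2",
        ("m.room.name", ""): "$3",
    }
    # None state_key: all members come back, which is what the DB layer broke.
    assert len(filter_state(state, [("m.room.member", None)])) == 2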
Erik Johnston a08726fc42 Add is_blocked to worker store 2018-03-13 18:28:44 +00:00
Erik Johnston 350331d466 _remote_join and co take a requester 2018-03-13 17:50:39 +00:00
Erik Johnston df8ff682a7 Only update remote profile cache on master 2018-03-13 17:38:21 +00:00
Erik Johnston 3518d0ea8f Split up ProfileStore 2018-03-13 17:36:50 +00:00
Erik Johnston 3d5a25407c Merge pull request #2798 from jeremycline/fedora-repo
Note that Synapse is available in Fedora
2018-01-17 14:28:17 +00:00
Jeremy Cline 4102468da9 Note that Synapse is available in Fedora
Signed-off-by: Jeremy Cline <jeremy@jcline.org>
2018-01-16 16:14:02 -05:00
Richard van der Hoff 5b527d7ee1 Merge pull request #2674 from her001/readme-sytest
Mention SyTest in the README, after Development
2018-01-16 13:18:17 +00:00
Andrew Conrad 053ecae4db Mention SyTest in the README, after Development
Signed-off-by: Andrew Conrad <aconrad103@gmail.com>
2017-11-14 15:11:35 -06:00
73 changed files with 808 additions and 369 deletions
+82 -4
@@ -1,11 +1,89 @@
Unreleased
==========
Changes in synapse v0.27.2 (2018-03-26)
=======================================
synctl no longer starts the main synapse when using ``-a`` option with workers.
A new worker file should be added with ``worker_app: synapse.app.homeserver``.
Bug fixes:
* Fix bug which broke TCP replication between workers (PR #3015)
Changes in synapse v0.27.1 (2018-03-26)
=======================================
Meta release as v0.27.0 temporarily pointed to the wrong commit
Changes in synapse v0.27.0 (2018-03-26)
=======================================
No changes since v0.27.0-rc2
Changes in synapse v0.27.0-rc2 (2018-03-19)
===========================================
Pulls in v0.26.1
Bug fixes:
* Fix bug introduced in v0.27.0-rc1 that causes much increased memory usage in state cache (PR #3005)
Changes in synapse v0.26.1 (2018-03-15)
=======================================
Bug fixes:
* Fix bug where an invalid event caused server to stop functioning correctly,
due to parsing and serializing bugs in ujson library (PR #3008)
Changes in synapse v0.27.0-rc1 (2018-03-14)
===========================================
The common case for running Synapse is not to run separate workers, but for those that do, be aware that synctl no longer starts the main synapse when using ``-a`` option with workers. A new worker file should be added with ``worker_app: synapse.app.homeserver``.
This release also begins the process of renaming a number of the metrics
reported to prometheus. See `docs/metrics-howto.rst <docs/metrics-howto.rst#block-and-response-metrics-renamed-for-0-27-0>`_.
Note that the v0.28.0 release will remove the deprecated metric names.
Features:
* Add ability for ASes to override message send time (PR #2754)
* Add support for custom storage providers for media repository (PR #2867, #2777, #2783, #2789, #2791, #2804, #2812, #2814, #2857, #2868, #2767)
* Add purge API features, see `docs/admin_api/purge_history_api.rst <docs/admin_api/purge_history_api.rst>`_ for full details (PR #2858, #2867, #2882, #2946, #2962, #2943)
* Add support for whitelisting 3PIDs that users can register. (PR #2813)
* Add ``/room/{id}/event/{id}`` API (PR #2766)
* Add an admin API to get all the media in a room (PR #2818) Thanks to @turt2live!
* Add ``federation_domain_whitelist`` option (PR #2820, #2821)
Changes:
* Continue to factor out processing from main process and into worker processes. See updated `docs/workers.rst <docs/workers.rst>`_ (PR #2892 - #2904, #2913, #2920 - #2926, #2947, #2847, #2854, #2872, #2873, #2874, #2928, #2929, #2934, #2856, #2976 - #2984, #2987 - #2989, #2991 - #2993, #2995, #2784)
* Ensure state cache is used when persisting events (PR #2864, #2871, #2802, #2835, #2836, #2841, #2842, #2849)
* Change the default config to bind on both IPv4 and IPv6 on all platforms (PR #2435) Thanks to @silkeh!
* No longer require a specific version of saml2 (PR #2695) Thanks to @okurz!
* Remove ``verbosity``/``log_file`` from generated config (PR #2755)
* Add and improve metrics and logging (PR #2770, #2778, #2785, #2786, #2787, #2793, #2794, #2795, #2809, #2810, #2833, #2834, #2844, #2965, #2927, #2975, #2790, #2796, #2838)
* When using synctl with workers, don't start the main synapse automatically (PR #2774)
* Minor performance improvements (PR #2773, #2792)
* Use a connection pool for non-federation outbound connections (PR #2817)
* Make it possible to run unit tests against postgres (PR #2829)
* Update pynacl dependency to 1.2.1 or higher (PR #2888) Thanks to @bachp!
* Remove ability for AS users to call /events and /sync (PR #2948)
* Use bcrypt.checkpw (PR #2949) Thanks to @krombel!
Bug fixes:
* Fix broken ``ldap_config`` config option (PR #2683) Thanks to @seckrv!
* Fix error message when user is not allowed to unban (PR #2761) Thanks to @turt2live!
* Fix publicised groups GET API (singular) over federation (PR #2772)
* Fix user directory when using ``user_directory_search_all_users`` config option (PR #2803, #2831)
* Fix error on ``/publicRooms`` when no rooms exist (PR #2827)
* Fix bug in quarantine_media (PR #2837)
* Fix url_previews when no Content-Type is returned from URL (PR #2845)
* Fix rare race in sync API when joining room (PR #2944)
* Fix slow event search, switch back from GIST to GIN indexes (PR #2769, #2848)
Changes in synapse v0.26.0 (2018-01-05)
+7 -3
@@ -30,8 +30,12 @@ use github's pull request workflow to review the contribution, and either ask
you to make any refinements needed or merge it and make them ourselves. The
changes will then land on master when we next do a release.
We use Jenkins for continuous integration (http://matrix.org/jenkins), and
typically all pull requests get automatically tested Jenkins: if your change breaks the build, Jenkins will yell about it in #matrix-dev:matrix.org so please lurk there and keep an eye open.
We use `Jenkins <http://matrix.org/jenkins>`_ and
`Travis <https://travis-ci.org/matrix-org/synapse>`_ for continuous
integration. All pull requests to synapse get automatically tested by Travis;
the Jenkins builds require an adminstrator to start them. If your change
breaks the build, this will be shown in github, so please keep an eye on the
pull request for feedback.
Code style
~~~~~~~~~~
@@ -115,4 +119,4 @@ can't be accepted. Git makes this trivial - just use the -s flag when you do
Conclusion
~~~~~~~~~~
That's it! Matrix is a very open and collaborative project as you might expect given our obsession with open communication. If we're going to successfully matrix together all the fragmented communication technologies out there we are reliant on contributions and collaboration from the community to do so. So please get involved - and we hope you have as much fun hacking on Matrix as we do!
That's it! Matrix is a very open and collaborative project as you might expect given our obsession with open communication. If we're going to successfully matrix together all the fragmented communication technologies out there we are reliant on contributions and collaboration from the community to do so. So please get involved - and we hope you have as much fun hacking on Matrix as we do!
+15
@@ -354,6 +354,10 @@ https://matrix.org/docs/projects/try-matrix-now.html (or build your own with one
Fedora
------
Synapse is in the Fedora repositories as ``matrix-synapse``::
sudo dnf install matrix-synapse
Oleg Girko provides Fedora RPMs at
https://obs.infoserver.lv/project/monitor/matrix-synapse
@@ -890,6 +894,17 @@ This should end with a 'PASSED' result::
PASSED (successes=143)
Running the Integration Tests
=============================
Synapse is accompanied by `SyTest <https://github.com/matrix-org/sytest>`_,
a Matrix homeserver integration testing suite, which uses HTTP requests to
access the API as a Matrix client would. It is able to run Synapse directly from
the source tree, so installation of the server is not required.
Testing with SyTest is recommended for verifying that changes related to the
Client-Server API are functioning correctly. See the `installation instructions
<https://github.com/matrix-org/sytest#installing>`_ for details.
Building Internal API Documentation
===================================
+12
@@ -48,6 +48,18 @@ returned by the Client-Server API:
# configured on port 443.
curl -kv https://<host.name>/_matrix/client/versions 2>&1 | grep "Server:"
Upgrading to $NEXT_VERSION
====================
This release expands the anonymous usage stats sent if the opt-in
``report_stats`` configuration is set to ``true``. We now capture RSS memory
and cpu use at a very coarse level. This requires administrators to install
the optional ``psutil`` python module.
We would appreciate it if you could assist by ensuring this module is available
and ``report_stats`` is enabled. This will let us see if performance changes to
synapse are having an impact to the general community.
Upgrading to v0.15.0
====================
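A minimal sketch of the coarse stats capture described in the $NEXT_VERSION note above, using the optional psutil module; the actual wiring appears in the homeserver.py hunk further down:

    import psutil

    process = psutil.Process()
    # Prime cpu_percent: the first call establishes the measurement baseline.
    process.cpu_percent(interval=None)

    def coarse_stats():
        return {
            "memory_rss": process.memory_info().rss,
            "cpu_average": int(process.cpu_percent(interval=None)),
        }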
+4 -2
@@ -16,9 +16,11 @@ including an ``access_token`` of a server admin.
By default, events sent by local users are not deleted, as they may represent
the only copies of this content in existence. (Events sent by remote users are
deleted, and room state data before the cutoff is always removed).
deleted.)
To delete local events as well, set ``delete_local_events`` in the body:
Room state data (such as joins, leaves, topic) is always preserved.
To delete local message events as well, set ``delete_local_events`` in the body:
.. code:: json
+1 -1
@@ -230,7 +230,7 @@ file. For example::
``synapse.app.event_creator``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Handles non-state event creation. It can handle REST endpoints matching:
Handles non-state event creation. It can handle REST endpoints matching::
^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/send
+1 -1
@@ -16,4 +16,4 @@
""" This is a reference implementation of a Matrix home server.
"""
__version__ = "0.26.0"
__version__ = "0.27.2"
+2 -1
@@ -15,9 +15,10 @@
"""Contains exceptions and error codes."""
import json
import logging
import simplejson as json
logger = logging.getLogger(__name__)
+1 -1
@@ -17,7 +17,7 @@ from synapse.storage.presence import UserPresenceState
from synapse.types import UserID, RoomID
from twisted.internet import defer
import ujson as json
import simplejson as json
import jsonschema
from jsonschema import FormatChecker
+3 -3
@@ -36,13 +36,13 @@ from synapse.util.logcontext import LoggingContext, preserve_fn
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import reactor
from twisted.web.resource import Resource
from twisted.web.resource import NoResource
logger = logging.getLogger("synapse.app.appservice")
class AppserviceSlaveStore(
DirectoryStore, SlavedEventStore, SlavedApplicationServiceStore,
DirectoryStore, SlavedApplicationServiceStore, SlavedEventStore,
SlavedRegistrationStore,
):
pass
@@ -64,7 +64,7 @@ class AppserviceServer(HomeServer):
if name == "metrics":
resources[METRICS_PREFIX] = MetricsResource(self)
root_resource = create_resource_tree(resources, Resource())
root_resource = create_resource_tree(resources, NoResource())
_base.listen_tcp(
bind_addresses,
+3 -3
@@ -44,17 +44,17 @@ from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import reactor
from twisted.web.resource import Resource
from twisted.web.resource import NoResource
logger = logging.getLogger("synapse.app.client_reader")
class ClientReaderSlavedStore(
SlavedApplicationServiceStore,
SlavedEventStore,
SlavedKeyStore,
RoomStore,
DirectoryStore,
SlavedApplicationServiceStore,
SlavedRegistrationStore,
TransactionStore,
SlavedClientIpStore,
@@ -88,7 +88,7 @@ class ClientReaderServer(HomeServer):
"/_matrix/client/api/v1": resource,
})
root_resource = create_resource_tree(resources, Resource())
root_resource = create_resource_tree(resources, NoResource())
_base.listen_tcp(
bind_addresses,
+15 -3
@@ -31,14 +31,20 @@ from synapse.replication.slave.storage.account_data import SlavedAccountDataStor
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.directory import DirectoryStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.profile import SlavedProfileStore
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.replication.slave.storage.pushers import SlavedPusherStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.slave.storage.transactions import TransactionStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.client.v1.room import RoomSendEventRestServlet
from synapse.rest.client.v1.room import (
RoomSendEventRestServlet, RoomMembershipRestServlet, RoomStateEventRestServlet,
JoinRoomAliasServlet,
)
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree
@@ -46,12 +52,15 @@ from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import reactor
from twisted.web.resource import Resource
from twisted.web.resource import NoResource
logger = logging.getLogger("synapse.app.event_creator")
class EventCreatorSlavedStore(
DirectoryStore,
TransactionStore,
SlavedProfileStore,
SlavedAccountDataStore,
SlavedPusherStore,
SlavedReceiptsStore,
@@ -85,6 +94,9 @@ class EventCreatorServer(HomeServer):
elif name == "client":
resource = JsonResource(self, canonical_json=False)
RoomSendEventRestServlet(self).register(resource)
RoomMembershipRestServlet(self).register(resource)
RoomStateEventRestServlet(self).register(resource)
JoinRoomAliasServlet(self).register(resource)
resources.update({
"/_matrix/client/r0": resource,
"/_matrix/client/unstable": resource,
@@ -92,7 +104,7 @@ class EventCreatorServer(HomeServer):
"/_matrix/client/api/v1": resource,
})
root_resource = create_resource_tree(resources, Resource())
root_resource = create_resource_tree(resources, NoResource())
_base.listen_tcp(
bind_addresses,
+2 -2
@@ -41,7 +41,7 @@ from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import reactor
from twisted.web.resource import Resource
from twisted.web.resource import NoResource
logger = logging.getLogger("synapse.app.federation_reader")
@@ -77,7 +77,7 @@ class FederationReaderServer(HomeServer):
FEDERATION_PREFIX: TransportLayerServer(self),
})
root_resource = create_resource_tree(resources, Resource())
root_resource = create_resource_tree(resources, NoResource())
_base.listen_tcp(
bind_addresses,
+2 -2
@@ -42,7 +42,7 @@ from synapse.util.logcontext import LoggingContext, preserve_fn
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import defer, reactor
from twisted.web.resource import Resource
from twisted.web.resource import NoResource
logger = logging.getLogger("synapse.app.federation_sender")
@@ -91,7 +91,7 @@ class FederationSenderServer(HomeServer):
if name == "metrics":
resources[METRICS_PREFIX] = MetricsResource(self)
root_resource = create_resource_tree(resources, Resource())
root_resource = create_resource_tree(resources, NoResource())
_base.listen_tcp(
bind_addresses,
+2 -2
@@ -44,7 +44,7 @@ from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import defer, reactor
from twisted.web.resource import Resource
from twisted.web.resource import NoResource
logger = logging.getLogger("synapse.app.frontend_proxy")
@@ -142,7 +142,7 @@ class FrontendProxyServer(HomeServer):
"/_matrix/client/api/v1": resource,
})
root_resource = create_resource_tree(resources, Resource())
root_resource = create_resource_tree(resources, NoResource())
_base.listen_tcp(
bind_addresses,
+38 -2
@@ -48,6 +48,7 @@ from synapse.server import HomeServer
from synapse.storage import are_all_users_on_domain
from synapse.storage.engines import IncorrectDatabaseSetup, create_engine
from synapse.storage.prepare_database import UpgradeDatabaseException, prepare_database
from synapse.util.caches import CACHE_SIZE_FACTOR
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
@@ -56,7 +57,7 @@ from synapse.util.rlimit import change_resource_limit
from synapse.util.versionstring import get_version_string
from twisted.application import service
from twisted.internet import defer, reactor
from twisted.web.resource import EncodingResourceWrapper, Resource
from twisted.web.resource import EncodingResourceWrapper, NoResource
from twisted.web.server import GzipEncoderFactory
from twisted.web.static import File
@@ -126,7 +127,7 @@ class SynapseHomeServer(HomeServer):
if WEB_CLIENT_PREFIX in resources:
root_resource = RootRedirect(WEB_CLIENT_PREFIX)
else:
root_resource = Resource()
root_resource = NoResource()
root_resource = create_resource_tree(resources, root_resource)
@@ -402,6 +403,10 @@ def run(hs):
stats = {}
# Contains the list of processes we will be monitoring
# currently either 0 or 1
stats_process = []
@defer.inlineCallbacks
def phone_stats_home():
logger.info("Gathering stats for reporting")
@@ -427,6 +432,15 @@ def run(hs):
daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages()
stats["daily_sent_messages"] = daily_sent_messages
stats["cache_factor"] = CACHE_SIZE_FACTOR
stats["event_cache_size"] = hs.config.event_cache_size
if len(stats_process) > 0:
stats["memory_rss"] = 0
stats["cpu_average"] = 0
for process in stats_process:
stats["memory_rss"] += process.memory_info().rss
stats["cpu_average"] += int(process.cpu_percent(interval=None))
logger.info("Reporting stats to matrix.org: %s" % (stats,))
try:
@@ -437,10 +451,32 @@ def run(hs):
except Exception as e:
logger.warn("Error reporting stats: %s", e)
def performance_stats_init():
try:
import psutil
process = psutil.Process()
# Ensure we can fetch both, and make the initial request for cpu_percent
# so the next request will use this as the initial point.
process.memory_info().rss
process.cpu_percent(interval=None)
logger.info("report_stats can use psutil")
stats_process.append(process)
except (ImportError, AttributeError):
logger.warn(
"report_stats enabled but psutil is not installed or incorrect version."
" Disabling reporting of memory/cpu stats."
" Ensuring psutil is available will help matrix.org track performance"
" changes across releases."
)
if hs.config.report_stats:
logger.info("Scheduling stats reporting for 3 hour intervals")
clock.looping_call(phone_stats_home, 3 * 60 * 60 * 1000)
# We need to defer this init for the cases that we daemonize
# otherwise the process ID we get is that of the non-daemon process
clock.call_later(0, performance_stats_init)
# We wait 5 minutes to send the first set of stats as the server can
# be quite busy the first few minutes
clock.call_later(5 * 60, phone_stats_home)
+2 -2
@@ -43,7 +43,7 @@ from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import reactor
from twisted.web.resource import Resource
from twisted.web.resource import NoResource
logger = logging.getLogger("synapse.app.media_repository")
@@ -84,7 +84,7 @@ class MediaRepositoryServer(HomeServer):
),
})
root_resource = create_resource_tree(resources, Resource())
root_resource = create_resource_tree(resources, NoResource())
_base.listen_tcp(
bind_addresses,
+2 -2
@@ -37,7 +37,7 @@ from synapse.util.logcontext import LoggingContext, preserve_fn
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import defer, reactor
from twisted.web.resource import Resource
from twisted.web.resource import NoResource
logger = logging.getLogger("synapse.app.pusher")
@@ -94,7 +94,7 @@ class PusherServer(HomeServer):
if name == "metrics":
resources[METRICS_PREFIX] = MetricsResource(self)
root_resource = create_resource_tree(resources, Resource())
root_resource = create_resource_tree(resources, NoResource())
_base.listen_tcp(
bind_addresses,
+3 -3
@@ -56,7 +56,7 @@ from synapse.util.manhole import manhole
from synapse.util.stringutils import random_string
from synapse.util.versionstring import get_version_string
from twisted.internet import defer, reactor
from twisted.web.resource import Resource
from twisted.web.resource import NoResource
logger = logging.getLogger("synapse.app.synchrotron")
@@ -64,6 +64,7 @@ logger = logging.getLogger("synapse.app.synchrotron")
class SynchrotronSlavedStore(
SlavedReceiptsStore,
SlavedAccountDataStore,
SlavedPushRuleStore,
SlavedApplicationServiceStore,
SlavedRegistrationStore,
SlavedFilteringStore,
@@ -71,7 +72,6 @@ class SynchrotronSlavedStore(
SlavedGroupServerStore,
SlavedDeviceInboxStore,
SlavedDeviceStore,
SlavedPushRuleStore,
SlavedEventStore,
SlavedClientIpStore,
RoomStore,
@@ -269,7 +269,7 @@ class SynchrotronServer(HomeServer):
"/_matrix/client/api/v1": resource,
})
root_resource = create_resource_tree(resources, Resource())
root_resource = create_resource_tree(resources, NoResource())
_base.listen_tcp(
bind_addresses,
+2 -2
@@ -38,7 +38,7 @@ def pid_running(pid):
try:
os.kill(pid, 0)
return True
except OSError, err:
except OSError as err:
if err.errno == errno.EPERM:
return True
return False
@@ -98,7 +98,7 @@ def stop(pidfile, app):
try:
os.kill(pid, signal.SIGTERM)
write("stopped %s" % (app,), colour=GREEN)
except OSError, err:
except OSError as err:
if err.errno == errno.ESRCH:
write("%s not running" % (app,), colour=YELLOW)
elif err.errno == errno.EPERM:
+3 -3
@@ -43,14 +43,14 @@ from synapse.util.logcontext import LoggingContext, preserve_fn
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
from twisted.internet import reactor
from twisted.web.resource import Resource
from twisted.web.resource import NoResource
logger = logging.getLogger("synapse.app.user_dir")
class UserDirectorySlaveStore(
SlavedEventStore,
SlavedApplicationServiceStore,
SlavedEventStore,
SlavedRegistrationStore,
SlavedClientIpStore,
UserDirectoryStore,
@@ -116,7 +116,7 @@ class UserDirectoryServer(HomeServer):
"/_matrix/client/api/v1": resource,
})
root_resource = create_resource_tree(resources, Resource())
root_resource = create_resource_tree(resources, NoResource())
_base.listen_tcp(
bind_addresses,
+9 -11
@@ -13,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.api.constants import EventTypes
from synapse.util.caches.descriptors import cachedInlineCallbacks
from synapse.types import GroupID, get_domain_from_id
from twisted.internet import defer
@@ -173,6 +172,7 @@ class ApplicationService(object):
if self.is_interested_in_user(event.sender):
defer.returnValue(True)
# also check m.room.member state key
if (event.type == EventTypes.Member and
self.is_interested_in_user(event.state_key)):
@@ -181,20 +181,18 @@ class ApplicationService(object):
if not store:
defer.returnValue(False)
does_match = yield self._matches_user_in_member_list(event.room_id, store)
does_match = yield self._matches_user_in_member_list(
event, store,
)
defer.returnValue(does_match)
@cachedInlineCallbacks(num_args=1, cache_context=True)
def _matches_user_in_member_list(self, room_id, store, cache_context):
member_list = yield store.get_users_in_room(
room_id, on_invalidate=cache_context.invalidate
@defer.inlineCallbacks
def _matches_user_in_member_list(self, event, store):
ases = yield store.get_appservices_with_user_in_room(
event,
)
# check joined member events
for user_id in member_list:
if self.is_interested_in_user(user_id):
defer.returnValue(True)
defer.returnValue(False)
defer.returnValue(self.id in ases)
def _matches_room_id(self, event):
if hasattr(event, "room_id"):
+3 -1
@@ -77,7 +77,9 @@ class RegistrationConfig(Config):
# Set the number of bcrypt rounds used to generate password hash.
# Larger numbers increase the work factor needed to generate the hash.
# The default number of rounds is 12.
# The default number is 12 (which equates to 2^12 rounds).
# N.B. that increasing this will exponentially increase the time required
# to register or login - e.g. 24 => 2^24 rounds which will take >20 mins.
bcrypt_rounds: 12
# Allows users to register as guests without a password/email/etc, and
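The cost curve the new config comment warns about, sketched with the bcrypt module Synapse already depends on (see the bcrypt>=3.1.0 requirement elsewhere in this diff):

    import bcrypt

    # Work factor is 2**rounds: each +1 doubles hashing time, so 24 rounds
    # costs 4096x the default of 12.
    pw = b"correct horse battery staple"
    hashed = bcrypt.hashpw(pw, bcrypt.gensalt(rounds=12))
    assert bcrypt.checkpw(pw, hashed)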
+3 -3
@@ -155,7 +155,7 @@ class DeviceHandler(BaseHandler):
try:
yield self.store.delete_device(user_id, device_id)
except errors.StoreError, e:
except errors.StoreError as e:
if e.code == 404:
# no match
pass
@@ -204,7 +204,7 @@ class DeviceHandler(BaseHandler):
try:
yield self.store.delete_devices(user_id, device_ids)
except errors.StoreError, e:
except errors.StoreError as e:
if e.code == 404:
# no match
pass
@@ -243,7 +243,7 @@ class DeviceHandler(BaseHandler):
new_display_name=content.get("display_name")
)
yield self.notify_device_update(user_id, [device_id])
except errors.StoreError, e:
except errors.StoreError as e:
if e.code == 404:
raise errors.NotFoundError()
else:
+29 -29
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,7 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import ujson as json
import simplejson as json
import logging
from canonicaljson import encode_canonical_json
@@ -134,23 +135,8 @@ class E2eKeysHandler(object):
if user_id in destination_query:
results[user_id] = keys
except CodeMessageException as e:
failures[destination] = {
"status": e.code, "message": e.message
}
except NotRetryingDestination as e:
failures[destination] = {
"status": 503, "message": "Not ready for retry",
}
except FederationDeniedError as e:
failures[destination] = {
"status": 403, "message": "Federation Denied",
}
except Exception as e:
# include ConnectionRefused and other errors
failures[destination] = {
"status": 503, "message": e.message
}
failures[destination] = _exception_to_failure(e)
yield make_deferred_yieldable(defer.gatherResults([
preserve_fn(do_remote_query)(destination)
@@ -252,19 +238,8 @@ class E2eKeysHandler(object):
for user_id, keys in remote_result["one_time_keys"].items():
if user_id in device_keys:
json_result[user_id] = keys
except CodeMessageException as e:
failures[destination] = {
"status": e.code, "message": e.message
}
except NotRetryingDestination as e:
failures[destination] = {
"status": 503, "message": "Not ready for retry",
}
except Exception as e:
# include ConnectionRefused and other errors
failures[destination] = {
"status": 503, "message": e.message
}
failures[destination] = _exception_to_failure(e)
yield make_deferred_yieldable(defer.gatherResults([
preserve_fn(claim_client_keys)(destination)
@@ -362,6 +337,31 @@ class E2eKeysHandler(object):
)
def _exception_to_failure(e):
if isinstance(e, CodeMessageException):
return {
"status": e.code, "message": e.message,
}
if isinstance(e, NotRetryingDestination):
return {
"status": 503, "message": "Not ready for retry",
}
if isinstance(e, FederationDeniedError):
return {
"status": 403, "message": "Federation Denied",
}
# include ConnectionRefused and other errors
#
# Note that some Exceptions (notably twisted's ResponseFailed etc) don't
# give a string for e.message, which simplejson then fails to serialize.
return {
"status": 503, "message": str(e.message),
}
def _one_time_keys_match(old_key_json, new_key):
old_key = json.loads(old_key_json)
+5 -3
@@ -15,6 +15,11 @@
# limitations under the License.
"""Utilities for interacting with Identity Servers"""
import logging
import simplejson as json
from twisted.internet import defer
from synapse.api.errors import (
@@ -24,9 +29,6 @@ from ._base import BaseHandler
from synapse.util.async import run_on_reactor
from synapse.api.errors import SynapseError, Codes
import json
import logging
logger = logging.getLogger(__name__)
+4 -4
@@ -27,7 +27,7 @@ from synapse.types import (
from synapse.util.async import run_on_reactor, ReadWriteLock, Limiter
from synapse.util.logcontext import preserve_fn, run_in_background
from synapse.util.metrics import measure_func
from synapse.util.frozenutils import unfreeze
from synapse.util.frozenutils import frozendict_json_encoder
from synapse.util.stringutils import random_string
from synapse.visibility import filter_events_for_client
from synapse.replication.http.send_event import send_event_to_master
@@ -38,7 +38,7 @@ from canonicaljson import encode_canonical_json
import logging
import random
import ujson
import simplejson
logger = logging.getLogger(__name__)
@@ -678,8 +678,8 @@ class EventCreationHandler(object):
# Ensure that we can round trip before trying to persist in db
try:
dump = ujson.dumps(unfreeze(event.content))
ujson.loads(dump)
dump = frozendict_json_encoder.encode(event.content)
simplejson.loads(dump)
except Exception:
logger.exception("Failed to encode content: %r", event.content)
raise
+4 -1
@@ -38,7 +38,10 @@ class ProfileHandler(BaseHandler):
self.user_directory_handler = hs.get_user_directory_handler()
self.clock.looping_call(self._update_remote_profile_cache, self.PROFILE_UPDATE_MS)
if hs.config.worker_app is None:
self.clock.looping_call(
self._update_remote_profile_cache, self.PROFILE_UPDATE_MS,
)
@defer.inlineCallbacks
def get_profile(self, user_id):
+10 -4
@@ -24,7 +24,7 @@ from synapse.api.errors import (
from synapse.http.client import CaptchaServerHttpClient
from synapse import types
from synapse.types import UserID
from synapse.util.async import run_on_reactor
from synapse.util.async import run_on_reactor, Linearizer
from synapse.util.threepids import check_3pid_allowed
from ._base import BaseHandler
@@ -46,6 +46,10 @@ class RegistrationHandler(BaseHandler):
self.macaroon_gen = hs.get_macaroon_generator()
self._generate_user_id_linearizer = Linearizer(
name="_generate_user_id_linearizer",
)
@defer.inlineCallbacks
def check_username(self, localpart, guest_access_token=None,
assigned_user_id=None):
@@ -345,9 +349,11 @@ class RegistrationHandler(BaseHandler):
@defer.inlineCallbacks
def _generate_user_id(self, reseed=False):
if reseed or self._next_generated_user_id is None:
self._next_generated_user_id = (
yield self.store.find_next_generated_user_id_localpart()
)
with (yield self._generate_user_id_linearizer.queue(())):
if reseed or self._next_generated_user_id is None:
self._next_generated_user_id = (
yield self.store.find_next_generated_user_id_localpart()
)
id = self._next_generated_user_id
self._next_generated_user_id += 1
+4 -82
@@ -27,10 +27,6 @@ from synapse.api.constants import (
EventTypes, Membership,
)
from synapse.api.errors import AuthError, SynapseError, Codes
from synapse.replication.http.membership import (
remote_join, remote_reject_invite, get_or_register_3pid_guest,
notify_user_membership_change,
)
from synapse.types import UserID, RoomID
from synapse.util.async import Linearizer
from synapse.util.distributor import user_left_room, user_joined_room
@@ -374,7 +370,7 @@ class RoomMemberHandler(object):
content["kind"] = "guest"
ret = yield self._remote_join(
remote_room_hosts, room_id, target, content
requester, remote_room_hosts, room_id, target, content
)
defer.returnValue(ret)
@@ -396,7 +392,7 @@ class RoomMemberHandler(object):
# send the rejection to the inviter's HS.
remote_room_hosts = remote_room_hosts + [inviter.domain]
res = yield self._remote_reject_invite(
remote_room_hosts, room_id, target,
requester, remote_room_hosts, room_id, target,
)
defer.returnValue(res)
@@ -853,7 +849,7 @@ class RoomMemberMasterHandler(RoomMemberHandler):
self.distributor.declare("user_left_room")
@defer.inlineCallbacks
def _remote_join(self, remote_room_hosts, room_id, user, content):
def _remote_join(self, requester, remote_room_hosts, room_id, user, content):
"""Implements RoomMemberHandler._remote_join
"""
if len(remote_room_hosts) == 0:
@@ -872,7 +868,7 @@ class RoomMemberMasterHandler(RoomMemberHandler):
yield self._user_joined_room(user, room_id)
@defer.inlineCallbacks
def _remote_reject_invite(self, remote_room_hosts, room_id, target):
def _remote_reject_invite(self, requester, remote_room_hosts, room_id, target):
"""Implements RoomMemberHandler._remote_reject_invite
"""
fed_handler = self.federation_handler
@@ -933,77 +929,3 @@ class RoomMemberMasterHandler(RoomMemberHandler):
if membership:
yield self.store.forget(user_id, room_id)
class RoomMemberWorkerHandler(RoomMemberHandler):
@defer.inlineCallbacks
def _remote_join(self, requester, remote_room_hosts, room_id, user, content):
"""Implements RoomMemberHandler._remote_join
"""
if len(remote_room_hosts) == 0:
raise SynapseError(404, "No known servers")
ret = yield remote_join(
self.simple_http_client,
host=self.config.worker_replication_host,
port=self.config.worker_replication_http_port,
requester=requester,
remote_room_hosts=remote_room_hosts,
room_id=room_id,
user_id=user.to_string(),
content=content,
)
yield self._user_joined_room(user, room_id)
defer.returnValue(ret)
def _remote_reject_invite(self, requester, remote_room_hosts, room_id, target):
"""Implements RoomMemberHandler._remote_reject_invite
"""
return remote_reject_invite(
self.simple_http_client,
host=self.config.worker_replication_host,
port=self.config.worker_replication_http_port,
requester=requester,
remote_room_hosts=remote_room_hosts,
room_id=room_id,
user_id=target.to_string(),
)
def _user_joined_room(self, target, room_id):
"""Implements RoomMemberHandler._user_joined_room
"""
return notify_user_membership_change(
self.simple_http_client,
host=self.config.worker_replication_host,
port=self.config.worker_replication_http_port,
user_id=target.to_string(),
room_id=room_id,
change="join",
)
def _user_left_room(self, target, room_id):
"""Implements RoomMemberHandler._user_left_room
"""
return notify_user_membership_change(
self.simple_http_client,
host=self.config.worker_replication_host,
port=self.config.worker_replication_http_port,
user_id=target.to_string(),
room_id=room_id,
change="left",
)
def get_or_register_3pid_guest(self, requester, medium, address, inviter_user_id):
"""Implements RoomMemberHandler.get_or_register_3pid_guest
"""
return get_or_register_3pid_guest(
self.simple_http_client,
host=self.config.worker_replication_host,
port=self.config.worker_replication_http_port,
requester=requester,
medium=medium,
address=address,
inviter_user_id=inviter_user_id,
)
+102
@@ -0,0 +1,102 @@
# -*- coding: utf-8 -*-
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from twisted.internet import defer
from synapse.api.errors import SynapseError
from synapse.handlers.room_member import RoomMemberHandler
from synapse.replication.http.membership import (
remote_join, remote_reject_invite, get_or_register_3pid_guest,
notify_user_membership_change,
)
logger = logging.getLogger(__name__)
class RoomMemberWorkerHandler(RoomMemberHandler):
@defer.inlineCallbacks
def _remote_join(self, requester, remote_room_hosts, room_id, user, content):
"""Implements RoomMemberHandler._remote_join
"""
if len(remote_room_hosts) == 0:
raise SynapseError(404, "No known servers")
ret = yield remote_join(
self.simple_http_client,
host=self.config.worker_replication_host,
port=self.config.worker_replication_http_port,
requester=requester,
remote_room_hosts=remote_room_hosts,
room_id=room_id,
user_id=user.to_string(),
content=content,
)
yield self._user_joined_room(user, room_id)
defer.returnValue(ret)
def _remote_reject_invite(self, requester, remote_room_hosts, room_id, target):
"""Implements RoomMemberHandler._remote_reject_invite
"""
return remote_reject_invite(
self.simple_http_client,
host=self.config.worker_replication_host,
port=self.config.worker_replication_http_port,
requester=requester,
remote_room_hosts=remote_room_hosts,
room_id=room_id,
user_id=target.to_string(),
)
def _user_joined_room(self, target, room_id):
"""Implements RoomMemberHandler._user_joined_room
"""
return notify_user_membership_change(
self.simple_http_client,
host=self.config.worker_replication_host,
port=self.config.worker_replication_http_port,
user_id=target.to_string(),
room_id=room_id,
change="joined",
)
def _user_left_room(self, target, room_id):
"""Implements RoomMemberHandler._user_left_room
"""
return notify_user_membership_change(
self.simple_http_client,
host=self.config.worker_replication_host,
port=self.config.worker_replication_http_port,
user_id=target.to_string(),
room_id=room_id,
change="left",
)
def get_or_register_3pid_guest(self, requester, medium, address, inviter_user_id):
"""Implements RoomMemberHandler.get_or_register_3pid_guest
"""
return get_or_register_3pid_guest(
self.simple_http_client,
host=self.config.worker_replication_host,
port=self.config.worker_replication_http_port,
requester=requester,
medium=medium,
address=address,
inviter_user_id=inviter_user_id,
)
+3 -3
@@ -37,7 +37,7 @@ from twisted.web.util import redirectTo
import collections
import logging
import urllib
import ujson
import simplejson
logger = logging.getLogger(__name__)
@@ -461,8 +461,7 @@ def respond_with_json(request, code, json_object, send_cors=False,
if canonical_json or synapse.events.USE_FROZEN_DICTS:
json_bytes = encode_canonical_json(json_object)
else:
# ujson doesn't like frozen_dicts.
json_bytes = ujson.dumps(json_object, ensure_ascii=False)
json_bytes = simplejson.dumps(json_object)
return respond_with_json_bytes(
request, code, json_bytes,
@@ -489,6 +488,7 @@ def respond_with_json_bytes(request, code, json_bytes, send_cors=False,
request.setHeader(b"Content-Type", b"application/json")
request.setHeader(b"Server", version_string)
request.setHeader(b"Content-Length", b"%d" % (len(json_bytes),))
request.setHeader(b"Cache-Control", b"no-cache, no-store, must-revalidate")
if send_cors:
set_cors_headers(request)
-1
@@ -34,7 +34,6 @@ REQUIREMENTS = {
"bcrypt": ["bcrypt>=3.1.0"],
"pillow": ["PIL"],
"pydenticon": ["pydenticon"],
"ujson": ["ujson"],
"blist": ["blist"],
"pysaml2>=3.0.0": ["saml2>=3.0.0"],
"pymacaroons-pynacl": ["pymacaroons"],
+1 -4
@@ -13,11 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import send_event
import membership
from synapse.http.server import JsonResource
from synapse.replication.http import membership, send_event
REPLICATION_PREFIX = "/_synapse/replication"
+6 -6
@@ -13,6 +13,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
from twisted.internet import defer
from synapse.api.errors import SynapseError, MatrixCodeMessageException
@@ -20,9 +23,6 @@ from synapse.http.servlet import RestServlet, parse_json_object_from_request
from synapse.types import Requester, UserID
from synapse.util.distributor import user_left_room, user_joined_room
import logging
import re
logger = logging.getLogger(__name__)
@@ -154,7 +154,7 @@ def notify_user_membership_change(client, host, port, user_id, room_id, change):
Returns:
Deferred
"""
assert change in ("join", "left")
assert change in ("joined", "left")
uri = "http://%s:%s/_synapse/replication/user_%s_room" % (host, port, change)
@@ -297,7 +297,7 @@ class ReplicationRegister3PIDGuestRestServlet(RestServlet):
class ReplicationUserJoinedLeftRoomRestServlet(RestServlet):
PATTERNS = [re.compile("^/_synapse/replication/user_(?P<change>join|left)_room$")]
PATTERNS = [re.compile("^/_synapse/replication/user_(?P<change>joined|left)_room$")]
def __init__(self, hs):
super(ReplicationUserJoinedLeftRoomRestServlet, self).__init__()
@@ -317,7 +317,7 @@ class ReplicationUserJoinedLeftRoomRestServlet(RestServlet):
user = UserID.from_string(user_id)
if change == "join":
if change == "joined":
user_joined_room(self.distributor, user, room_id)
elif change == "left":
user_left_room(self.distributor, user, room_id)
@@ -17,8 +17,10 @@
from synapse.storage.appservice import (
ApplicationServiceWorkerStore, ApplicationServiceTransactionWorkerStore,
)
from synapse.replication.slave.storage.events import SlavedEventStore
class SlavedApplicationServiceStore(ApplicationServiceTransactionWorkerStore,
ApplicationServiceWorkerStore):
ApplicationServiceWorkerStore,
SlavedEventStore):
pass
@@ -0,0 +1,21 @@
# -*- coding: utf-8 -*-
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.storage.profile import ProfileWorkerStore
class SlavedProfileStore(ProfileWorkerStore, BaseSlavedStore):
pass
+11 -7
@@ -19,11 +19,13 @@ allowed to be sent by which side.
"""
import logging
import ujson as json
import simplejson
logger = logging.getLogger(__name__)
_json_encoder = simplejson.JSONEncoder(namedtuple_as_object=False)
class Command(object):
"""The base command class.
@@ -100,14 +102,14 @@ class RdataCommand(Command):
return cls(
stream_name,
None if token == "batch" else int(token),
json.loads(row_json)
simplejson.loads(row_json)
)
def to_line(self):
return " ".join((
self.stream_name,
str(self.token) if self.token is not None else "batch",
json.dumps(self.row),
_json_encoder.encode(self.row),
))
@@ -298,10 +300,12 @@ class InvalidateCacheCommand(Command):
def from_line(cls, line):
cache_func, keys_json = line.split(" ", 1)
-return cls(cache_func, json.loads(keys_json))
+return cls(cache_func, simplejson.loads(keys_json))
def to_line(self):
return " ".join((self.cache_func, json.dumps(self.keys)))
return " ".join((
self.cache_func, _json_encoder.encode(self.keys),
))
class UserIpCommand(Command):
@@ -325,14 +329,14 @@ class UserIpCommand(Command):
def from_line(cls, line):
user_id, jsn = line.split(" ", 1)
-access_token, ip, user_agent, device_id, last_seen = json.loads(jsn)
+access_token, ip, user_agent, device_id, last_seen = simplejson.loads(jsn)
return cls(
user_id, access_token, ip, user_agent, device_id, last_seen
)
def to_line(self):
return self.user_id + " " + json.dumps((
return self.user_id + " " + _json_encoder.encode((
self.access_token, self.ip, self.user_agent, self.device_id,
self.last_seen,
))
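The pattern behind these hunks (from the "Use static JSONEncoders" commit): calling json.dumps with non-default options constructs a fresh JSONEncoder on every call, so the encoder is built once at module level and only its encode() method runs per command. A sketch assuming simplejson is installed; to_line here is a simplified stand-in for RdataCommand.to_line:

import simplejson

# Built once; dumps(..., namedtuple_as_object=False) would construct an
# equivalent encoder object on every call.
_json_encoder = simplejson.JSONEncoder(namedtuple_as_object=False)

def to_line(stream_name, token, row):
    # Space-separated wire format with the row serialised as JSON.
    return " ".join((
        stream_name,
        str(token) if token is not None else "batch",
        _json_encoder.encode(row),
    ))

print(to_line("events", 1234, {"event_id": "$abc:example.com"}))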
+1 -1
@@ -30,7 +30,7 @@ from synapse.http.servlet import (
import logging
import urllib
-import ujson as json
+import simplejson as json
logger = logging.getLogger(__name__)
+1 -1
@@ -33,7 +33,7 @@ from ._base import set_timeline_upper_limit
import itertools
import logging
-import ujson as json
+import simplejson as json
logger = logging.getLogger(__name__)
@@ -23,7 +23,7 @@ import re
import shutil
import sys
import traceback
-import ujson as json
+import simplejson as json
import urlparse
from twisted.web.server import NOT_DONE_YET
+2 -3
@@ -47,9 +47,8 @@ from synapse.handlers.device import DeviceHandler
from synapse.handlers.e2e_keys import E2eKeysHandler
from synapse.handlers.presence import PresenceHandler
from synapse.handlers.room_list import RoomListHandler
-from synapse.handlers.room_member import (
-    RoomMemberMasterHandler, RoomMemberWorkerHandler,
-)
+from synapse.handlers.room_member import RoomMemberMasterHandler
+from synapse.handlers.room_member_worker import RoomMemberWorkerHandler
from synapse.handlers.set_password import SetPasswordHandler
from synapse.handlers.sync import SyncHandler
from synapse.handlers.typing import TypingHandler
+36 -35
@@ -132,7 +132,7 @@ class StateHandler(object):
state_map = yield self.store.get_events(state.values(), get_prev_content=False)
state = {
-key: state_map[e_id] for key, e_id in state.items() if e_id in state_map
+key: state_map[e_id] for key, e_id in state.iteritems() if e_id in state_map
}
defer.returnValue(state)
@@ -378,7 +378,7 @@ class StateHandler(object):
new_state = resolve_events_with_state_map(state_set_ids, state_map)
new_state = {
-key: state_map[ev_id] for key, ev_id in new_state.items()
+key: state_map[ev_id] for key, ev_id in new_state.iteritems()
}
return new_state
@@ -458,15 +458,15 @@ class StateResolutionHandler(object):
# build a map from state key to the event_ids which set that state.
# dict[(str, str), set[str])
state = {}
-for st in state_groups_ids.values():
-    for key, e_id in st.items():
+for st in state_groups_ids.itervalues():
+    for key, e_id in st.iteritems():
state.setdefault(key, set()).add(e_id)
# build a map from state key to the event_ids which set that state,
# including only those where there are state keys in conflict.
conflicted_state = {
k: list(v)
-for k, v in state.items()
+for k, v in state.iteritems()
if len(v) > 1
}
@@ -480,36 +480,37 @@ class StateResolutionHandler(object):
)
else:
new_state = {
-key: e_ids.pop() for key, e_ids in state.items()
+key: e_ids.pop() for key, e_ids in state.iteritems()
}
-# if the new state matches any of the input state groups, we can
-# use that state group again. Otherwise we will generate a state_id
-# which will be used as a cache key for future resolutions, but
-# not get persisted.
-state_group = None
-new_state_event_ids = frozenset(new_state.values())
-for sg, events in state_groups_ids.items():
-    if new_state_event_ids == frozenset(e_id for e_id in events):
-        state_group = sg
-        break
+with Measure(self.clock, "state.create_group_ids"):
+    # if the new state matches any of the input state groups, we can
+    # use that state group again. Otherwise we will generate a state_id
+    # which will be used as a cache key for future resolutions, but
+    # not get persisted.
+    state_group = None
+    new_state_event_ids = frozenset(new_state.itervalues())
+    for sg, events in state_groups_ids.iteritems():
+        if new_state_event_ids == frozenset(e_id for e_id in events):
+            state_group = sg
+            break
-# TODO: We want to create a state group for this set of events, to
-# increase cache hits, but we need to make sure that it doesn't
-# end up as a prev_group without being added to the database
+    # TODO: We want to create a state group for this set of events, to
+    # increase cache hits, but we need to make sure that it doesn't
+    # end up as a prev_group without being added to the database
-prev_group = None
-delta_ids = None
-for old_group, old_ids in state_groups_ids.iteritems():
-    if not set(new_state) - set(old_ids):
-        n_delta_ids = {
-            k: v
-            for k, v in new_state.iteritems()
-            if old_ids.get(k) != v
-        }
-        if not delta_ids or len(n_delta_ids) < len(delta_ids):
-            prev_group = old_group
-            delta_ids = n_delta_ids
+    prev_group = None
+    delta_ids = None
+    for old_group, old_ids in state_groups_ids.iteritems():
+        if not set(new_state) - set(old_ids):
+            n_delta_ids = {
+                k: v
+                for k, v in new_state.iteritems()
+                if old_ids.get(k) != v
+            }
+            if not delta_ids or len(n_delta_ids) < len(delta_ids):
+                prev_group = old_group
+                delta_ids = n_delta_ids
cache = _StateCacheEntry(
state=new_state,
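Measure, used above to time the state-group search, is a context manager that accumulates how long the wrapped block took under a named key. A toy stand-in that only tracks wall-clock time; the real synapse.util.metrics.Measure also records CPU and database usage via log contexts:

import time
from contextlib import contextmanager

_block_timings = {}  # block name -> cumulative seconds

@contextmanager
def measure(name, clock=time.time):
    start = clock()
    try:
        yield
    finally:
        _block_timings[name] = _block_timings.get(name, 0.0) + (clock() - start)

with measure("state.create_group_ids"):
    pass  # ... search state_groups_ids for a reusable group ...

print(_block_timings)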
@@ -702,7 +703,7 @@ def _resolve_with_state(unconflicted_state_ids, conflicted_state_ds, auth_event_
auth_events = {
key: state_map[ev_id]
-for key, ev_id in auth_event_ids.items()
+for key, ev_id in auth_event_ids.iteritems()
if ev_id in state_map
}
@@ -740,7 +741,7 @@ def _resolve_state_events(conflicted_state, auth_events):
auth_events.update(resolved_state)
-for key, events in conflicted_state.items():
+for key, events in conflicted_state.iteritems():
if key[0] == EventTypes.JoinRules:
logger.debug("Resolving conflicted join rules %r", events)
resolved_state[key] = _resolve_auth_events(
@@ -750,7 +751,7 @@ def _resolve_state_events(conflicted_state, auth_events):
auth_events.update(resolved_state)
-for key, events in conflicted_state.items():
+for key, events in conflicted_state.iteritems():
if key[0] == EventTypes.Member:
logger.debug("Resolving conflicted member lists %r", events)
resolved_state[key] = _resolve_auth_events(
@@ -760,7 +761,7 @@ def _resolve_state_events(conflicted_state, auth_events):
auth_events.update(resolved_state)
-for key, events in conflicted_state.items():
+for key, events in conflicted_state.iteritems():
if key not in resolved_state:
logger.debug("Resolving conflicted state %r:%r", key, events)
resolved_state[key] = _resolve_normal_events(
+1 -1
@@ -23,7 +23,7 @@ from synapse.util.caches.stream_change_cache import StreamChangeCache
from synapse.util.caches.descriptors import cached, cachedList, cachedInlineCallbacks
import abc
-import ujson as json
+import simplejson as json
import logging
logger = logging.getLogger(__name__)
+156 -1
@@ -18,9 +18,14 @@ import re
import simplejson as json
from twisted.internet import defer
from synapse.api.constants import EventTypes, Membership
from synapse.appservice import AppServiceTransaction
from synapse.config.appservice import load_appservices
from synapse.storage.events import EventsWorkerStore
from synapse.storage.roommember import RoomMemberWorkerStore
from synapse.storage.state import StateGroupWorkerStore
from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
from synapse.util.async import Linearizer
from ._base import SQLBaseStore
@@ -46,7 +51,8 @@ def _make_exclusive_regex(services_cache):
return exclusive_user_regex
-class ApplicationServiceWorkerStore(SQLBaseStore):
+class ApplicationServiceWorkerStore(RoomMemberWorkerStore, StateGroupWorkerStore,
+                                    SQLBaseStore):
def __init__(self, db_conn, hs):
self.services_cache = load_appservices(
hs.hostname,
@@ -111,6 +117,38 @@ class ApplicationServiceWorkerStore(SQLBaseStore):
return service
return None
@defer.inlineCallbacks
def get_appservices_with_user_in_room(self, event):
"""Get the list of appservices in the room at the given event
Args:
event (Event)
Returns:
Deferred[set(str)]: The IDs of all ASes in the room
"""
state_group = yield self._get_state_group_for_event(event.event_id)
if not state_group:
raise Exception("No state group for event %s", event.event_id)
ases_in_room = yield self._get_appservices_with_user_in_room(
event.room_id, state_group,
)
defer.returnValue(ases_in_room)
@cachedInlineCallbacks(num_args=2, max_entries=10000)
def _get_appservices_with_user_in_room(self, room_id, state_group):
cache = self._get_appservices_with_user_in_room_cache(room_id)
ases_in_room = yield cache.get_appservices_in_room_by_user(state_group)
defer.returnValue(ases_in_room)
@cached(max_entries=10000)
def _get_appservices_with_user_in_room_cache(self, room_id):
return _AppserviceUsersCache(self, room_id)
class ApplicationServiceStore(ApplicationServiceWorkerStore):
# This is currently empty due to there not being any AS storage functions
@@ -346,6 +384,7 @@ class ApplicationServiceTransactionWorkerStore(ApplicationServiceWorkerStore,
" (SELECT stream_ordering FROM appservice_stream_position)"
" < e.stream_ordering"
" AND e.stream_ordering <= ?"
" AND NOT e.outlier"
" ORDER BY e.stream_ordering ASC"
" LIMIT ?"
)
@@ -374,3 +413,119 @@ class ApplicationServiceTransactionStore(ApplicationServiceTransactionWorkerStor
# to keep consistency with the other stores, we keep this empty class for
# now.
pass
class _AppserviceUsersCache(object):
"""Attempts to calculate which appservices have users in a given room by
looking at state groups and their delta_ids
"""
def __init__(self, store, room_id):
self.store = store
self.room_id = room_id
self.linearizer = Linearizer("_AppserviceUsersCache")
# The last state group we calculated the ASes in the room for.
self.state_group = object()
# A dict of all appservices in the room at the above state group,
# along with a user_id of an AS user in the room.
# Dict of as_id -> user_id.
self.appservices_in_room = {}
@defer.inlineCallbacks
def get_appservices_in_room_by_user(self, state_group):
"""
Args:
state_group(str)
Returns:
Deferred[set(str)]: The IDs of all ASes in the room
"""
assert state_group is not None
if state_group == self.state_group:
defer.returnValue(frozenset(self.appservices_in_room))
with (yield self.linearizer.queue(())):
# Set of ASes whose membership of the room we need to recalculate
unhandled_ases = set()
# If the state groups match then there is nothing to do
if state_group == self.state_group:
defer.returnValue(frozenset(self.appservices_in_room))
prev_group, delta_ids = yield self.store.get_state_group_delta(state_group)
# If the prev_group matches the last state group we can calculate
# the new value by looking at the deltas
if prev_group and prev_group == self.state_group:
for (typ, state_key), event_id in delta_ids.iteritems():
if typ != EventTypes.Member:
continue
user_id = state_key
event = yield self.store.get_event(event_id)
is_join = event.membership == Membership.JOIN
for appservice in self.store.get_app_services():
as_id = appservice.id
# If this is a join and the appservice is already in
# the room then it's a no-op.
if is_join:
if as_id in self.appservices_in_room:
continue
# If this is not a join, then we only need to recalculate
# if the AS is in the room and the cached joined AS user
# matches this event.
elif self.appservices_in_room.get(as_id, None) != user_id:
continue
# If the AS is not interested in the user then it's a
# no-op.
if not appservice.is_interested_in_user(user_id):
continue
if is_join:
# If an AS user is joining then the AS is now
# interested in the room
self.appservices_in_room[as_id] = user_id
else:
# If an AS user has left then we need to
# recalculate whether the AS is still in the room.
unhandled_ases.add(appservice)
self.appservices_in_room.pop(as_id, None)
else:
unhandled_ases = set(self.store.get_app_services())
if unhandled_ases:
# We need to recalculate which ASes are in the room, so let's
# get the current state and try to find a join event
# from a user the AS is interested in.
current_state_ids = yield self.store.get_state_ids_for_group(state_group)
for appservice in unhandled_ases:
as_id = appservice.id
self.appservices_in_room.pop(as_id, None)
for (etype, state_key), event_id in current_state_ids.iteritems():
if etype != EventTypes.Member:
continue
if not appservice.is_interested_in_user(state_key):
continue
event = yield self.store.get_event(event_id)
if event.membership == Membership.JOIN:
self.appservices_in_room[as_id] = state_key
break
self.state_group = state_group
defer.returnValue(frozenset(self.appservices_in_room))
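The slow path above scans the room's current state for one joined user per unhandled AS. A condensed, synchronous sketch of that scan, with plain dicts standing in for the Deferred-returning store calls:

def rescan_ases(appservices, current_state, memberships):
    """appservices: as_id -> predicate over user_ids (is_interested_in_user).
    current_state: (type, state_key) -> event_id.
    memberships: event_id -> "join" or "leave".
    Returns as_id -> a joined user_id proving the AS is in the room.
    """
    in_room = {}
    for as_id, is_interested in appservices.items():
        for (etype, state_key), event_id in current_state.items():
            if etype != "m.room.member":
                continue
            if not is_interested(state_key):
                continue
            if memberships[event_id] == "join":
                in_room[as_id] = state_key
                break  # one joined AS user is enough
    return in_room

print(rescan_ases(
    {"irc": lambda u: u.startswith("@irc_")},
    {("m.room.member", "@irc_alice:hs"): "$e1"},
    {"$e1": "join"},
))  # -> {'irc': '@irc_alice:hs'}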
+1 -1
@@ -19,7 +19,7 @@ from . import engines
from twisted.internet import defer
-import ujson as json
+import simplejson as json
import logging
logger = logging.getLogger(__name__)
+6 -6
@@ -14,7 +14,7 @@
# limitations under the License.
import logging
-import ujson
+import simplejson
from twisted.internet import defer
@@ -85,7 +85,7 @@ class DeviceInboxStore(BackgroundUpdateStore):
)
rows = []
for destination, edu in remote_messages_by_destination.items():
-edu_json = ujson.dumps(edu)
+edu_json = simplejson.dumps(edu)
rows.append((destination, stream_id, now_ms, edu_json))
txn.executemany(sql, rows)
@@ -177,7 +177,7 @@ class DeviceInboxStore(BackgroundUpdateStore):
" WHERE user_id = ?"
)
txn.execute(sql, (user_id,))
message_json = ujson.dumps(messages_by_device["*"])
message_json = simplejson.dumps(messages_by_device["*"])
for row in txn:
# Add the message for all devices for this user on this
# server.
@@ -199,7 +199,7 @@ class DeviceInboxStore(BackgroundUpdateStore):
# Only insert into the local inbox if the device exists on
# this server
device = row[0]
-message_json = ujson.dumps(messages_by_device[device])
+message_json = simplejson.dumps(messages_by_device[device])
messages_json_for_user[device] = message_json
if messages_json_for_user:
@@ -253,7 +253,7 @@ class DeviceInboxStore(BackgroundUpdateStore):
messages = []
for row in txn:
stream_pos = row[0]
-messages.append(ujson.loads(row[1]))
+messages.append(simplejson.loads(row[1]))
if len(messages) < limit:
stream_pos = current_stream_id
return (messages, stream_pos)
@@ -389,7 +389,7 @@ class DeviceInboxStore(BackgroundUpdateStore):
messages = []
for row in txn:
stream_pos = row[0]
-messages.append(ujson.loads(row[1]))
+messages.append(simplejson.loads(row[1]))
if len(messages) < limit:
stream_pos = current_stream_id
return (messages, stream_pos)
+1 -1
@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
-import ujson as json
+import simplejson as json
from twisted.internet import defer
+1 -1
@@ -17,7 +17,7 @@ from twisted.internet import defer
from synapse.util.caches.descriptors import cached
from canonicaljson import encode_canonical_json
-import ujson as json
+import simplejson as json
from ._base import SQLBaseStore
+1 -1
@@ -22,7 +22,7 @@ from synapse.types import RoomStreamToken
from .stream import lower_bound
import logging
-import ujson as json
+import simplejson as json
logger = logging.getLogger(__name__)
+46 -17
@@ -14,15 +14,19 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from synapse.storage.events_worker import EventsWorkerStore
+from collections import OrderedDict, deque, namedtuple
+from functools import wraps
+import logging
+import simplejson as json
from twisted.internet import defer
from synapse.events import USE_FROZEN_DICTS
+from synapse.storage.events_worker import EventsWorkerStore
from synapse.util.async import ObservableDeferred
+from synapse.util.frozenutils import frozendict_json_encoder
from synapse.util.logcontext import (
-    PreserveLoggingContext, make_deferred_yieldable
+    PreserveLoggingContext, make_deferred_yieldable,
)
from synapse.util.logutils import log_function
from synapse.util.metrics import Measure
@@ -30,16 +34,8 @@ from synapse.api.constants import EventTypes
from synapse.api.errors import SynapseError
from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
from synapse.types import get_domain_from_id
-from canonicaljson import encode_canonical_json
-from collections import deque, namedtuple, OrderedDict
-from functools import wraps
import synapse.metrics
-import logging
-import ujson as json
# these are only included to make the type annotations work
from synapse.events import EventBase # noqa: F401
from synapse.events.snapshot import EventContext # noqa: F401
@@ -53,13 +49,25 @@ event_counter = metrics.register_counter(
"persisted_events_sep", labels=["type", "origin_type", "origin_entity"]
)
# The number of times we are recalculating the current state
state_delta_counter = metrics.register_counter(
"state_delta",
)
# The number of times we are recalculating state when there is only a
# single forward extremity
state_delta_single_event_counter = metrics.register_counter(
"state_delta_single_event",
)
# The number of times we are recalculating state when we could have reasonably
# calculated the delta when we calculated the state for an event we were
# persisting.
state_delta_reuse_delta_counter = metrics.register_counter(
"state_delta_reuse_delta",
)
def encode_json(json_object):
-    if USE_FROZEN_DICTS:
-        # ujson doesn't like frozen_dicts
-        return encode_canonical_json(json_object)
-    else:
-        return json.dumps(json_object, ensure_ascii=False)
+    return frozendict_json_encoder.encode(json_object)
class _EventPeristenceQueue(object):
@@ -369,7 +377,8 @@ class EventsStore(EventsWorkerStore):
room_id, ev_ctx_rm, latest_event_ids
)
-if new_latest_event_ids == set(latest_event_ids):
+latest_event_ids = set(latest_event_ids)
+if new_latest_event_ids == latest_event_ids:
# No change in extremities, so no change in state
continue
@@ -390,6 +399,26 @@ class EventsStore(EventsWorkerStore):
if all_single_prev_not_state:
continue
state_delta_counter.inc()
if len(new_latest_event_ids) == 1:
state_delta_single_event_counter.inc()
# This is a fairly handwavey check to see if we could
# have guessed what the delta would have been when
# processing one of these events.
# What we're interested in is if the latest extremities
# were the same when we created the event as they are
# now. When this server creates a new event (as opposed
# to receiving it over federation) it will use the
# forward extremities as the prev_events, so we can
# guess this by looking at the prev_events and checking
# if they match the current forward extremities.
for ev, _ in ev_ctx_rm:
prev_event_ids = set(e for e, _ in ev.prev_events)
if latest_event_ids == prev_event_ids:
state_delta_reuse_delta_counter.inc()
break
logger.info(
"Calculating state delta for room %s", room_id,
)
+1 -1
@@ -28,7 +28,7 @@ from synapse.api.errors import SynapseError
from collections import namedtuple
import logging
-import ujson as json
+import simplejson as json
# these are only included to make the type annotations work
from synapse.events import EventBase # noqa: F401
+1 -1
@@ -19,7 +19,7 @@ from synapse.api.errors import SynapseError
from ._base import SQLBaseStore
-import ujson as json
+import simplejson as json
# The category ID for the "default" category. We don't store as null in the
+26 -24
@@ -21,14 +21,7 @@ from synapse.api.errors import StoreError
from ._base import SQLBaseStore
-class ProfileStore(SQLBaseStore):
-    def create_profile(self, user_localpart):
-        return self._simple_insert(
-            table="profiles",
-            values={"user_id": user_localpart},
-            desc="create_profile",
-        )
+class ProfileWorkerStore(SQLBaseStore):
@defer.inlineCallbacks
def get_profileinfo(self, user_localpart):
try:
@@ -61,14 +54,6 @@ class ProfileStore(SQLBaseStore):
desc="get_profile_displayname",
)
-    def set_profile_displayname(self, user_localpart, new_displayname):
-        return self._simple_update_one(
-            table="profiles",
-            keyvalues={"user_id": user_localpart},
-            updatevalues={"displayname": new_displayname},
-            desc="set_profile_displayname",
-        )
def get_profile_avatar_url(self, user_localpart):
return self._simple_select_one_onecol(
table="profiles",
@@ -77,14 +62,6 @@ class ProfileStore(SQLBaseStore):
desc="get_profile_avatar_url",
)
-    def set_profile_avatar_url(self, user_localpart, new_avatar_url):
-        return self._simple_update_one(
-            table="profiles",
-            keyvalues={"user_id": user_localpart},
-            updatevalues={"avatar_url": new_avatar_url},
-            desc="set_profile_avatar_url",
-        )
def get_from_remote_profile_cache(self, user_id):
return self._simple_select_one(
table="remote_profile_cache",
@@ -94,6 +71,31 @@ class ProfileStore(SQLBaseStore):
desc="get_from_remote_profile_cache",
)
+class ProfileStore(ProfileWorkerStore):
+    def create_profile(self, user_localpart):
+        return self._simple_insert(
+            table="profiles",
+            values={"user_id": user_localpart},
+            desc="create_profile",
+        )
+
+    def set_profile_displayname(self, user_localpart, new_displayname):
+        return self._simple_update_one(
+            table="profiles",
+            keyvalues={"user_id": user_localpart},
+            updatevalues={"displayname": new_displayname},
+            desc="set_profile_displayname",
+        )
+
+    def set_profile_avatar_url(self, user_localpart, new_avatar_url):
+        return self._simple_update_one(
+            table="profiles",
+            keyvalues={"user_id": user_localpart},
+            updatevalues={"avatar_url": new_avatar_url},
+            desc="set_profile_avatar_url",
+        )
def add_remote_profile_cache(self, user_id, displayname, avatar_url):
"""Ensure we are caching the remote user's profiles.
+1 -1
View File
@@ -23,7 +23,7 @@ from twisted.internet import defer
import abc
import logging
-import ujson as json
+import simplejson as json
logger = logging.getLogger(__name__)
+1 -3
@@ -460,14 +460,12 @@ class RegistrationStore(RegistrationWorkerStore,
"""
def _find_next_generated_user_id(txn):
txn.execute("SELECT name FROM users")
-rows = self.cursor_to_dict(txn)
regex = re.compile("^@(\d+):")
found = set()
-for r in rows:
-    user_id = r["name"]
+for user_id, in txn:
match = regex.search(user_id)
if match:
found.add(int(match.group(1)))
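The rewrite above iterates the cursor directly instead of materialising every row as a dict via cursor_to_dict. The rest of the function falls outside this hunk; the sketch below assumes it simply returns the first unused integer, and runs the same scan over a plain list of mxids rather than a database cursor:

import re

def find_next_generated_user_id(names):
    regex = re.compile(r"^@(\d+):")
    found = set()
    for user_id in names:
        match = regex.search(user_id)
        if match:
            found.add(int(match.group(1)))
    # Assumed continuation: pick the first integer not already taken.
    i = 1
    while i in found:
        i += 1
    return i

print(find_next_generated_user_id(["@1:hs", "@2:hs", "@bob:hs"]))  # -> 3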
+18 -14
@@ -22,7 +22,7 @@ from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
import collections
import logging
-import ujson as json
+import simplejson as json
import re
logger = logging.getLogger(__name__)
@@ -157,6 +157,18 @@ class RoomWorkerStore(SQLBaseStore):
"get_public_room_changes", get_public_room_changes_txn
)
+    @cached(max_entries=10000)
+    def is_room_blocked(self, room_id):
+        return self._simple_select_one_onecol(
+            table="blocked_rooms",
+            keyvalues={
+                "room_id": room_id,
+            },
+            retcol="1",
+            allow_none=True,
+            desc="is_room_blocked",
+        )
class RoomStore(RoomWorkerStore, SearchStore):
@@ -485,18 +497,6 @@ class RoomStore(RoomWorkerStore, SearchStore):
else:
defer.returnValue(None)
-    @cached(max_entries=10000)
-    def is_room_blocked(self, room_id):
-        return self._simple_select_one_onecol(
-            table="blocked_rooms",
-            keyvalues={
-                "room_id": room_id,
-            },
-            retcol="1",
-            allow_none=True,
-            desc="is_room_blocked",
-        )
@defer.inlineCallbacks
def block_room(self, room_id, user_id):
yield self._simple_insert(
@@ -507,7 +507,11 @@ class RoomStore(RoomWorkerStore, SearchStore):
},
desc="block_room",
)
-        self.is_room_blocked.invalidate((room_id,))
+        yield self.runInteraction(
+            "block_room_invalidation",
+            self._invalidate_cache_and_stream,
+            self.is_room_blocked, (room_id,),
+        )
def get_media_mxcs_in_room(self, room_id):
"""Retrieves all the local and remote media MXC URIs in a given room
+1 -2
@@ -28,7 +28,7 @@ from synapse.api.constants import Membership, EventTypes
from synapse.types import get_domain_from_id
import logging
-import ujson as json
+import simplejson as json
logger = logging.getLogger(__name__)
@@ -440,7 +440,6 @@ class RoomMemberWorkerStore(EventsWorkerStore):
)
@cachedInlineCallbacks(num_args=2, max_entries=10000, iterable=True)
-# @defer.inlineCallbacks
def _get_joined_hosts(self, room_id, state_group, current_state_ids, state_entry):
# We don't use `state_group`; it's there so that we can cache based
# on it. However, it's important that it's never None, since two current_state's
@@ -12,9 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import json
import logging
+import simplejson as json
logger = logging.getLogger(__name__)
+2 -2
@@ -17,7 +17,7 @@ import logging
from synapse.storage.prepare_database import get_statements
from synapse.storage.engines import PostgresEngine, Sqlite3Engine
-import ujson
+import simplejson
logger = logging.getLogger(__name__)
@@ -66,7 +66,7 @@ def run_create(cur, database_engine, *args, **kwargs):
"max_stream_id_exclusive": max_stream_id + 1,
"rows_inserted": 0,
}
-progress_json = ujson.dumps(progress)
+progress_json = simplejson.dumps(progress)
sql = (
"INSERT into background_updates (update_name, progress_json)"
+2 -2
@@ -16,7 +16,7 @@ import logging
from synapse.storage.prepare_database import get_statements
-import ujson
+import simplejson
logger = logging.getLogger(__name__)
@@ -45,7 +45,7 @@ def run_create(cur, database_engine, *args, **kwargs):
"max_stream_id_exclusive": max_stream_id + 1,
"rows_inserted": 0,
}
-progress_json = ujson.dumps(progress)
+progress_json = simplejson.dumps(progress)
sql = (
"INSERT into background_updates (update_name, progress_json)"
@@ -16,7 +16,7 @@ from synapse.storage.engines import PostgresEngine
from synapse.storage.prepare_database import get_statements
import logging
-import ujson
+import simplejson
logger = logging.getLogger(__name__)
@@ -49,7 +49,7 @@ def run_create(cur, database_engine, *args, **kwargs):
"rows_inserted": 0,
"have_added_indexes": False,
}
-progress_json = ujson.dumps(progress)
+progress_json = simplejson.dumps(progress)
sql = (
"INSERT into background_updates (update_name, progress_json)"
@@ -15,7 +15,7 @@
from synapse.storage.prepare_database import get_statements
import logging
-import ujson
+import simplejson
logger = logging.getLogger(__name__)
@@ -44,7 +44,7 @@ def run_create(cur, database_engine, *args, **kwargs):
"max_stream_id_exclusive": max_stream_id + 1,
"rows_inserted": 0,
}
-progress_json = ujson.dumps(progress)
+progress_json = simplejson.dumps(progress)
sql = (
"INSERT into background_updates (update_name, progress_json)"
+1 -1
@@ -16,7 +16,7 @@
from collections import namedtuple
import logging
import re
-import ujson as json
+import simplejson as json
from twisted.internet import defer
+27 -7
@@ -240,6 +240,9 @@ class StateGroupWorkerStore(SQLBaseStore):
(
"AND type = ? AND state_key = ?",
(etype, state_key)
) if state_key is not None else (
"AND type = ?",
(etype,)
)
for etype, state_key in types
]
@@ -259,10 +262,19 @@ class StateGroupWorkerStore(SQLBaseStore):
key = (typ, state_key)
results[group][key] = event_id
else:
+where_args = []
+where_clauses = []
+wildcard_types = False
if types is not None:
-    where_clause = "AND (%s)" % (
-        " OR ".join(["(type = ? AND state_key = ?)"] * len(types)),
-    )
+    for typ in types:
+        if typ[1] is None:
+            where_clauses.append("(type = ?)")
+            where_args.append(typ[0])  # append, not extend: extending with
+                                       # a string would splat it char-by-char
+            wildcard_types = True
+        else:
+            where_clauses.append("(type = ? AND state_key = ?)")
+            where_args.extend([typ[0], typ[1]])
+    where_clause = "AND (%s)" % (" OR ".join(where_clauses))
else:
where_clause = ""
@@ -279,7 +291,7 @@ class StateGroupWorkerStore(SQLBaseStore):
# after we finish deduping state, which requires this func)
args = [next_group]
if types:
-    args.extend(i for typ in types for i in typ)
+    args.extend(where_args)
txn.execute(
"SELECT type, state_key, event_id FROM state_groups_state"
@@ -292,9 +304,17 @@ class StateGroupWorkerStore(SQLBaseStore):
if (typ, state_key) not in results[group]
)
-# If the lengths match then we must have all the types,
-# so no need to go walk further down the tree.
-if types is not None and len(results[group]) == len(types):
+# If the number of entries in the (type,state_key)->event_id dict
+# matches the number of (type,state_keys) types we were searching
+# for, then we must have found them all, so no need to go walk
+# further down the tree... UNLESS our types filter contained
+# wildcards (i.e. Nones) in which case we have to do an exhaustive
+# search
+if (
+    types is not None and
+    not wildcard_types and
+    len(results[group]) == len(types)
+):
break
next_group = self._simple_select_one_onecol_txn(
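The WHERE-clause construction above, extracted as a standalone sketch (with the append fix noted earlier): each (event_type, state_key) pair becomes one clause, a None state_key becomes a type-only wildcard, and the caller learns whether any wildcard was seen so it can disable the early-exit optimisation:

def build_types_clause(types):
    # types: list of (event_type, state_key); state_key=None means
    # "any state_key for this type".
    where_clauses = []
    where_args = []
    wildcard_types = False
    for etype, state_key in types:
        if state_key is None:
            where_clauses.append("(type = ?)")
            where_args.append(etype)
            wildcard_types = True
        else:
            where_clauses.append("(type = ? AND state_key = ?)")
            where_args.extend([etype, state_key])
    return "AND (%s)" % " OR ".join(where_clauses), where_args, wildcard_types

print(build_types_clause([("m.room.member", None), ("m.room.name", "")]))
# -> ('AND ((type = ?) OR (type = ? AND state_key = ?))',
#     ['m.room.member', 'm.room.name', ''], True)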
+1 -1
@@ -19,7 +19,7 @@ from synapse.storage.account_data import AccountDataWorkerStore
from synapse.util.caches.descriptors import cached
from twisted.internet import defer
-import ujson as json
+import simplejson as json
import logging
logger = logging.getLogger(__name__)
+1 -1
@@ -23,7 +23,7 @@ from canonicaljson import encode_canonical_json
from collections import namedtuple
import logging
-import ujson as json
+import simplejson as json
logger = logging.getLogger(__name__)
+2 -2
@@ -667,7 +667,7 @@ class UserDirectoryStore(SQLBaseStore):
# The array of numbers are the weights for the various part of the
# search: (domain, _, display name, localpart)
sql = """
-SELECT d.user_id, display_name, avatar_url
+SELECT d.user_id AS user_id, display_name, avatar_url
FROM user_directory_search
INNER JOIN user_directory AS d USING (user_id)
%s
@@ -702,7 +702,7 @@ class UserDirectoryStore(SQLBaseStore):
search_query = _parse_query_sqlite(search_term)
sql = """
-SELECT d.user_id, display_name, avatar_url
+SELECT d.user_id AS user_id, display_name, avatar_url
FROM user_directory_search
INNER JOIN user_directory AS d USING (user_id)
%s
+5 -1
@@ -132,9 +132,13 @@ class DictionaryCache(object):
self._update_or_insert(key, value, known_absent)
def _update_or_insert(self, key, value, known_absent):
-        entry = self.cache.setdefault(key, DictionaryEntry(False, set(), {}))
+        # We pop and reinsert as we need to tell the cache the size may have
+        # changed
+        entry = self.cache.pop(key, DictionaryEntry(False, set(), {}))
entry.value.update(value)
entry.known_absent.update(known_absent)
self.cache[key] = entry
def _insert(self, key, value, known_absent):
self.cache[key] = DictionaryEntry(True, known_absent, value)
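Why the DictionaryCache now pops and reinserts: a size-tracked cache only re-measures an entry on insertion, so mutating a stored entry in place leaves the recorded total stale. A toy sketch of the failure mode and the fix:

class SizedCache(object):
    """A toy size-tracked dict: the running total is only adjusted in
    __setitem__ and pop, like an LruCache with a size_callback."""
    def __init__(self):
        self._data = {}
        self.size = 0

    def __setitem__(self, key, value):
        self.pop(key, None)  # drop any old entry (and its size) first
        self.size += len(value)
        self._data[key] = value

    def pop(self, key, default):
        entry = self._data.pop(key, None)
        if entry is None:
            return default
        self.size -= len(entry)
        return entry

cache = SizedCache()
cache["k"] = {"a": 1}

# Mutating cache._data["k"] in place would leave cache.size stale;
# popping and reinserting (as _update_or_insert now does) re-measures.
entry = cache.pop("k", {})
entry["b"] = 2
cache["k"] = entry
print(cache.size)  # 2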
+11 -4
@@ -154,14 +154,21 @@ class LruCache(object):
def cache_set(key, value, callbacks=[]):
node = cache.get(key, None)
if node is not None:
-            if value != node.value:
+            # We sometimes store large objects, e.g. dicts, which cause
+            # the inequality check to take a long time. So let's only do
+            # the check if we have some callbacks to call.
+            if node.callbacks and value != node.value:
for cb in node.callbacks:
cb()
node.callbacks.clear()
-                if size_callback:
-                    cached_cache_len[0] -= size_callback(node.value)
-                    cached_cache_len[0] += size_callback(value)
+            # We don't bother to protect this by value != node.value as
+            # generally size_callback will be cheap compared with equality
+            # checks. (For example, taking the size of two dicts is quicker
+            # than comparing them for equality.)
+            if size_callback:
+                cached_cache_len[0] -= size_callback(node.value)
+                cached_cache_len[0] += size_callback(value)
node.callbacks.update(callbacks)
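The reasoning behind the reordered test above: comparing two large cached values is O(n) in their size, while the truthiness of the callbacks set is O(1), so with no callbacks registered the comparison is skipped entirely. A quick, standard-library-only way to see the difference:

import timeit

big = {i: i for i in range(10000)}
big2 = dict(big)
callbacks = set()  # the common case: no invalidation callbacks registered

# Full comparison every time:
print(timeit.timeit(lambda: big != big2, number=1000))
# Short-circuits on the empty callback set, never touching the dicts:
print(timeit.timeit(lambda: callbacks and big != big2, number=1000))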
+19
@@ -14,6 +14,7 @@
# limitations under the License.
from frozendict import frozendict
import simplejson as json
def freeze(o):
@@ -49,3 +50,21 @@ def unfreeze(o):
pass
return o
def _handle_frozendict(obj):
"""Helper for EventEncoder. Makes frozendicts serializable by returning
the underlying dict
"""
if type(obj) is frozendict:
# fishing the protected dict out of the object is a bit nasty,
# but we don't really want the overhead of copying the dict.
return obj._dict
raise TypeError('Object of type %s is not JSON serializable' %
obj.__class__.__name__)
# A JSONEncoder which is capable of encoding frozendicts without barfing
frozendict_json_encoder = json.JSONEncoder(
default=_handle_frozendict,
)
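Usage of the encoder above: JSONEncoder's default hook is invoked only for objects the encoder cannot serialise natively, so plain dicts and lists pass through untouched while frozendicts get unwrapped. A small usage sketch, assuming the frozendict and simplejson packages are installed:

import simplejson as json
from frozendict import frozendict

def _handle_frozendict(obj):
    # Called only for objects simplejson can't serialise by itself.
    if type(obj) is frozendict:
        return obj._dict  # reach into the wrapper rather than copy it
    raise TypeError('Object of type %s is not JSON serializable'
                    % obj.__class__.__name__)

frozendict_json_encoder = json.JSONEncoder(default=_handle_frozendict)

event_content = frozendict({"body": "hello", "msgtype": "m.text"})
print(frozendict_json_encoder.encode(event_content))  # {"body": "hello", ...}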
+2 -2
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from twisted.web.resource import Resource
+from twisted.web.resource import NoResource
import logging
@@ -45,7 +45,7 @@ def create_resource_tree(desired_tree, root_resource):
for path_seg in full_path.split('/')[1:-1]:
if path_seg not in last_resource.listNames():
# resource doesn't exist, so make a "dummy resource"
-child_resource = Resource()
+child_resource = NoResource()
last_resource.putChild(path_seg, child_resource)
res_id = _resource_id(last_resource, path_seg)
resource_mappings[res_id] = child_resource
+6 -23
@@ -55,7 +55,7 @@ class ApplicationServiceTestCase(unittest.TestCase):
_regex("@irc_.*")
)
self.event.sender = "@irc_foobar:matrix.org"
-self.assertTrue((yield self.service.is_interested(self.event)))
+self.assertTrue((yield self.service.is_interested(self.event, None)))
@defer.inlineCallbacks
def test_regex_user_id_prefix_no_match(self):
@@ -63,7 +63,7 @@ class ApplicationServiceTestCase(unittest.TestCase):
_regex("@irc_.*")
)
self.event.sender = "@someone_else:matrix.org"
-self.assertFalse((yield self.service.is_interested(self.event)))
+self.assertFalse((yield self.service.is_interested(self.event, None)))
@defer.inlineCallbacks
def test_regex_room_member_is_checked(self):
@@ -73,7 +73,7 @@ class ApplicationServiceTestCase(unittest.TestCase):
self.event.sender = "@someone_else:matrix.org"
self.event.type = "m.room.member"
self.event.state_key = "@irc_foobar:matrix.org"
-self.assertTrue((yield self.service.is_interested(self.event)))
+self.assertTrue((yield self.service.is_interested(self.event, None)))
@defer.inlineCallbacks
def test_regex_room_id_match(self):
@@ -81,7 +81,7 @@ class ApplicationServiceTestCase(unittest.TestCase):
_regex("!some_prefix.*some_suffix:matrix.org")
)
self.event.room_id = "!some_prefixs0m3th1nGsome_suffix:matrix.org"
-self.assertTrue((yield self.service.is_interested(self.event)))
+self.assertTrue((yield self.service.is_interested(self.event, None)))
@defer.inlineCallbacks
def test_regex_room_id_no_match(self):
@@ -89,7 +89,7 @@ class ApplicationServiceTestCase(unittest.TestCase):
_regex("!some_prefix.*some_suffix:matrix.org")
)
self.event.room_id = "!XqBunHwQIXUiqCaoxq:matrix.org"
-self.assertFalse((yield self.service.is_interested(self.event)))
+self.assertFalse((yield self.service.is_interested(self.event, None)))
@defer.inlineCallbacks
def test_regex_alias_match(self):
@@ -160,7 +160,7 @@ class ApplicationServiceTestCase(unittest.TestCase):
self.store.get_aliases_for_room.return_value = [
"#xmpp_foobar:matrix.org", "#athing:matrix.org"
]
-self.store.get_users_in_room.return_value = []
+self.store.get_appservices_with_user_in_room.return_value = []
self.assertFalse((yield self.service.is_interested(
self.event, self.store
)))
@@ -193,20 +193,3 @@ class ApplicationServiceTestCase(unittest.TestCase):
}
self.event.state_key = self.service.sender
self.assertTrue((yield self.service.is_interested(self.event)))
-    @defer.inlineCallbacks
-    def test_member_list_match(self):
-        self.service.namespaces[ApplicationService.NS_USERS].append(
-            _regex("@irc_.*")
-        )
-        self.store.get_users_in_room.return_value = [
-            "@alice:here",
-            "@irc_fo:here",  # AS user
-            "@bob:here",
-        ]
-        self.store.get_aliases_for_room.return_value = []
-        self.event.sender = "@xmpp_foobar:matrix.org"
-        self.assertTrue((yield self.service.is_interested(
-            event=self.event, store=self.store
-        )))