Compare commits
1139 Commits
| SHA1 | Author | Date | |
|---|---|---|---|
| 3a676b8ee3 | |||
| 0d5622b088 | |||
| 712030aeef | |||
| 9f53491cab | |||
| 02a27a6c4f | |||
| a611c968cc | |||
| 59698906eb | |||
| c0d8e0eb63 | |||
| 68ebb81e86 | |||
| 5bbc321588 | |||
| 4cf4320593 | |||
| eab47ea1e5 | |||
| f52dd35ac3 | |||
| 61c7edfd34 | |||
| 5bbd424ee0 | |||
| 6ac40f7b65 | |||
| f505575f69 | |||
| 4084c58aa1 | |||
| e99365f601 | |||
| e8884e5e9c | |||
| a7001c311b | |||
| 9181e2f4c7 | |||
| fb76a81ff7 | |||
| 0b282d33af | |||
| 48af68ba8e | |||
| 0c93df89b6 | |||
| 43f0941e8f | |||
| 481119f7d6 | |||
| eb8619e256 | |||
| 4ef7a25c10 | |||
| 3727a15764 | |||
| aaabbd3e9e | |||
| 84f9cac4d0 | |||
| 914f1eafac | |||
| 6fd2f685fe | |||
| 737aee9295 | |||
| cb9c465707 | |||
| 3c79bdd7a0 | |||
| a4c56bf67b | |||
| 4c1b32d7e2 | |||
| 03c8df54f0 | |||
| c214d3e36e | |||
| 1c1b2de975 | |||
| f78b479118 | |||
| 4802f9cdb6 | |||
| 83776d6219 | |||
| bd77216d06 | |||
| 5a578ea4c7 | |||
| 9ae64c9910 | |||
| f41b1a8723 | |||
| b42ad359e9 | |||
| 757e2c79b4 | |||
| 86e9bbc74e | |||
| e40f25ebe1 | |||
| ff1d333a02 | |||
| 2ae91a9e2f | |||
| d213d69fe3 | |||
| 56da835eaf | |||
| 96bcfb29c7 | |||
| 7be1065b8f | |||
| 1209d3174e | |||
| a2546b9082 | |||
| ceeb5b909f | |||
| 43a89cca8e | |||
| f338bf9257 | |||
| 767fc0b739 | |||
| 54d08c8868 | |||
| 5880bc5417 | |||
| f613a3e332 | |||
| bfe586843f | |||
| d0633e6dbe | |||
| 0f2ca8cde1 | |||
| c53f9d561e | |||
| 65141161f6 | |||
| 72f454b752 | |||
| 10ebbaea2e | |||
| aa5ce4d450 | |||
| d33d623f0d | |||
| 7984ffdc6a | |||
| c1267d04c5 | |||
| a04c076b7f | |||
| 44891b4a0a | |||
| 7b39bcdaae | |||
| d937f342bb | |||
| 318cb1f207 | |||
| c48465dbaa | |||
| 8be1a37909 | |||
| d3d0be4167 | |||
| 762ada1e07 | |||
| 0d3da210f0 | |||
| cccf86dd05 | |||
| 8a76094965 | |||
| 790f5848b2 | |||
| 82d7eea7e3 | |||
| 2547dffccc | |||
| 9bb041791c | |||
| 17515bae14 | |||
| 4bd3d25218 | |||
| 5ffacc5e84 | |||
| 83b2f83da0 | |||
| b36270b5e1 | |||
| 6ff7a79308 | |||
| af582b66bb | |||
| 2460d904bd | |||
| 1ccabe2965 | |||
| fb83f6a1fc | |||
| b04f81284a | |||
| ec9331f851 | |||
| dafef5a688 | |||
| d96a070a3a | |||
| ed3979df5f | |||
| 79fc4ff6f9 | |||
| 7b6d519482 | |||
| 52d1008661 | |||
| 96bd8ff57c | |||
| ce3fe52498 | |||
| 7e2f971c08 | |||
| d63b49137a | |||
| b9ee5650b0 | |||
| caef337587 | |||
| b4a5002a6e | |||
| 86be915cce | |||
| d9f38561c8 | |||
| 4836864f56 | |||
| a4a31fa8dc | |||
| f942980c0b | |||
| 3fb35cbd6f | |||
| 15e0f1696f | |||
| da84fa3d74 | |||
| d6e7333ae4 | |||
| 6ec02e9ecf | |||
| 25cd5bb697 | |||
| fa129ce5b5 | |||
| e1e042f2a1 | |||
| ceb599e789 | |||
| 8c82b06904 | |||
| 05d044aac3 | |||
| 2d5c693fd3 | |||
| 57fa1801c3 | |||
| a294b04bf0 | |||
| 9c99ab4572 | |||
| d549fdfa22 | |||
| 95ac3078da | |||
| 92e3071623 | |||
| ee5aef6c72 | |||
| 639cd07d6d | |||
| af03ecf352 | |||
| 60ec9793fb | |||
| 674379e673 | |||
| a28d066732 | |||
| 8495b6d365 | |||
| 1ef0365670 | |||
| 87a30890a3 | |||
| ed4d18f516 | |||
| 9c62fcdb68 | |||
| 27a0c21c38 | |||
| 3555a659ec | |||
| 4c5e8adf8b | |||
| 875ed05bdc | |||
| 67f3a50e9a | |||
| afff321e9a | |||
| 8f0e47fae8 | |||
| 823b8be4b7 | |||
| 92767dd703 | |||
| 7b9319b1c8 | |||
| 3d95405e5f | |||
| 8d2bca1a90 | |||
| 0fd1cd2400 | |||
| 6bfec56796 | |||
| e815763b7f | |||
| 7e2c89a37f | |||
| 1e05637e37 | |||
| b713934b2e | |||
| 75fb9ac1be | |||
| 8aab9d87fa | |||
| 7d11f825aa | |||
| 196ebaf662 | |||
| 87f2dec8d4 | |||
| a1e0d316ea | |||
| 11860637e1 | |||
| 2e308a3a38 | |||
| c2b429ab24 | |||
| 6222ae51ce | |||
| b29f98377d | |||
| 1d4deff25a | |||
| df727f2126 | |||
| 7a77f8b6d5 | |||
| 0c53d750e7 | |||
| 92ab45a330 | |||
| 3d76b7cb2b | |||
| bf14883a04 | |||
| 9f7dc2bef7 | |||
| cf51c4120e | |||
| 0834b152fb | |||
| 8b98a7e8c3 | |||
| eab4d462f8 | |||
| c3916462f6 | |||
| 110780b18b | |||
| b09e29a03c | |||
| 7426c86eb8 | |||
| d1b154a10f | |||
| 9377157961 | |||
| 2c838f6459 | |||
| 5037ee0d37 | |||
| b26e8604f1 | |||
| 5fd07da764 | |||
| d76d89323c | |||
| aa82cb38e9 | |||
| 89e6839a48 | |||
| c906f30661 | |||
| 2a37467fa1 | |||
| f2b916534b | |||
| 9bc5b4c663 | |||
| 35b5c4ba1b | |||
| a853cdec5b | |||
| 3f4eb4c924 | |||
| 8d73cd502b | |||
| a2866e2e6a | |||
| e36bfbab38 | |||
| 35bb465b86 | |||
| c42f46ab7d | |||
| 7753fc6570 | |||
| c60b751694 | |||
| 683e564815 | |||
| 431aa8ada9 | |||
| dc4c1579d4 | |||
| 03e406eefc | |||
| 72550c3803 | |||
| 5d06929169 | |||
| 76503f95ed | |||
| fe95943305 | |||
| bb9a2ca87c | |||
| d35780eda0 | |||
| 0d3d7de6fc | |||
| 62e395f0e3 | |||
| 5260db7663 | |||
| 2ec5426035 | |||
| c9500a9c1d | |||
| f9d3665c88 | |||
| c27c51484a | |||
| f699b8f997 | |||
| a8a5dd3b44 | |||
| a68c1b15aa | |||
| 9113316b0e | |||
| 7178ab7da0 | |||
| 1fbb094c6f | |||
| 98c460cecd | |||
| 8b8052909f | |||
| 61407986b4 | |||
| 31a9eceda5 | |||
| fc66df1e60 | |||
| 178c9fb200 | |||
| 73b6bf4629 | |||
| 08a8514b7a | |||
| d24662b88a | |||
| 1e25f62ee6 | |||
| 5fbdf2bcec | |||
| e7aaa7c61e | |||
| fddb6fddc1 | |||
| a932acaa6b | |||
| 82312d4fff | |||
| f5bf45a2e5 | |||
| 3f9948a069 | |||
| ae5831d303 | |||
| 721b2bfa85 | |||
| 19038582d3 | |||
| 64b4aead15 | |||
| dd4287ca5d | |||
| e0c2490a14 | |||
| ec0cf996c9 | |||
| d9d48aad2d | |||
| adafa24b0a | |||
| 3e8bb99a2b | |||
| 77cba688ed | |||
| 54a546091a | |||
| 191c7bef6b | |||
| 31e6f8636f | |||
| 3b554bda26 | |||
| 15844040c2 | |||
| 7a3815b372 | |||
| 647b041d1a | |||
| 8122ad7bab | |||
| 2f0180b09e | |||
| acdfef7b14 | |||
| f96526ffc2 | |||
| fe9794706a | |||
| 75daede92f | |||
| fbdeb1778d | |||
| b275765545 | |||
| 0c1a27b787 | |||
| 84afeb41f3 | |||
| b2802a1351 | |||
| 0677fc1c4e | |||
| 2749da542c | |||
| e14baa7a3b | |||
| 0e7363e0b3 | |||
| d87a846ebc | |||
| 8b0dfc9fc4 | |||
| 34473a9c7f | |||
| b6507869cd | |||
| 9e2e994395 | |||
| d531ebcb57 | |||
| c4a8cbd15a | |||
| 99f929f36b | |||
| d787e41b20 | |||
| 6cf0ba1466 | |||
| 76d18a5776 | |||
| cd9ba1ed89 | |||
| 5defb25ac6 | |||
| fa2f96c2e3 | |||
| f93304e77f | |||
| 2c86187a1b | |||
| d6ac752538 | |||
| 97785bfc0f | |||
| b591277620 | |||
| 63137bb901 | |||
| d3654694d0 | |||
| 5244c0b48e | |||
| 3e7fac0d56 | |||
| 58f8226c7f | |||
| e4054abfdc | |||
| 9adf0e92bc | |||
| 1660145a08 | |||
| 7f79a6405b | |||
| 8595ff7842 | |||
| 58e207cd77 | |||
| 67ed8065db | |||
| 916227b4df | |||
| 3c5f25507b | |||
| 56aa4e7a9a | |||
| 384ee6eafb | |||
| 2ec3460967 | |||
| 7a38612620 | |||
| 2cd9260500 | |||
| 673c96ce97 | |||
| 4ebb688f4f | |||
| 5670205e2a | |||
| f984decd66 | |||
| a7daa5ae13 | |||
| 48b2e853a8 | |||
| b58d10a875 | |||
| 3ee7d7dc7f | |||
| 3176aebf9d | |||
| 9671e6750c | |||
| 742b6c6d15 | |||
| f5e90422f5 | |||
| ff7d3dc3a0 | |||
| 99797947aa | |||
| c12b9d719a | |||
| add89a03a6 | |||
| 467c1599c9 | |||
| 660ae8e0f3 | |||
| ba660ecde2 | |||
| a877209c8b | |||
| ee32d622ce | |||
| 6df1c79c22 | |||
| 12904932c4 | |||
| b6e8420aee | |||
| 91779b49c4 | |||
| e5f0e58931 | |||
| 9e982750ee | |||
| 5ca695cc12 | |||
| 13e29a697c | |||
| 6b1e9b8dfe | |||
| 590fbbef03 | |||
| a547e2df85 | |||
| e462aa97bf | |||
| 398cd1edfb | |||
| 494d0c8e02 | |||
| ffb9dd02fe | |||
| 15122da0e2 | |||
| e9c1cabac2 | |||
| ae6ff09494 | |||
| b13035cc91 | |||
| c081228439 | |||
| b5afe6bc38 | |||
| 2dee03aee5 | |||
| af59826a2f | |||
| f523177850 | |||
| 57c444b3ad | |||
| d5fda6e3b0 | |||
| 58443a022d | |||
| aa11db5f11 | |||
| 2e2be463f8 | |||
| 379c60b08d | |||
| 465605d616 | |||
| 703826886c | |||
| 9669a99d1a | |||
| c22a3f37a9 | |||
| 1be438f2a6 | |||
| 40160e24ab | |||
| 8a88684736 | |||
| af2fe6110c | |||
| 3ecaabc7fd | |||
| 1309b8ca97 | |||
| 07cf96ebf7 | |||
| b7b899cae6 | |||
| b7dbe5147a | |||
| 158a322e82 | |||
| ce829c2aef | |||
| 4814e7c9b2 | |||
| 866d0e7cb8 | |||
| 1748d4b739 | |||
| 5f5817ab05 | |||
| b117f67227 | |||
| 3b97797c8d | |||
| edca2d9891 | |||
| c00f4e48ba | |||
| 7076082ae6 | |||
| ea72bd9600 | |||
| f40131b4d9 | |||
| 9a3c80a348 | |||
| 7bcee4733a | |||
| 239badea9b | |||
| 316c00936f | |||
| 874fd43257 | |||
| 80916e6884 | |||
| 2ab0bf4b97 | |||
| b7a3be693b | |||
| beebc0a40f | |||
| 9848b54cac | |||
| deda48068c | |||
| ebcbb23226 | |||
| 7e9fc9b6af | |||
| 1a1abd8d05 | |||
| 125f674eae | |||
| 13cbd31040 | |||
| 0ff9aaf6c1 | |||
| 3110c37d02 | |||
| ec7460b4f2 | |||
| 1b4f4a936f | |||
| ed61a49169 | |||
| 389d558a3b | |||
| 44b084a75e | |||
| bb0e82fff1 | |||
| 5fc59f009c | |||
| ce82b9e48f | |||
| 09b1d98070 | |||
| dd463e246d | |||
| fa6d6bbceb | |||
| a92b4ea76f | |||
| 361fc53917 | |||
| 62d808becc | |||
| a85179aff3 | |||
| 5d6fbc1655 | |||
| 0b3083c75b | |||
| b4022cc487 | |||
| 50c250b808 | |||
| c037170faa | |||
| 7678ec3f9b | |||
| fc9c7b6cbc | |||
| 246b8c6e4a | |||
| 6789b63131 | |||
| 91f4ac602b | |||
| 690596b770 | |||
| 5c90451ea0 | |||
| 3406eba4ef | |||
| ddf9e7b302 | |||
| 95481e7ba7 | |||
| 79f34bdbc2 | |||
| b139e51041 | |||
| 74cd80e530 | |||
| ff8b87118d | |||
| 2223204eba | |||
| fc1f932cc0 | |||
| c0147f86a1 | |||
| 47c361d2f8 | |||
| 863d3f26b3 | |||
| 9ff940a0ef | |||
| 2a78dac60d | |||
| 27185de752 | |||
| dda2058d90 | |||
| a1cf9e3bf3 | |||
| 05ea111c47 | |||
| 8a1d3b86af | |||
| a612ce6659 | |||
| d50ca1b1ed | |||
| 60a0f81c7a | |||
| f9af8962f8 | |||
| 54172924c8 | |||
| 374f9b2f07 | |||
| ce2cdced61 | |||
| 910fc0f28f | |||
| 742ec37ca3 | |||
| 72165e5b77 | |||
| ff2d7551c7 | |||
| 903fb34b39 | |||
| 9c48f1ed22 | |||
| bfdcc7b9b6 | |||
| 4bf13a8207 | |||
| de27f7fc79 | |||
| 413e36b17a | |||
| 354d3842b5 | |||
| 9329cd5f13 | |||
| 87acd8fb07 | |||
| a53774721a | |||
| 0f0b011440 | |||
| faa3d172ab | |||
| 15c2ac2cac | |||
| fb9b5b6f4a | |||
| 4ecfbac85f | |||
| 9892d017b2 | |||
| e8d34bccbd | |||
| 33300673b7 | |||
| 869580206d | |||
| 278d6c0527 | |||
| e7ab0e0f9f | |||
| 6451fcd085 | |||
| b5f77eb12a | |||
| e3e0ac6ec7 | |||
| f1dd03548f | |||
| 28ad246bb4 | |||
| 577951b032 | |||
| 13f86c3489 | |||
| 6e0209112b | |||
| c77dae7a1a | |||
| a7b2ce32f7 | |||
| 0d4b3a133d | |||
| 02e928cf9b | |||
| 56a94ccd9e | |||
| baf056bae8 | |||
| 10d581d1cf | |||
| 138c405974 | |||
| 8fe3b450d2 | |||
| 210b7d8e00 | |||
| 1dcfb201c4 | |||
| f7e3de02ef | |||
| 4d14655c2b | |||
| 5e2890bd49 | |||
| 5be3944730 | |||
| 7641a90c34 | |||
| c43609e035 | |||
| 9e696bd6a3 | |||
| 60bec24083 | |||
| 5c79ef9396 | |||
| 6c5b147a39 | |||
| b82d6f70a4 | |||
| 700487a7c7 | |||
| 3dbaeef58c | |||
| 42ac5f0c1a | |||
| 05aee12652 | |||
| 24d9f2c140 | |||
| b71ca2b014 | |||
| 4a95eb0a12 | |||
| be799453aa | |||
| ea7786e8ca | |||
| 929cb0ed7d | |||
| 5f4eca3816 | |||
| 5614b4dafb | |||
| e5ad2e5267 | |||
| e12ec335a5 | |||
| 220231d8e3 | |||
| e6c5e3f28a | |||
| 42109a62a4 | |||
| b8cdec92c7 | |||
| 9c902025bf | |||
| b9977ea667 | |||
| 48b652bcbe | |||
| b4796a62ee | |||
| 35cda2e692 | |||
| f8d21e1431 | |||
| 9da9826b85 | |||
| fe95f2217c | |||
| 8351538873 | |||
| 112283e230 | |||
| b31ec214a5 | |||
| 114b929f8b | |||
| ddca9c56fc | |||
| 58371fa263 | |||
| 7e90fb6a57 | |||
| 591af2d074 | |||
| c229c87398 | |||
| e5999bfb1a | |||
| a4e278bfe7 | |||
| 9e7900da1e | |||
| 200de16440 | |||
| 536f949a1a | |||
| 97d1b3a506 | |||
| 71d5d2c669 | |||
| 6605adf669 | |||
| 458782bf67 | |||
| c2025c0425 | |||
| a9c9868957 | |||
| d1fb790818 | |||
| 1f403325ac | |||
| 04686df17a | |||
| feedaa37fa | |||
| a182e5d721 | |||
| 4bfb32f685 | |||
| 1a2197d7bf | |||
| e560045cfd | |||
| 8168341e9b | |||
| 1bbb67c452 | |||
| 150fcde0dc | |||
| 73e616df2a | |||
| f318d4f2a4 | |||
| e71095801f | |||
| dbeed36dec | |||
| 4de08a4672 | |||
| d7aa103f00 | |||
| cf81375b94 | |||
| 66f9a49ce9 | |||
| 58c9f20692 | |||
| ec0f3836ff | |||
| 4d54d87c3e | |||
| ee4f332ec5 | |||
| dc6da63e30 | |||
| 763360594d | |||
| 7e0a1683e6 | |||
| 2a24f906a9 | |||
| a79af259e9 | |||
| ce14c7a995 | |||
| 88a973cde5 | |||
| 1a830b751d | |||
| abc1b22193 | |||
| 0eff740523 | |||
| a1b7902944 | |||
| 7718303e71 | |||
| 103b432c84 | |||
| a45cc801d2 | |||
| 7634687057 | |||
| b3ecb96e36 | |||
| 907c1faf1e | |||
| 6c3126d950 | |||
| 6e89e69d08 | |||
| e66d0bd03a | |||
| 4a2ace1857 | |||
| 5189bfdef4 | |||
| 24f00a6c33 | |||
| 8e49892b21 | |||
| e557dc80b8 | |||
| 4eb8f9ca8a | |||
| f7ef5c1d57 | |||
| 00c9ad49df | |||
| 9777c5f49a | |||
| 0214745239 | |||
| 46a02ff15b | |||
| 6ad9586c84 | |||
| 78a5482267 | |||
| 7b0d846407 | |||
| f28cc45183 | |||
| e664e9737c | |||
| 13ba8d878c | |||
| 78d6c1b5be | |||
| feb294d552 | |||
| 70a8608749 | |||
| 7e3b586c1e | |||
| eff12e838c | |||
| 82631c5f94 | |||
| b58a8b1ee0 | |||
| 5b75b637b8 | |||
| 9ac9b75bc4 | |||
| ebaa999f92 | |||
| 6c558ee8bc | |||
| 31a2b892d8 | |||
| 9daa4e2a85 | |||
| 3e2fcd67b2 | |||
| 241b71852e | |||
| 97294ef2fd | |||
| 549698b1e0 | |||
| c486b7b41c | |||
| f078ecbc8f | |||
| 2bb5f035af | |||
| cca5c06679 | |||
| 0897357993 | |||
| 2c1fbea531 | |||
| 13e6262659 | |||
| 1d19a5ec0f | |||
| 77c7ed0e93 | |||
| b052621f67 | |||
| 8f1031586f | |||
| 79a1c0574b | |||
| 489f92e0e5 | |||
| 737c4223ef | |||
| db0da033eb | |||
| 6a9f1209df | |||
| 34dda7cc7f | |||
| 4d36e73230 | |||
| 709e09e1c3 | |||
| aa4af94c69 | |||
| b84d59c5f0 | |||
| 33c71c3a4b | |||
| c8e4d5de7f | |||
| 156cea5b45 | |||
| 8450114098 | |||
| 24277fbb97 | |||
| 66bb255fcd | |||
| 5054806ec1 | |||
| 430e496050 | |||
| d4f72a5bfb | |||
| f8aae79a72 | |||
| 9cd80a7b5c | |||
| 772b45c745 | |||
| 5f280837a6 | |||
| 6f52e90065 | |||
| 771528ab13 | |||
| b32121a5d1 | |||
| 2e36689df3 | |||
| 2df6114bc4 | |||
| a644ac6d2c | |||
| de11b5b9b5 | |||
| d83d004ccd | |||
| 43e13dbd4d | |||
| 8a391e33ae | |||
| 477b1ed6cf | |||
| 04ad93e6fd | |||
| 65e92eca49 | |||
| 9039904f4c | |||
| 69214ea671 | |||
| b023995538 | |||
| 793369791a | |||
| 7a8ea7e78b | |||
| 854ca32f10 | |||
| d7ac861d3b | |||
| 89b40b225c | |||
| 4bf448be25 | |||
| fa48020a52 | |||
| 1ef7cae41b | |||
| 2d3837bec7 | |||
| 498c2e60fd | |||
| ceb6b8680a | |||
| b264b9548f | |||
| d98a9f2583 | |||
| 226a9a5fa6 | |||
| 25c311eaf6 | |||
| cc9c97e0dc | |||
| e70165039c | |||
| c1de91aca4 | |||
| b55b90bfb4 | |||
| 8da95b6f1b | |||
| b91baae09d | |||
| 13724569ec | |||
| 4a6eb5eb45 | |||
| 6927d0e091 | |||
| b5dbced938 | |||
| f2d5ff5bf2 | |||
| 3d60686c0c | |||
| 45488e0ffa | |||
| f67d60496a | |||
| 18579534ea | |||
| 47374a33fc | |||
| 0fcafbece8 | |||
| 96bb4bf38a | |||
| fd142c29d9 | |||
| ebc5f00efe | |||
| ea320d3464 | |||
| 5687a00e4e | |||
| b18114e19e | |||
| 02a9c3be6c | |||
| 4fce59f274 | |||
| fb7299800f | |||
| c046630c33 | |||
| a30364c1f9 | |||
| 766526e114 | |||
| 50e18938a9 | |||
| f3af1840cb | |||
| 467c27a1f9 | |||
| 3f5dd18bd4 | |||
| 40431251cb | |||
| 82cf3a8043 | |||
| 03b2c2577c | |||
| 0663c5bd52 | |||
| 35981c8b71 | |||
| 8fe8951a8d | |||
| fdca8ec418 | |||
| 45cf827c8f | |||
| 00cb3eb24b | |||
| c23a8c7833 | |||
| e1941442d4 | |||
| 49c328a892 | |||
| 0935802f1e | |||
| 19fd425928 | |||
| 167d1df699 | |||
| 7ed2bbeb11 | |||
| 9101193242 | |||
| 571a566399 | |||
| 3c6518ddbf | |||
| 4e7948b47a | |||
| ba8931829b | |||
| 61eaa6ec64 | |||
| c5e7c0e436 | |||
| e26390ca46 | |||
| a6477d5933 | |||
| 5cba88ea7c | |||
| 5fc9b17518 | |||
| fa90c180ee | |||
| 5610880003 | |||
| e7febf4fbb | |||
| aca3193efb | |||
| b97f6626b6 | |||
| f93ecf8783 | |||
| 0487c9441f | |||
| a955cbfa49 | |||
| 8c97b49886 | |||
| 2152b320c5 | |||
| d6d60b4d6c | |||
| d6c831bd3d | |||
| 97b364cb25 | |||
| 03f4569dc3 | |||
| 8c94833b72 | |||
| 9fda8b5193 | |||
| e4e33c743e | |||
| 87f9477b10 | |||
| 9959d9ece8 | |||
| 27b9775073 | |||
| 766c24b2e6 | |||
| 7179fdd550 | |||
| c887c4cbd5 | |||
| e18257f0e5 | |||
| 8431f62ebb | |||
| f091b73e69 | |||
| ce6fbbea94 | |||
| aea5da0ef6 | |||
| 3a75159832 | |||
| 1ebf5e3d03 | |||
| dc2647cd3d | |||
| 86896408b0 | |||
| 53cb173663 | |||
| d59c58bc95 | |||
| ddd25def01 | |||
| 8c6012a4af | |||
| 42deca50c2 | |||
| d685ae73b4 | |||
| 4021f95261 | |||
| 7dd0c1730a | |||
| f92fe15897 | |||
| 3fe8c56736 | |||
| 60965bd7e5 | |||
| 0e0e441b33 | |||
| b4a41aa542 | |||
| db6e26bb8c | |||
| 88baa3865e | |||
| 74f49f99f9 | |||
| 7065b75bfd | |||
| 7959e8b764 | |||
| 52bdd1b834 | |||
| 7a3fe48ba4 | |||
| 7cd418d38e | |||
| cd80019eec | |||
| d552861346 | |||
| 10f76dc5da | |||
| 5b142788d2 | |||
| eaa836e8ca | |||
| 42eae4634f | |||
| 8acc5cb60f | |||
| 31a051b677 | |||
| 8f9c74e9f1 | |||
| 975903ae17 | |||
| 4efcaa43c8 | |||
| 330be18ec5 | |||
| f1f8122120 | |||
| 297eded261 | |||
| 0e07f2e15d | |||
| 82b46f556d | |||
| 8f66fe6392 | |||
| 3a00f13436 | |||
| c6549117a2 | |||
| ed1d189e10 | |||
| dfe1273d14 | |||
| 91a222c66d | |||
| 0503bdb316 | |||
| 930ba003f8 | |||
| d54005059c | |||
| d7c85ad916 | |||
| c1a3021771 | |||
| d049e81b10 | |||
| c43b6dcc75 | |||
| 367cfab4e6 | |||
| 69adf8c384 | |||
| 73ca8e5834 | |||
| b088291f14 | |||
| a2ae01cc0f | |||
| da417aa56d | |||
| d4315bbf6b | |||
| 3fa344c037 | |||
| 7cc047455e | |||
| d726597737 | |||
| 2309450a76 | |||
| ea5eea2424 | |||
| 746f6e0eb3 | |||
| 7441d8cc0c | |||
| ccf9387d57 | |||
| d4cefb6289 | |||
| 259d1ecd1d | |||
| 191070123d | |||
| afb7b377f2 | |||
| af30140621 | |||
| ac2842ff1e | |||
| 892ee473d9 | |||
| 40d9765123 | |||
| 2818a000aa | |||
| fb5d8e58ff | |||
| 5a7d1ecffc | |||
| d056a0a3d8 | |||
| 7a079adc8f | |||
| b8518ffe65 | |||
| 9654ee0848 | |||
| 7ecd211163 | |||
| 05f78b3b52 | |||
| f5fc8f2928 | |||
| 9a8949f022 | |||
| 3adcc4c86a | |||
| 47e7963e50 | |||
| 88af7bb48b | |||
| 0d241e1114 | |||
| f750a442f7 | |||
| 003853e702 | |||
| a284ad4092 | |||
| 47f82e4408 | |||
| 5cd2126a6a | |||
| 29c353c553 | |||
| 808a8aedab | |||
| 74474a6d63 | |||
| d16dcf642e | |||
| 7dd14e5d1c | |||
| 866fe27e78 | |||
| d1f56f732e | |||
| 0e39dcd135 | |||
| 345ff2196a | |||
| 2c176e02ae | |||
| 63485b3029 | |||
| f59b564507 | |||
| 2068678b8c | |||
| cc66a9a5e3 | |||
| 5de1563997 | |||
| ac5a4477ad | |||
| c049f60d4a | |||
| b5ce4f0427 | |||
| ac12b6d332 | |||
| 5819b7a78c | |||
| 5bf1a3d6dc | |||
| 3f8db3d597 | |||
| a50013fd99 | |||
| 2978053d16 | |||
| 2c760372d6 | |||
| 2680043bc6 | |||
| 8db451f652 | |||
| 430d3d74f6 | |||
| 7ee1879ed4 | |||
| d14fcfd24a | |||
| 27927463a1 | |||
| fcb6df45e5 | |||
| a7927c13fd | |||
| 339c8f0133 | |||
| bce602eb4e | |||
| 939cbd7057 | |||
| 12623c99b6 | |||
| 2655d61d70 | |||
| fcb05b4c82 | |||
| 806bae1ee7 | |||
| f6fcff3602 | |||
| 244b356a37 | |||
| 49f33f6438 | |||
| 93afb40cd4 | |||
| 9c1f853d58 | |||
| 7d09ab8915 | |||
| d9db819e23 | |||
| 37716d55ed | |||
| 4399684582 | |||
| 44b4fc5f50 | |||
| f4dad9f639 | |||
| 8740e4e94a | |||
| c0a279e808 | |||
| 96e400fee5 | |||
| 72ba26679b | |||
| ea47760bd8 | |||
| 70dfe4dc96 | |||
| a8e9e0b916 | |||
| 31de2953a3 | |||
| fd5c28dc52 | |||
| 42aa1f3f33 | |||
| 2110e35fd6 | |||
| b5d33a656f | |||
| fe56138142 | |||
| c110eb92f8 | |||
| 8f8b884430 | |||
| a8cd1eb996 | |||
| 29e595e5d4 | |||
| c232780081 | |||
| 7c816de442 | |||
| 8677b7d698 | |||
| 33bef689c1 | |||
| fcbe63eaad | |||
| 5727922106 | |||
| 5dc5e29b9c | |||
| 3deffcdb1e | |||
| c9ae1d1ee5 | |||
| daadcf36c0 | |||
| 823b679232 | |||
| 6c28ac260c | |||
| 49c34dfd36 | |||
| 4106477e7f | |||
| 7ac6ca7311 | |||
| 11a974da21 | |||
| 09dc9854cd | |||
| 442fcc02f7 | |||
| b6a585348a | |||
| c582f178b7 | |||
| 4cec90a260 | |||
| 0e48f7f245 | |||
| 392773ccb2 | |||
| bf32922e5a | |||
| 797691f908 | |||
| e5ea4fad78 | |||
| 5880de186b | |||
| 992928304f | |||
| ae1262a241 | |||
| c79f221192 | |||
| 8ce5679813 | |||
| eb03625626 | |||
| 87d577e023 | |||
| 2ef6de928d | |||
| 29e131df43 | |||
| cfd07aafff | |||
| a178eb1bc8 | |||
| 8737ead008 | |||
| 90921981be | |||
| acb19068d0 | |||
| d74c4e90d4 | |||
| 85ca8cb90c | |||
| c3ea36304b | |||
| 1b5642604b | |||
| 07c33eff43 | |||
| 4eb7b950c8 | |||
| dc65d0ae9d | |||
| b18b99eb14 | |||
| 5680be70ae | |||
| c77e7e60fc | |||
| d74c6ace24 | |||
| f1b67730fa | |||
| 92a1e74b20 | |||
| c914d67cda | |||
| f35f8d06ea | |||
| d2709a5389 | |||
| 928c575c6f | |||
| 3051c9d002 | |||
| 34c09f33da | |||
| cf3282d103 | |||
| 32d9fd0b26 | |||
| c6e79c84de | |||
| 8d6dde7825 | |||
| d12c00bdc3 | |||
| ba39d3d5d7 | |||
| f3948e001f | |||
| 7fa71e3267 | |||
| 517fb9a023 | |||
| 9ac417fa88 | |||
| d2a92c6bde | |||
| d79e90f078 | |||
| 9b4cd0cd0f | |||
| 140a50f641 | |||
| 5645d9747b | |||
| 3fbb031745 | |||
| 4c8f6a7e42 | |||
| 7df276d219 | |||
| 0ee0138325 | |||
| 77f06856b6 | |||
| 65c451cb38 | |||
| 251aafccca | |||
| c058625959 | |||
| cdd04f7055 | |||
| 542ab0f886 | |||
| e525b46f12 | |||
| b9b4466d0d | |||
| c3fff251a9 | |||
| 45a9e0ae0c | |||
| 489a4cd1cf | |||
| 2a2b2ef834 | |||
| 2e2eeb43a6 | |||
| 7f3148865c | |||
| bb9c7f2dd9 | |||
| 9036d2d6a8 | |||
| c061b47c57 | |||
| f73f154ec2 | |||
| 64b6606824 | |||
| 42a7a09eea | |||
| 091c545c4f | |||
| a6ba41e078 | |||
| f85949bde0 | |||
| 2f871ad143 | |||
| c8ea2d5b1f | |||
| b131fb1fe2 | |||
| 413d0d6a24 | |||
| 0a2d73fd60 | |||
| ce4999268a | |||
| 633ceb9bb1 | |||
| 64374bda5b | |||
| 772ad4f715 | |||
| bdacee476d | |||
| 10f82b4bea | |||
| 8c5f252edb | |||
| 8b9f471d27 | |||
| a64f9bbfe0 | |||
| 42ad49f5b7 | |||
| 2b0f8a9482 | |||
| af4422c42a | |||
| 0311612ce9 | |||
| 5fc03449c8 | |||
| 4fab578b43 | |||
| 661b76615b | |||
| dcfc70e8ed | |||
| 63fdd9fe0b | |||
| 910956b0ec | |||
| d3ac8fd87d | |||
| e98e00558a | |||
| 3ddf0b9722 | |||
| 2acae8300f | |||
| dbe7892e03 | |||
| 28c5181dfe | |||
| 15e9885197 | |||
| 8505a4ddc3 | |||
| 6051266924 | |||
| 070e28e203 | |||
| 834924248f | |||
| 98dfa7d24f | |||
| 338c0a8a69 | |||
| a874c0894a | |||
| f382a3bb7e | |||
| 76e69cc8de | |||
| fde412b240 | |||
| bfc52a2342 | |||
| deeebbfcb7 | |||
| 1ee7280c4c | |||
| cde49d3d2b | |||
| e738920156 | |||
| 0065e554e0 | |||
| 5a3e4e43d8 | |||
| d9a5c56930 | |||
| 51fb590c0e | |||
| 5577a61090 | |||
| 5e909c73d7 | |||
| e0c9f30efa | |||
| 515548a47a | |||
| aa667ee396 | |||
| 7d6b313312 | |||
| a84a693327 | |||
| 99afb4b750 | |||
| 21f135ba76 | |||
| d7ee7b589f |
+10
-1
@@ -29,7 +29,7 @@ Matthew Hodgson <matthew at matrix.org>

Emmanuel Rohee <manu at matrix.org>
 * Supporting iOS clients (testability and fallback registration)

Turned to Dust <dwinslow86 at gmail.com>
 * ArchLinux installation instructions

@@ -51,3 +51,12 @@ Steven Hammerton <steven.hammerton at openmarket.com>

Mads Robin Christensen <mads at v42 dot dk>
 * CentOS 7 installation instructions.

Florent Violleau <floviolleau at gmail dot com>
 * Add Raspberry Pi installation instructions and general troubleshooting items

Niklas Riekenbrauck <nikriek at gmail dot.com>
 * Add JWT support for registration and login

Christoph Witzany <christoph at web.crofting.com>
 * Add LDAP support for authentication

+190
@@ -1,3 +1,193 @@
Changes in synapse v0.14.0 (2016-03-30)
=======================================

No changes from v0.14.0-rc2

Changes in synapse v0.14.0-rc2 (2016-03-23)
===========================================

Features:

* Add published room list API (PR #657)

Changes:

* Change various caches to consume less memory (PR #656, #658, #660, #662,
  #663, #665)
* Allow rooms to be published without requiring an alias (PR #664)
* Intern common strings in caches to reduce memory footprint (#666)

Bug fixes:

* Fix reject invites over federation (PR #646)
* Fix bug where registration was not idempotent (PR #649)
* Update aliases event after deleting aliases (PR #652)
* Fix unread notification count, which was sometimes wrong (PR #661)

Changes in synapse v0.14.0-rc1 (2016-03-14)
===========================================

Features:

* Add event_id to response to state event PUT (PR #581)
* Allow guest users access to messages in rooms they have joined (PR #587)
* Add config for what state is included in a room invite (PR #598)
* Send the inviter's member event in room invite state (PR #607)
* Add error codes for malformed/bad JSON in /login (PR #608)
* Add support for changing the actions for default rules (PR #609)
* Add environment variable SYNAPSE_CACHE_FACTOR, default it to 0.1 (PR #612)
* Add ability for alias creators to delete aliases (PR #614)
* Add profile information to invites (PR #624)

Changes:

* Enforce user_id exclusivity for AS registrations (PR #572)
* Make adding push rules idempotent (PR #587)
* Improve presence performance (PR #582, #586)
* Change presence semantics for ``last_active_ago`` (PR #582, #586)
* Don't allow ``m.room.create`` to be changed (PR #596)
* Add 800x600 to default list of valid thumbnail sizes (PR #616)
* Always include kicks and bans in full /sync (PR #625)
* Send history visibility on boundary changes (PR #626)
* Register endpoint now returns a refresh_token (PR #637)

Bug fixes:

* Fix bug where we returned incorrect state in /sync (PR #573)
* Always return a JSON object from push rule API (PR #606)
* Fix bug where registering without a user id sometimes failed (PR #610)
* Report size of ExpiringCache in cache size metrics (PR #611)
* Fix rejection of invites to empty rooms (PR #615)
* Fix usage of ``bcrypt`` to not use ``checkpw`` (PR #619)
* Pin ``pysaml2`` dependency (PR #634)
* Fix bug in ``/sync`` where timeline order was incorrect for backfilled events
  (PR #635)

Changes in synapse v0.13.3 (2016-02-11)
=======================================

* Fix bug where ``/sync`` would occasionally return events in the wrong room.

Changes in synapse v0.13.2 (2016-02-11)
=======================================

* Fix bug where ``/events`` would fail to skip some events if there had been
  more events than the limit specified since the last request (PR #570)

Changes in synapse v0.13.1 (2016-02-10)
=======================================

* Bump matrix-angular-sdk (matrix web console) dependency to 0.6.8 to
  pull in the fix for SYWEB-361 so that the default client can display
  HTML messages again(!)

Changes in synapse v0.13.0 (2016-02-10)
=======================================

This version includes an upgrade of the schema, specifically adding an index to
the ``events`` table. This may cause synapse to pause for several minutes the
first time it is started after the upgrade.

Changes:

* Improve general performance (PR #540, #543. #544, #54, #549, #567)
* Change guest user ids to be incrementing integers (PR #550)
* Improve performance of public room list API (PR #552)
* Change profile API to omit keys rather than return null (PR #557)
* Add ``/media/r0`` endpoint prefix, which is equivalent to ``/media/v1/``
  (PR #595)

Bug fixes:

* Fix bug with upgrading guest accounts where it would fail if you opened the
  registration email on a different device (PR #547)
* Fix bug where unread count could be wrong (PR #568)



Changes in synapse v0.12.1-rc1 (2016-01-29)
===========================================

Features:

* Add unread notification counts in ``/sync`` (PR #456)
* Add support for inviting 3pids in ``/createRoom`` (PR #460)
* Add ability for guest accounts to upgrade (PR #462)
* Add ``/versions`` API (PR #468)
* Add ``event`` to ``/context`` API (PR #492)
* Add specific error code for invalid user names in ``/register`` (PR #499)
* Add support for push badge counts (PR #507)
* Add support for non-guest users to peek in rooms using ``/events`` (PR #510)

Changes:

* Change ``/sync`` so that guest users only get rooms they've joined (PR #469)
* Change to require unbanning before other membership changes (PR #501)
* Change default push rules to notify for all messages (PR #486)
* Change default push rules to not notify on membership changes (PR #514)
* Change default push rules in one to one rooms to only notify for events that
  are messages (PR #529)
* Change ``/sync`` to reject requests with a ``from`` query param (PR #512)
* Change server manhole to use SSH rather than telnet (PR #473)
* Change server to require AS users to be registered before use (PR #487)
* Change server not to start when ASes are invalidly configured (PR #494)
* Change server to require ID and ``as_token`` to be unique for AS's (PR #496)
* Change maximum pagination limit to 1000 (PR #497)

Bug fixes:

* Fix bug where ``/sync`` didn't return when something under the leave key
  changed (PR #461)
* Fix bug where we returned smaller rather than larger than requested
  thumbnails when ``method=crop`` (PR #464)
* Fix thumbnails API to only return cropped thumbnails when asking for a
  cropped thumbnail (PR #475)
* Fix bug where we occasionally still logged access tokens (PR #477)
* Fix bug where ``/events`` would always return immediately for guest users
  (PR #480)
* Fix bug where ``/sync`` unexpectedly returned old left rooms (PR #481)
* Fix enabling and disabling push rules (PR #498)
* Fix bug where ``/register`` returned 500 when given unicode username
  (PR #513)

Changes in synapse v0.12.0 (2016-01-04)
=======================================

* Expose ``/login`` under ``r0`` (PR #459)

Changes in synapse v0.12.0-rc3 (2015-12-23)
===========================================

* Allow guest accounts access to ``/sync`` (PR #455)
* Allow filters to include/exclude rooms at the room level
  rather than just from the components of the sync for each
  room. (PR #454)
* Include urls for room avatars in the response to ``/publicRooms`` (PR #453)
* Don't set a identicon as the avatar for a user when they register (PR #450)
* Add a ``display_name`` to third-party invites (PR #449)
* Send more information to the identity server for third-party invites so that
  it can send richer messages to the invitee (PR #446)
* Cache the responses to ``/initialSync`` for 5 minutes. If a client
  retries a request to ``/initialSync`` before the a response was computed
  to the first request then the same response is used for both requests
  (PR #457)
* Fix a bug where synapse would always request the signing keys of
  remote servers even when the key was cached locally (PR #452)
* Fix 500 when pagination search results (PR #447)
* Fix a bug where synapse was leaking raw email address in third-party invites
  (PR #448)

Changes in synapse v0.12.0-rc2 (2015-12-14)
===========================================

* Add caches for whether rooms have been forgotten by a user (PR #434)
* Remove instructions to use ``--process-dependency-link`` since all of the
  dependencies of synapse are on PyPI (PR #436)
* Parallelise the processing of ``/sync`` requests (PR #437)
* Fix race updating presence in ``/events`` (PR #444)
* Fix bug back-populating search results (PR #441)
* Fix bug calculating state in ``/sync`` requests (PR #442)

Changes in synapse v0.12.0-rc1 (2015-12-10)
===========================================

@@ -21,5 +21,6 @@ recursive-include synapse/static *.html
recursive-include synapse/static *.js

exclude jenkins.sh
exclude jenkins*.sh

prune demo/etc

+110
-14
@@ -104,7 +104,7 @@ Installing prerequisites on Ubuntu or Debian::

    sudo apt-get install build-essential python2.7-dev libffi-dev \
                         python-pip python-setuptools sqlite3 \
                         libssl-dev python-virtualenv libjpeg-dev
                         libssl-dev python-virtualenv libjpeg-dev libxslt1-dev

Installing prerequisites on ArchLinux::

@@ -118,19 +118,27 @@ Installing prerequisites on CentOS 7::
                    python-virtualenv libffi-devel openssl-devel
    sudo yum groupinstall "Development Tools"


Installing prerequisites on Mac OS X::

    xcode-select --install
    sudo easy_install pip
    sudo pip install virtualenv

Installing prerequisites on Raspbian::

    sudo apt-get install build-essential python2.7-dev libffi-dev \
                         python-pip python-setuptools sqlite3 \
                         libssl-dev python-virtualenv libjpeg-dev
    sudo pip install --upgrade pip
    sudo pip install --upgrade ndg-httpsclient
    sudo pip install --upgrade virtualenv

To install the synapse homeserver run::

    virtualenv -p python2.7 ~/.synapse
    source ~/.synapse/bin/activate
    pip install --upgrade setuptools
    pip install --process-dependency-links https://github.com/matrix-org/synapse/tarball/master
    pip install https://github.com/matrix-org/synapse/tarball/master

This installs synapse, along with the libraries it uses, into a virtual
environment under ``~/.synapse``. Feel free to pick a different directory

@@ -141,10 +149,9 @@ In case of problems, please see the _Troubleshooting section below.

Alternatively, Silvio Fricke has contributed a Dockerfile to automate the
above in Docker at https://registry.hub.docker.com/u/silviof/docker-matrix/.

Another alternative is to install via apt from http://matrix.org/packages/debian/.
Note that these packages do not include a client - choose one from
https://matrix.org/blog/try-matrix-now/ (or build your own with
https://github.com/matrix-org/matrix-js-sdk/).
Also, Martin Giess has created an auto-deployment process with vagrant/ansible,
tested with VirtualBox/AWS/DigitalOcean - see https://github.com/EMnify/matrix-synapse-auto-deploy
for details.

To set up your homeserver, run (in your virtualenv, as before)::

@@ -163,8 +170,7 @@ identify itself to other Home Servers, so don't lose or delete them. It would be
wise to back them up somewhere safe. If, for whatever reason, you do need to
change your Home Server's keys, you may find that other Home Servers have the
old key cached. If you update the signing key, you should change the name of the
key in the <server name>.signing.key file (the second word, which by default is
, 'auto') to something different.
key in the <server name>.signing.key file (the second word) to something different.

By default, registration of new users is disabled. You can either enable
registration in the config by specifying ``enable_registration: true``

@@ -217,6 +223,19 @@ For information on how to install and use PostgreSQL, please see

Platform Specific Instructions
==============================

Debian
------

Matrix provides official Debian packages via apt from http://matrix.org/packages/debian/.
Note that these packages do not include a client - choose one from
https://matrix.org/blog/try-matrix-now/ (or build your own with one of our SDKs :)

Fedora
------

Oleg Girko provides Fedora RPMs at
https://obs.infoserver.lv/project/monitor/matrix-synapse

ArchLinux
---------

@@ -235,8 +254,7 @@ pip may be outdated (6.0.7-1 and needs to be upgraded to 6.0.8-1 )::

You also may need to explicitly specify python 2.7 again during the install
request::

    pip2.7 install --process-dependency-links \
        https://github.com/matrix-org/synapse/tarball/master
    pip2.7 install https://github.com/matrix-org/synapse/tarball/master

If you encounter an error with lib bcrypt causing an Wrong ELF Class:
ELFCLASS32 (x64 Systems), you may need to reinstall py-bcrypt to correctly

@@ -256,6 +274,20 @@ During setup of Synapse you need to call python2.7 directly again::

...substituting your host and domain name as appropriate.

FreeBSD
-------

Synapse can be installed via FreeBSD Ports or Packages contributed by Brendan Molloy from:

 - Ports: ``cd /usr/ports/net/py-matrix-synapse && make install clean``
 - Packages: ``pkg install py27-matrix-synapse``

NixOS
-----

Robin Lambertz has packaged Synapse for NixOS at:
https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/services/misc/matrix-synapse.nix

Windows Install
---------------
Synapse can be installed on Cygwin. It requires the following Cygwin packages:

@@ -295,12 +327,23 @@ Troubleshooting

Troubleshooting Installation
----------------------------

Synapse requires pip 1.7 or later, so if your OS provides too old a version and
you get errors about ``error: no such option: --process-dependency-links`` you
Synapse requires pip 1.7 or later, so if your OS provides too old a version you
may need to manually upgrade it::

    sudo pip install --upgrade pip

Installing may fail with ``Could not find any downloads that satisfy the requirement pymacaroons-pynacl (from matrix-synapse==0.12.0)``.
You can fix this by manually upgrading pip and virtualenv::

    sudo pip install --upgrade virtualenv

You can next rerun ``virtualenv -p python2.7 synapse`` to update the virtual env.

Installing may fail during installing virtualenv with ``InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. For more information, see https://urllib3.readthedocs.org/en/latest/security.html#insecureplatformwarning.``
You can fix this by manually installing ndg-httpsclient::

    pip install --upgrade ndg-httpsclient

Installing may fail with ``mock requires setuptools>=17.1. Aborting installation``.
You can fix this by upgrading setuptools::

@@ -495,7 +538,6 @@ Logging In To An Existing Account

Just enter the ``@localpart:my.domain.here`` Matrix user ID and password into
the form and click the Login button.


Identity Servers
================

@@ -516,6 +558,43 @@ we are running a single identity server (https://matrix.org) at the current
time.

URL Previews
============

Synapse 0.15.0 introduces an experimental new API for previewing URLs at
/_matrix/media/r0/preview_url. This is disabled by default. To turn it on
you must enable the `url_preview_enabled: True` config parameter and explicitly
specify the IP ranges that Synapse is not allowed to spider for previewing in
the `url_preview_ip_range_blacklist` configuration parameter. This is critical
from a security perspective to stop arbitrary Matrix users spidering 'internal'
URLs on your network. At the very least we recommend that your loopback and
RFC1918 IP addresses are blacklisted.

This also requires the optional lxml and netaddr python dependencies to be
installed.

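For illustration only, fetching a preview from a homeserver with this feature
enabled could look roughly like the Python sketch below. The homeserver URL and
access token are hypothetical placeholders, and it assumes the third-party
``requests`` library is available::

    import requests

    # Hypothetical homeserver location and access token.
    HOMESERVER = "http://localhost:8008"
    ACCESS_TOKEN = "<your access token>"

    resp = requests.get(
        HOMESERVER + "/_matrix/media/r0/preview_url",
        params={"url": "https://matrix.org", "access_token": ACCESS_TOKEN},
    )
    resp.raise_for_status()

    # The response is a JSON object of OpenGraph-style keys such as og:title.
    preview = resp.json()
    print(preview.get("og:title"))
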
Password reset
==============

If a user has registered an email address to their account using an identity
server, they can request a password-reset token via clients such as Vector.

A manual password reset can be done via direct database access as follows.

First calculate the hash of the new password:

    $ source ~/.synapse/bin/activate
    $ ./scripts/hash_password
    Password:
    Confirm password:
    $2a$12$xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

Then update the `users` table in the database:

    UPDATE users SET password_hash='$2a$12$xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
        WHERE name='@test:test.com';

Where's the spec?!
==================

@@ -536,3 +615,20 @@ Building internal API documentation::

    python setup.py build_sphinx


Halp!! Synapse eats all my RAM!
===============================

Synapse's architecture is quite RAM hungry currently - we deliberately
cache a lot of recent room data and metadata in RAM in order to speed up
common requests. We'll improve this in future, but for now the easiest
way to reduce the RAM usage (at the risk of slowing things down)
is to set the almost-undocumented ``SYNAPSE_CACHE_FACTOR`` environment
variable. Roughly speaking, a SYNAPSE_CACHE_FACTOR of 1.0 will max out
at around 3-4GB of resident memory - this is what we currently run the
matrix.org on. The default setting is currently 0.1, which is probably
around a ~700MB footprint. You can dial it down further to 0.02 if
desired, which targets roughly ~512MB. Conversely you can dial it up if
you need performance for lots of users and have a box with a lot of RAM.

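In effect the factor is a multiplier applied to the default size of each cache.
A simplified sketch of the idea (illustrative only, not synapse's actual
implementation)::

    import os

    # SYNAPSE_CACHE_FACTOR defaults to 0.1 if the variable is not set.
    CACHE_FACTOR = float(os.environ.get("SYNAPSE_CACHE_FACTOR", 0.1))

    def scaled_cache_size(default_size):
        # e.g. a 10000-entry cache holds 1000 entries at the default factor,
        # and 100000 entries with SYNAPSE_CACHE_FACTOR=10.
        return int(default_size * CACHE_FACTOR)
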
@@ -30,6 +30,14 @@ running:

    python synapse/python_dependencies.py | xargs -n1 pip install


Upgrading to v0.15.0
====================

If you want to use the new URL previewing API (/_matrix/media/r0/preview_url)
then you have to explicitly enable it in the config and update your
dependencies. See README.rst for details.


Upgrading to v0.11.0
====================

@@ -1,6 +1,6 @@
#!/usr/bin/env python

# Copyright 2014 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -1,4 +1,4 @@
# Copyright 2014 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -1,4 +1,4 @@
# Copyright 2014 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -1,4 +1,4 @@
# Copyright 2014 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -0,0 +1,151 @@
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import pydot
import cgi
import simplejson as json
import datetime
import argparse

from synapse.events import FrozenEvent
from synapse.util.frozenutils import unfreeze


def make_graph(file_name, room_id, file_prefix, limit):
    print "Reading lines"
    with open(file_name) as f:
        lines = f.readlines()

    print "Read lines"

    events = [FrozenEvent(json.loads(line)) for line in lines]

    print "Loaded events."

    events.sort(key=lambda e: e.depth)

    print "Sorted events"

    if limit:
        events = events[-int(limit):]

    node_map = {}

    graph = pydot.Dot(graph_name="Test")

    for event in events:
        t = datetime.datetime.fromtimestamp(
            float(event.origin_server_ts) / 1000
        ).strftime('%Y-%m-%d %H:%M:%S,%f')

        content = json.dumps(unfreeze(event.get_dict()["content"]), indent=4)
        content = content.replace("\n", "<br/>\n")

        print content
        content = []
        for key, value in unfreeze(event.get_dict()["content"]).items():
            if value is None:
                value = "<null>"
            elif isinstance(value, basestring):
                pass
            else:
                value = json.dumps(value)

            content.append(
                "<b>%s</b>: %s," % (
                    cgi.escape(key, quote=True).encode("ascii", 'xmlcharrefreplace'),
                    cgi.escape(value, quote=True).encode("ascii", 'xmlcharrefreplace'),
                )
            )

        content = "<br/>\n".join(content)

        print content

        label = (
            "<"
            "<b>%(name)s </b><br/>"
            "Type: <b>%(type)s </b><br/>"
            "State key: <b>%(state_key)s </b><br/>"
            "Content: <b>%(content)s </b><br/>"
            "Time: <b>%(time)s </b><br/>"
            "Depth: <b>%(depth)s </b><br/>"
            ">"
        ) % {
            "name": event.event_id,
            "type": event.type,
            "state_key": event.get("state_key", None),
            "content": content,
            "time": t,
            "depth": event.depth,
        }

        node = pydot.Node(
            name=event.event_id,
            label=label,
        )

        node_map[event.event_id] = node
        graph.add_node(node)

    print "Created Nodes"

    for event in events:
        for prev_id, _ in event.prev_events:
            try:
                end_node = node_map[prev_id]
            except:
                end_node = pydot.Node(
                    name=prev_id,
                    label="<<b>%s</b>>" % (prev_id,),
                )

                node_map[prev_id] = end_node
                graph.add_node(end_node)

            edge = pydot.Edge(node_map[event.event_id], end_node)
            graph.add_edge(edge)

    print "Created edges"

    graph.write('%s.dot' % file_prefix, format='raw', prog='dot')

    print "Created Dot"

    graph.write_svg("%s.svg" % file_prefix, prog='dot')

    print "Created svg"

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Generate a PDU graph for a given room by reading "
                    "from a file with line deliminated events. \n"
                    "Requires pydot."
    )
    parser.add_argument(
        "-p", "--prefix", dest="prefix",
        help="String to prefix output files with",
        default="graph_output"
    )
    parser.add_argument(
        "-l", "--limit",
        help="Only retrieve the last N events.",
    )
    parser.add_argument('event_file')
    parser.add_argument('room')

    args = parser.parse_args()

    make_graph(args.event_file, args.room, args.prefix, args.limit)

@@ -0,0 +1,58 @@
Replication Architecture
========================

Motivation
----------

We'd like to be able to split some of the work that synapse does into multiple
python processes. In theory multiple synapse processes could share a single
postgresql database and we'd scale up by running more synapse processes.
However much of synapse assumes that only one process is interacting with the
database, both for assigning unique identifiers when inserting into tables,
notifying components about new updates, and for invalidating its caches.

So running multiple copies of the current code isn't an option. One way to
run multiple processes would be to have a single writer process and multiple
reader processes connected to the same database. In order to do this we'd need
a way for the reader process to invalidate its in-memory caches when an update
happens on the writer. One way to do this is for the writer to present an
append-only log of updates which the readers can consume to invalidate their
caches and to push updates to listening clients or pushers.

Synapse already stores much of its data as an append-only log so that it can
correctly respond to /sync requests so the amount of code changes needed to
expose the append-only log to the readers should be fairly minimal.

Architecture
------------

The Replication API
~~~~~~~~~~~~~~~~~~~

Synapse will optionally expose a long poll HTTP API for extracting updates. The
API will have a similar shape to /sync in that clients provide tokens
indicating where in the log they have reached and a timeout. The synapse server
then either responds with updates immediately if it already has updates or it
waits until the timeout for more updates. If the timeout expires and nothing
happened then the server returns an empty response.

However unlike the /sync API this replication API is returning synapse specific
data rather than trying to implement a matrix specification. The replication
results are returned as arrays of rows where the rows are mostly lifted
directly from the database. This avoids unnecessary JSON parsing on the server
and hopefully avoids an impedance mismatch between the data returned and the
required updates to the datastore.

This does not replicate all the database tables as many of the database tables
are indexes that can be recovered from the contents of other tables.

The format and parameters for the api are documented in
``synapse/replication/resource.py``.


The Slaved DataStore
~~~~~~~~~~~~~~~~~~~~

There are read-only version of the synapse storage layer in
``synapse/replication/slave/storage`` that use the response of the replication
API to invalidate their caches.

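To make the long-poll shape concrete, a reader process consuming such an API
might loop along the lines of the Python sketch below. The endpoint path,
parameter names and response fields here are illustrative placeholders rather
than the documented interface - see ``synapse/replication/resource.py`` for the
real format::

    import requests

    # Hypothetical replication endpoint on a local synapse writer process.
    REPLICATION_URL = "http://localhost:8008/_synapse/replication"

    # Tokens recording how far through each stream this reader has got.
    streams = {"events": "0"}

    while True:
        params = dict(streams)
        params["timeout"] = "30000"  # long-poll for up to 30 seconds

        resp = requests.get(REPLICATION_URL, params=params)
        resp.raise_for_status()

        for stream_name, data in resp.json().items():
            # Rows are lifted more or less directly from the database; use
            # them to invalidate caches and notify listeners, then advance
            # the stream token so the next poll only returns newer updates.
            streams[stream_name] = data["position"]
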
@@ -0,0 +1,74 @@
URL Previews
============

Design notes on a URL previewing service for Matrix:

Options are:

1. Have an AS which listens for URLs, downloads them, and inserts an event that describes their metadata.
   * Pros:
     * Decouples the implementation entirely from Synapse.
     * Uses existing Matrix events & content repo to store the metadata.
   * Cons:
     * Which AS should provide this service for a room, and why should you trust it?
     * Doesn't work well with E2E; you'd have to cut the AS into every room
     * the AS would end up subscribing to every room anyway.

2. Have a generic preview API (nothing to do with Matrix) that provides a previewing service:
   * Pros:
     * Simple and flexible; can be used by any clients at any point
   * Cons:
     * If each HS provides one of these independently, all the HSes in a room may needlessly DoS the target URI
     * We need somewhere to store the URL metadata rather than just using Matrix itself
     * We can't piggyback on matrix to distribute the metadata between HSes.

3. Make the synapse of the sending user responsible for spidering the URL and inserting an event asynchronously which describes the metadata.
   * Pros:
     * Works transparently for all clients
     * Piggy-backs nicely on using Matrix for distributing the metadata.
     * No confusion as to which AS
   * Cons:
     * Doesn't work with E2E
     * We might want to decouple the implementation of the spider from the HS, given spider behaviour can be quite complicated and evolve much more rapidly than the HS. It's more like a bot than a core part of the server.

4. Make the sending client use the preview API and insert the event itself when successful.
   * Pros:
     * Works well with E2E
     * No custom server functionality
     * Lets the client customise the preview that they send (like on FB)
   * Cons:
     * Entirely specific to the sending client, whereas it'd be nice if /any/ URL was correctly previewed if clients support it.

5. Have the option of specifying a shared (centralised) previewing service used by a room, to avoid all the different HSes in the room DoSing the target.

Best solution is probably a combination of both 2 and 4.
 * Sending clients do their best to create and send a preview at the point of sending the message, perhaps delaying the message until the preview is computed? (This also lets the user validate the preview before sending)
 * Receiving clients have the option of going and creating their own preview if one doesn't arrive soon enough (or if the original sender didn't create one)

This is a bit magical though in that the preview could come from two entirely different sources - the sending HS or your local one. However, this can always be exposed to users: "Generate your own URL previews if none are available?"

This is tantamount also to senders calculating their own thumbnails for sending in advance of the main content - we are trusting the sender not to lie about the content in the thumbnail. Whereas currently thumbnails are calculated by the receiving homeserver to avoid this attack.

However, this kind of phishing attack does exist whether we let senders pick their thumbnails or not, in that a malicious sender can send normal text messages around the attachment claiming it to be legitimate. We could rely on (future) reputation/abuse management to punish users who phish (be it with bogus metadata or bogus descriptions). Bogus metadata is particularly bad though, especially if it's avoidable.

As a first cut, let's do #2 and have the receiver hit the API to calculate its own previews (as it does currently for image thumbnails). We can then extend/optimise this to option 4 as a special extra if needed.

API
---

GET /_matrix/media/r0/preview_url?url=http://wherever.com
200 OK
{
    "og:type" : "article"
    "og:url" : "https://twitter.com/matrixdotorg/status/684074366691356672"
    "og:title" : "Matrix on Twitter"
    "og:image" : "https://pbs.twimg.com/profile_images/500400952029888512/yI0qtFi7_400x400.png"
    "og:description" : "“Synapse 0.12 is out! Lots of polishing, performance &amp; bugfixes: /sync API, /r0 prefix, fulltext search, 3PID invites https://t.co/5alhXLLEGP”"
    "og:site_name" : "Twitter"
}

* Downloads the URL
  * If HTML, just stores it in RAM and parses it for OG meta tags
    * Download any media OG meta tags to the media repo, and refer to them in the OG via mxc:// URIs.
  * If a media filetype we know we can thumbnail: store it on disk, and hand it to the thumbnailer. Generate OG meta tags from the thumbnailer contents.
  * Otherwise, don't bother downloading further.

Executable
+22
@@ -0,0 +1,22 @@
#!/bin/bash

set -eux

: ${WORKSPACE:="$(pwd)"}

export PYTHONDONTWRITEBYTECODE=yep
export SYNAPSE_CACHE_FACTOR=1

# Output test results as junit xml
export TRIAL_FLAGS="--reporter=subunit"
export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml"
# Write coverage reports to a separate file for each process
export COVERAGE_OPTS="-p"
export DUMP_COVERAGE_COMMAND="coverage help"

# Output flake8 violations to violations.flake8.log
export PEP8SUFFIX="--output-file=violations.flake8.log"

rm .coverage* || echo "No coverage files to remove"

tox -e packaging -e pep8

Executable
+63
@@ -0,0 +1,63 @@
#!/bin/bash

set -eux

: ${WORKSPACE:="$(pwd)"}

export PYTHONDONTWRITEBYTECODE=yep
export SYNAPSE_CACHE_FACTOR=1

# Output test results as junit xml
export TRIAL_FLAGS="--reporter=subunit"
export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml"
# Write coverage reports to a separate file for each process
export COVERAGE_OPTS="-p"
export DUMP_COVERAGE_COMMAND="coverage help"

# Output flake8 violations to violations.flake8.log
# Don't exit with non-0 status code on Jenkins,
# so that the build steps continue and a later step can decide whether to
# UNSTABLE or FAILURE this build.
export PEP8SUFFIX="--output-file=violations.flake8.log || echo flake8 finished with status code \$?"

rm .coverage* || echo "No coverage files to remove"

tox --notest -e py27

TOX_BIN=$WORKSPACE/.tox/py27/bin
python synapse/python_dependencies.py | xargs -n1 $TOX_BIN/pip install
$TOX_BIN/pip install psycopg2
$TOX_BIN/pip install lxml

: ${GIT_BRANCH:="origin/$(git rev-parse --abbrev-ref HEAD)"}

if [[ ! -e .sytest-base ]]; then
    git clone https://github.com/matrix-org/sytest.git .sytest-base --mirror
else
    (cd .sytest-base; git fetch -p)
fi

rm -rf sytest
git clone .sytest-base sytest --shared
cd sytest

git checkout "${GIT_BRANCH}" || (echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop" ; git checkout develop)

: ${PORT_BASE:=8000}

./jenkins/prep_sytest_for_postgres.sh

echo >&2 "Running sytest with PostgreSQL";
./jenkins/install_and_run.sh --coverage \
    --python $TOX_BIN/python \
    --synapse-directory $WORKSPACE \
    --port-base $PORT_BASE

cd ..
cp sytest/.coverage.* .

# Combine the coverage reports
echo "Combining:" .coverage.*
$TOX_BIN/python -m coverage combine
# Output coverage to coverage.xml
$TOX_BIN/coverage xml -o coverage.xml
Executable
+57
@@ -0,0 +1,57 @@
#!/bin/bash

set -eux

: ${WORKSPACE:="$(pwd)"}

export PYTHONDONTWRITEBYTECODE=yep
export SYNAPSE_CACHE_FACTOR=1

# Output test results as junit xml
export TRIAL_FLAGS="--reporter=subunit"
export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml"
# Write coverage reports to a separate file for each process
export COVERAGE_OPTS="-p"
export DUMP_COVERAGE_COMMAND="coverage help"

# Output flake8 violations to violations.flake8.log
# Don't exit with non-0 status code on Jenkins,
# so that the build steps continue and a later step can decide whether to
# UNSTABLE or FAILURE this build.
export PEP8SUFFIX="--output-file=violations.flake8.log || echo flake8 finished with status code \$?"

rm .coverage* || echo "No coverage files to remove"

tox --notest -e py27
TOX_BIN=$WORKSPACE/.tox/py27/bin
python synapse/python_dependencies.py | xargs -n1 $TOX_BIN/pip install
$TOX_BIN/pip install lxml

: ${GIT_BRANCH:="origin/$(git rev-parse --abbrev-ref HEAD)"}

if [[ ! -e .sytest-base ]]; then
    git clone https://github.com/matrix-org/sytest.git .sytest-base --mirror
else
    (cd .sytest-base; git fetch -p)
fi

rm -rf sytest
git clone .sytest-base sytest --shared
cd sytest

git checkout "${GIT_BRANCH}" || (echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop" ; git checkout develop)

: ${PORT_BASE:=8500}
./jenkins/install_and_run.sh --coverage \
    --python $TOX_BIN/python \
    --synapse-directory $WORKSPACE \
    --port-base $PORT_BASE

cd ..
cp sytest/.coverage.* .

# Combine the coverage reports
echo "Combining:" .coverage.*
$TOX_BIN/python -m coverage combine
# Output coverage to coverage.xml
$TOX_BIN/coverage xml -o coverage.xml
Executable
+25
@@ -0,0 +1,25 @@
#!/bin/bash

set -eux

: ${WORKSPACE:="$(pwd)"}

export PYTHONDONTWRITEBYTECODE=yep
export SYNAPSE_CACHE_FACTOR=1

# Output test results as junit xml
export TRIAL_FLAGS="--reporter=subunit"
export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml"
# Write coverage reports to a separate file for each process
export COVERAGE_OPTS="-p"
export DUMP_COVERAGE_COMMAND="coverage help"

# Output flake8 violations to violations.flake8.log
# Don't exit with non-0 status code on Jenkins,
# so that the build steps continue and a later step can decide whether to
# UNSTABLE or FAILURE this build.
export PEP8SUFFIX="--output-file=violations.flake8.log || echo flake8 finished with status code \$?"

rm .coverage* || echo "No coverage files to remove"

tox -e py27
-70
@@ -1,70 +0,0 @@
#!/bin/bash -eu

export PYTHONDONTWRITEBYTECODE=yep

# Output test results as junit xml
export TRIAL_FLAGS="--reporter=subunit"
export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml"

# Output coverage to coverage.xml
export DUMP_COVERAGE_COMMAND="coverage xml -o coverage.xml"

# Output flake8 violations to violations.flake8.log
# Don't exit with non-0 status code on Jenkins,
# so that the build steps continue and a later step can decided whether to
# UNSTABLE or FAILURE this build.
export PEP8SUFFIX="--output-file=violations.flake8.log || echo flake8 finished with status code \$?"

tox

: ${GIT_BRANCH:="origin/$(git rev-parse --abbrev-ref HEAD)"}

set +u
. .tox/py27/bin/activate
set -u

if [[ ! -e .sytest-base ]]; then
    git clone https://github.com/matrix-org/sytest.git .sytest-base --mirror
else
    (cd .sytest-base; git fetch)
fi

rm -rf sytest
git clone .sytest-base sytest --shared
cd sytest

git checkout "${GIT_BRANCH}" || (echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop" ; git checkout develop)

: ${PERL5LIB:=$WORKSPACE/perl5/lib/perl5}
: ${PERL_MB_OPT:=--install_base=$WORKSPACE/perl5}
: ${PERL_MM_OPT:=INSTALL_BASE=$WORKSPACE/perl5}
export PERL5LIB PERL_MB_OPT PERL_MM_OPT

./install-deps.pl

: ${PORT_BASE:=8000}

echo >&2 "Running sytest with SQLite3";
./run-tests.pl -O tap --synapse-directory .. --all --port-base $PORT_BASE > results-sqlite3.tap

RUN_POSTGRES=""

for port in $(($PORT_BASE + 1)) $(($PORT_BASE + 2)); do
    if psql synapse_jenkins_$port <<< ""; then
        RUN_POSTGRES=$RUN_POSTGRES:$port
        cat > localhost-$port/database.yaml << EOF
name: psycopg2
args:
    database: synapse_jenkins_$port
EOF
    fi
done

# Run if both postgresql databases exist
if test $RUN_POSTGRES = ":$(($PORT_BASE + 1)):$(($PORT_BASE + 2))"; then
    echo >&2 "Running sytest with PostgreSQL";
    pip install psycopg2
    ./run-tests.pl -O tap --synapse-directory .. --all --port-base $PORT_BASE > results-postgresql.tap
else
    echo >&2 "Skipping running sytest with PostgreSQL, $RUN_POSTGRES"
fi
@@ -1,5 +1,5 @@
|
||||
#!/usr/bin/perl -pi
|
||||
# Copyright 2015 OpenMarket Ltd
|
||||
# Copyright 2014-2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -14,7 +14,7 @@
|
||||
# limitations under the License.
|
||||
|
||||
$copyright = <<EOT;
|
||||
/* Copyright 2015 OpenMarket Ltd
|
||||
/* Copyright 2016 OpenMarket Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#!/usr/bin/perl -pi
|
||||
# Copyright 2014 OpenMarket Ltd
|
||||
# Copyright 2014-2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -14,7 +14,7 @@
|
||||
# limitations under the License.
|
||||
|
||||
$copyright = <<EOT;
|
||||
# Copyright 2015 OpenMarket Ltd
|
||||
# Copyright 2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
|
||||
@@ -86,9 +86,12 @@ def used_names(prefix, item, defs, names):
|
||||
for name, funcs in defs.get('class', {}).items():
|
||||
used_names(prefix + name + ".", name, funcs, names)
|
||||
|
||||
path = prefix.rstrip('.')
|
||||
for used in defs.get('uses', ()):
|
||||
if used in names:
|
||||
names[used].setdefault('used', {}).setdefault(item, []).append(prefix.rstrip('.'))
|
||||
if item:
|
||||
names[item].setdefault('uses', []).append(used)
|
||||
names[used].setdefault('used', {}).setdefault(item, []).append(path)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
@@ -113,6 +116,10 @@ if __name__ == '__main__':
|
||||
"--referrers", default=0, type=int,
|
||||
help="Include referrers up to the given depth"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--referred", default=0, type=int,
|
||||
help="Include referred down to the given depth"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--format", default="yaml",
|
||||
help="Output format, one of 'yaml' or 'dot'"
|
||||
@@ -161,6 +168,20 @@ if __name__ == '__main__':
|
||||
continue
|
||||
result[name] = definition
|
||||
|
||||
referred_depth = args.referred
|
||||
referred = set()
|
||||
while referred_depth:
|
||||
referred_depth -= 1
|
||||
for entry in result.values():
|
||||
for uses in entry.get("uses", ()):
|
||||
referred.add(uses)
|
||||
for name, definition in names.items():
|
||||
if not name in referred:
|
||||
continue
|
||||
if ignore and any(pattern.match(name) for pattern in ignore):
|
||||
continue
|
||||
result[name] = definition
|
||||
|
||||
if args.format == 'yaml':
|
||||
yaml.dump(result, sys.stdout, default_flow_style=False)
|
||||
elif args.format == 'dot':
|
||||
|
||||
Executable
+24
@@ -0,0 +1,24 @@
#!/usr/bin/env python2

import pymacaroons
import sys

if len(sys.argv) == 1:
    sys.stderr.write("usage: %s macaroon [key]\n" % (sys.argv[0],))
    sys.exit(1)

macaroon_string = sys.argv[1]
key = sys.argv[2] if len(sys.argv) > 2 else None

macaroon = pymacaroons.Macaroon.deserialize(macaroon_string)
print macaroon.inspect()

print ""

verifier = pymacaroons.Verifier()
verifier.satisfy_general(lambda c: True)
try:
    verifier.verify(macaroon, key)
    print "Signature is correct"
except Exception as e:
    print e.message
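If you want something to feed the dump script above, a throwaway macaroon can be minted with pymacaroons as sketched below. Every value here (location, identifier, key, caveats) is made up for illustration; real Synapse access tokens are minted by the homeserver with its own secret key and caveat set::

    import pymacaroons

    # Hypothetical values, purely for exercising the dump script above.
    macaroon = pymacaroons.Macaroon(
        location="example.com",
        identifier="key1",
        key="not-a-real-secret",
    )
    macaroon.add_first_party_caveat("gen = 1")
    macaroon.add_first_party_caveat("type = access")

    serialized = macaroon.serialize()
    # Pass this string (and "not-a-real-secret" as the key) to the script above.
    print(serialized)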
Executable
+62
@@ -0,0 +1,62 @@
|
||||
#! /usr/bin/python
|
||||
|
||||
import ast
|
||||
import argparse
|
||||
import os
|
||||
import sys
|
||||
import yaml
|
||||
|
||||
PATTERNS_V1 = []
|
||||
PATTERNS_V2 = []
|
||||
|
||||
RESULT = {
|
||||
"v1": PATTERNS_V1,
|
||||
"v2": PATTERNS_V2,
|
||||
}
|
||||
|
||||
class CallVisitor(ast.NodeVisitor):
|
||||
def visit_Call(self, node):
|
||||
if isinstance(node.func, ast.Name):
|
||||
name = node.func.id
|
||||
else:
|
||||
return
|
||||
|
||||
|
||||
if name == "client_path_patterns":
|
||||
PATTERNS_V1.append(node.args[0].s)
|
||||
elif name == "client_v2_patterns":
|
||||
PATTERNS_V2.append(node.args[0].s)
|
||||
|
||||
|
||||
def find_patterns_in_code(input_code):
|
||||
input_ast = ast.parse(input_code)
|
||||
visitor = CallVisitor()
|
||||
visitor.visit(input_ast)
|
||||
|
||||
|
||||
def find_patterns_in_file(filepath):
|
||||
with open(filepath) as f:
|
||||
find_patterns_in_code(f.read())
|
||||
|
||||
|
||||
parser = argparse.ArgumentParser(description='Find url patterns.')
|
||||
|
||||
parser.add_argument(
|
||||
"directories", nargs='+', metavar="DIR",
|
||||
help="Directories to search for definitions"
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
|
||||
for directory in args.directories:
|
||||
for root, dirs, files in os.walk(directory):
|
||||
for filename in files:
|
||||
if filename.endswith(".py"):
|
||||
filepath = os.path.join(root, filename)
|
||||
find_patterns_in_file(filepath)
|
||||
|
||||
PATTERNS_V1.sort()
|
||||
PATTERNS_V2.sort()
|
||||
|
||||
yaml.dump(RESULT, sys.stdout, default_flow_style=False)
|
||||
@@ -0,0 +1,67 @@
|
||||
import requests
|
||||
import collections
|
||||
import sys
|
||||
import time
|
||||
import json
|
||||
|
||||
Entry = collections.namedtuple("Entry", "name position rows")
|
||||
|
||||
ROW_TYPES = {}
|
||||
|
||||
|
||||
def row_type_for_columns(name, column_names):
|
||||
column_names = tuple(column_names)
|
||||
row_type = ROW_TYPES.get((name, column_names))
|
||||
if row_type is None:
|
||||
row_type = collections.namedtuple(name, column_names)
|
||||
ROW_TYPES[(name, column_names)] = row_type
|
||||
return row_type
|
||||
|
||||
|
||||
def parse_response(content):
|
||||
streams = json.loads(content)
|
||||
result = {}
|
||||
for name, value in streams.items():
|
||||
row_type = row_type_for_columns(name, value["field_names"])
|
||||
position = value["position"]
|
||||
rows = [row_type(*row) for row in value["rows"]]
|
||||
result[name] = Entry(name, position, rows)
|
||||
return result
|
||||
|
||||
|
||||
def replicate(server, streams):
|
||||
return parse_response(requests.get(
|
||||
server + "/_synapse/replication",
|
||||
verify=False,
|
||||
params=streams
|
||||
).content)
|
||||
|
||||
|
||||
def main():
|
||||
server = sys.argv[1]
|
||||
|
||||
streams = None
|
||||
while not streams:
|
||||
try:
|
||||
streams = {
|
||||
row.name: row.position
|
||||
for row in replicate(server, {"streams":"-1"})["streams"].rows
|
||||
}
|
||||
except requests.exceptions.ConnectionError as e:
|
||||
time.sleep(0.1)
|
||||
|
||||
while True:
|
||||
try:
|
||||
results = replicate(server, streams)
|
||||
except:
|
||||
sys.stdout.write("connection_lost("+ repr(streams) + ")\n")
|
||||
break
|
||||
for update in results.values():
|
||||
for row in update.rows:
|
||||
sys.stdout.write(repr(row) + "\n")
|
||||
streams[update.name] = update.position
|
||||
|
||||
|
||||
|
||||
if __name__=='__main__':
|
||||
main()
|
||||
Executable
+39
@@ -0,0 +1,39 @@
#!/usr/bin/env python

import argparse
import bcrypt
import getpass

bcrypt_rounds=12

def prompt_for_pass():
    password = getpass.getpass("Password: ")

    if not password:
        raise Exception("Password cannot be blank.")

    confirm_password = getpass.getpass("Confirm password: ")

    if password != confirm_password:
        raise Exception("Passwords do not match.")

    return password

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Calculate the hash of a new password, so that passwords"
                    " can be reset")
    parser.add_argument(
        "-p", "--password",
        default=None,
        help="New password for user. Will prompt if omitted.",
    )

    args = parser.parse_args()
    password = args.password

    if not password:
        password = prompt_for_pass()

    print bcrypt.hashpw(password, bcrypt.gensalt(bcrypt_rounds))
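As a rough aside (not part of this changeset), a hash produced by the script above can be checked by re-hashing the candidate password with the stored hash as the salt, since bcrypt embeds the salt and work factor in the hash. This sketch assumes the same bcrypt API style as the script::

    import bcrypt

    def password_matches(candidate_password, stored_hash):
        # Re-hashing with the stored hash as the salt reproduces it on a match.
        # A production check would use a constant-time comparison instead of ==.
        return bcrypt.hashpw(candidate_password, stored_hash) == stored_hash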
@@ -1,6 +1,6 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2015 OpenMarket Ltd
|
||||
# Copyright 2015, 2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2015 OpenMarket Ltd
|
||||
# Copyright 2015, 2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -19,6 +19,7 @@ from twisted.enterprise import adbapi
|
||||
|
||||
from synapse.storage._base import LoggingTransaction, SQLBaseStore
|
||||
from synapse.storage.engines import create_engine
|
||||
from synapse.storage.prepare_database import prepare_database
|
||||
|
||||
import argparse
|
||||
import curses
|
||||
@@ -37,6 +38,7 @@ BOOLEAN_COLUMNS = {
|
||||
"rooms": ["is_public"],
|
||||
"event_edges": ["is_state"],
|
||||
"presence_list": ["accepted"],
|
||||
"presence_stream": ["currently_active"],
|
||||
}
|
||||
|
||||
|
||||
@@ -292,7 +294,7 @@ class Porter(object):
|
||||
}
|
||||
)
|
||||
|
||||
database_engine.prepare_database(db_conn)
|
||||
prepare_database(db_conn, database_engine, config=None)
|
||||
|
||||
db_conn.commit()
|
||||
|
||||
@@ -309,8 +311,8 @@ class Porter(object):
|
||||
**self.postgres_config["args"]
|
||||
)
|
||||
|
||||
sqlite_engine = create_engine("sqlite3")
|
||||
postgres_engine = create_engine("psycopg2")
|
||||
sqlite_engine = create_engine(sqlite_config)
|
||||
postgres_engine = create_engine(postgres_config)
|
||||
|
||||
self.sqlite_store = Store(sqlite_db_pool, sqlite_engine)
|
||||
self.postgres_store = Store(postgres_db_pool, postgres_engine)
|
||||
|
||||
@@ -16,3 +16,7 @@ ignore =
|
||||
|
||||
[flake8]
|
||||
max-line-length = 90
|
||||
ignore = W503 ; W503 requires that binary operators be at the end, not start, of lines. Erik doesn't like it.
|
||||
|
||||
[pep8]
|
||||
max-line-length = 90
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Copyright 2014 OpenMarket Ltd
|
||||
# Copyright 2014-2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
|
||||
+2
-2
@@ -1,5 +1,5 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2014, 2015 OpenMarket Ltd
|
||||
# Copyright 2014-2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -16,4 +16,4 @@
|
||||
""" This is a reference implementation of a Matrix home server.
|
||||
"""
|
||||
|
||||
__version__ = "0.12.0-rc1"
|
||||
__version__ = "0.14.0"
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2014, 2015 OpenMarket Ltd
|
||||
# Copyright 2014-2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
|
||||
+136
-70
@@ -1,5 +1,5 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2014, 2015 OpenMarket Ltd
|
||||
# Copyright 2014 - 2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -22,8 +22,10 @@ from twisted.internet import defer
|
||||
|
||||
from synapse.api.constants import EventTypes, Membership, JoinRules
|
||||
from synapse.api.errors import AuthError, Codes, SynapseError, EventSizeError
|
||||
from synapse.types import RoomID, UserID, EventID
|
||||
from synapse.types import Requester, RoomID, UserID, EventID
|
||||
from synapse.util.logutils import log_function
|
||||
from synapse.util.logcontext import preserve_context_over_fn
|
||||
from synapse.util.metrics import Measure
|
||||
from unpaddedbase64 import decode_base64
|
||||
|
||||
import logging
|
||||
@@ -43,6 +45,7 @@ class Auth(object):
|
||||
|
||||
def __init__(self, hs):
|
||||
self.hs = hs
|
||||
self.clock = hs.get_clock()
|
||||
self.store = hs.get_datastore()
|
||||
self.state = hs.get_state_handler()
|
||||
self.TOKEN_NOT_FOUND_HTTP_STATUS = 401
|
||||
@@ -65,9 +68,9 @@ class Auth(object):
|
||||
Returns:
|
||||
True if the auth checks pass.
|
||||
"""
|
||||
self.check_size_limits(event)
|
||||
with Measure(self.clock, "auth.check"):
|
||||
self.check_size_limits(event)
|
||||
|
||||
try:
|
||||
if not hasattr(event, "room_id"):
|
||||
raise AuthError(500, "Event has no room_id: %s" % event)
|
||||
if auth_events is None:
|
||||
@@ -126,13 +129,6 @@ class Auth(object):
|
||||
self.check_redaction(event, auth_events)
|
||||
|
||||
logger.debug("Allowing! %s", event)
|
||||
except AuthError as e:
|
||||
logger.info(
|
||||
"Event auth check failed on event %s with msg: %s",
|
||||
event, e.msg
|
||||
)
|
||||
logger.info("Denying! %s", event)
|
||||
raise
|
||||
|
||||
def check_size_limits(self, event):
|
||||
def too_big(field):
|
||||
@@ -433,31 +429,46 @@ class Auth(object):
|
||||
|
||||
if event.user_id != invite_event.user_id:
|
||||
return False
|
||||
try:
|
||||
public_key = invite_event.content["public_key"]
|
||||
if signed["mxid"] != event.state_key:
|
||||
return False
|
||||
if signed["token"] != token:
|
||||
return False
|
||||
for server, signature_block in signed["signatures"].items():
|
||||
for key_name, encoded_signature in signature_block.items():
|
||||
if not key_name.startswith("ed25519:"):
|
||||
return False
|
||||
verify_key = decode_verify_key_bytes(
|
||||
key_name,
|
||||
decode_base64(public_key)
|
||||
)
|
||||
verify_signed_json(signed, server, verify_key)
|
||||
|
||||
# We got the public key from the invite, so we know that the
|
||||
# correct server signed the signed bundle.
|
||||
# The caller is responsible for checking that the signing
|
||||
# server has not revoked that public key.
|
||||
return True
|
||||
if signed["mxid"] != event.state_key:
|
||||
return False
|
||||
except (KeyError, SignatureVerifyException,):
|
||||
if signed["token"] != token:
|
||||
return False
|
||||
|
||||
for public_key_object in self.get_public_keys(invite_event):
|
||||
public_key = public_key_object["public_key"]
|
||||
try:
|
||||
for server, signature_block in signed["signatures"].items():
|
||||
for key_name, encoded_signature in signature_block.items():
|
||||
if not key_name.startswith("ed25519:"):
|
||||
continue
|
||||
verify_key = decode_verify_key_bytes(
|
||||
key_name,
|
||||
decode_base64(public_key)
|
||||
)
|
||||
verify_signed_json(signed, server, verify_key)
|
||||
|
||||
# We got the public key from the invite, so we know that the
|
||||
# correct server signed the signed bundle.
|
||||
# The caller is responsible for checking that the signing
|
||||
# server has not revoked that public key.
|
||||
return True
|
||||
except (KeyError, SignatureVerifyException,):
|
||||
continue
|
||||
return False
|
||||
|
||||
def get_public_keys(self, invite_event):
|
||||
public_keys = []
|
||||
if "public_key" in invite_event.content:
|
||||
o = {
|
||||
"public_key": invite_event.content["public_key"],
|
||||
}
|
||||
if "key_validity_url" in invite_event.content:
|
||||
o["key_validity_url"] = invite_event.content["key_validity_url"]
|
||||
public_keys.append(o)
|
||||
public_keys.extend(invite_event.content.get("public_keys", []))
|
||||
return public_keys
|
||||
|
||||
def _get_power_level_event(self, auth_events):
|
||||
key = (EventTypes.PowerLevels, "", )
|
||||
return auth_events.get(key)
|
||||
@@ -510,36 +521,15 @@ class Auth(object):
|
||||
"""
|
||||
# Can optionally look elsewhere in the request (e.g. headers)
|
||||
try:
|
||||
access_token = request.args["access_token"][0]
|
||||
|
||||
# Check for application service tokens with a user_id override
|
||||
try:
|
||||
app_service = yield self.store.get_app_service_by_token(
|
||||
access_token
|
||||
)
|
||||
if not app_service:
|
||||
raise KeyError
|
||||
|
||||
user_id = app_service.sender
|
||||
if "user_id" in request.args:
|
||||
user_id = request.args["user_id"][0]
|
||||
if not app_service.is_interested_in_user(user_id):
|
||||
raise AuthError(
|
||||
403,
|
||||
"Application service cannot masquerade as this user."
|
||||
)
|
||||
|
||||
if not user_id:
|
||||
raise KeyError
|
||||
|
||||
user_id = yield self._get_appservice_user_id(request.args)
|
||||
if user_id:
|
||||
request.authenticated_entity = user_id
|
||||
defer.returnValue(
|
||||
Requester(UserID.from_string(user_id), "", False)
|
||||
)
|
||||
|
||||
defer.returnValue((UserID.from_string(user_id), "", False))
|
||||
return
|
||||
except KeyError:
|
||||
pass # normal users won't have the user_id query parameter set.
|
||||
|
||||
user_info = yield self._get_user_by_access_token(access_token)
|
||||
access_token = request.args["access_token"][0]
|
||||
user_info = yield self.get_user_by_access_token(access_token)
|
||||
user = user_info["user"]
|
||||
token_id = user_info["token_id"]
|
||||
is_guest = user_info["is_guest"]
|
||||
@@ -550,7 +540,8 @@ class Auth(object):
|
||||
default=[""]
|
||||
)[0]
|
||||
if user and access_token and ip_addr:
|
||||
self.store.insert_client_ip(
|
||||
preserve_context_over_fn(
|
||||
self.store.insert_client_ip,
|
||||
user=user,
|
||||
access_token=access_token,
|
||||
ip=ip_addr,
|
||||
@@ -564,7 +555,7 @@ class Auth(object):
|
||||
|
||||
request.authenticated_entity = user.to_string()
|
||||
|
||||
defer.returnValue((user, token_id, is_guest,))
|
||||
defer.returnValue(Requester(user, token_id, is_guest))
|
||||
except KeyError:
|
||||
raise AuthError(
|
||||
self.TOKEN_NOT_FOUND_HTTP_STATUS, "Missing access token.",
|
||||
@@ -572,7 +563,34 @@ class Auth(object):
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def _get_user_by_access_token(self, token):
|
||||
def _get_appservice_user_id(self, request_args):
|
||||
app_service = yield self.store.get_app_service_by_token(
|
||||
request_args["access_token"][0]
|
||||
)
|
||||
if app_service is None:
|
||||
defer.returnValue(None)
|
||||
|
||||
if "user_id" not in request_args:
|
||||
defer.returnValue(app_service.sender)
|
||||
|
||||
user_id = request_args["user_id"][0]
|
||||
if app_service.sender == user_id:
|
||||
defer.returnValue(app_service.sender)
|
||||
|
||||
if not app_service.is_interested_in_user(user_id):
|
||||
raise AuthError(
|
||||
403,
|
||||
"Application service cannot masquerade as this user."
|
||||
)
|
||||
if not (yield self.store.get_user_by_id(user_id)):
|
||||
raise AuthError(
|
||||
403,
|
||||
"Application service has not registered this user"
|
||||
)
|
||||
defer.returnValue(user_id)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def get_user_by_access_token(self, token):
|
||||
""" Get a registered user's ID.
|
||||
|
||||
Args:
|
||||
@@ -583,7 +601,7 @@ class Auth(object):
|
||||
AuthError if no user by that token exists or the token is invalid.
|
||||
"""
|
||||
try:
|
||||
ret = yield self._get_user_from_macaroon(token)
|
||||
ret = yield self.get_user_from_macaroon(token)
|
||||
except AuthError:
|
||||
# TODO(daniel): Remove this fallback when all existing access tokens
|
||||
# have been re-issued as macaroons.
|
||||
@@ -591,7 +609,7 @@ class Auth(object):
|
||||
defer.returnValue(ret)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def _get_user_from_macaroon(self, macaroon_str):
|
||||
def get_user_from_macaroon(self, macaroon_str):
|
||||
try:
|
||||
macaroon = pymacaroons.Macaroon.deserialize(macaroon_str)
|
||||
self.validate_macaroon(macaroon, "access", False)
|
||||
@@ -690,6 +708,7 @@ class Auth(object):
|
||||
def _look_up_user_by_access_token(self, token):
|
||||
ret = yield self.store.get_user_by_access_token(token)
|
||||
if not ret:
|
||||
logger.warn("Unrecognised access token - not in store: %s" % (token,))
|
||||
raise AuthError(
|
||||
self.TOKEN_NOT_FOUND_HTTP_STATUS, "Unrecognised access token.",
|
||||
errcode=Codes.UNKNOWN_TOKEN
|
||||
@@ -707,6 +726,7 @@ class Auth(object):
|
||||
token = request.args["access_token"][0]
|
||||
service = yield self.store.get_app_service_by_token(token)
|
||||
if not service:
|
||||
logger.warn("Unrecognised appservice access token: %s" % (token,))
|
||||
raise AuthError(
|
||||
self.TOKEN_NOT_FOUND_HTTP_STATUS,
|
||||
"Unrecognised access token.",
|
||||
@@ -778,7 +798,7 @@ class Auth(object):
|
||||
if "third_party_invite" in event.content:
|
||||
key = (
|
||||
EventTypes.ThirdPartyInvite,
|
||||
event.content["third_party_invite"]["token"]
|
||||
event.content["third_party_invite"]["signed"]["token"]
|
||||
)
|
||||
third_party_invite = current_state.get(key)
|
||||
if third_party_invite:
|
||||
@@ -789,17 +809,16 @@ class Auth(object):
|
||||
|
||||
return auth_ids
|
||||
|
||||
@log_function
|
||||
def _can_send_event(self, event, auth_events):
|
||||
def _get_send_level(self, etype, state_key, auth_events):
|
||||
key = (EventTypes.PowerLevels, "", )
|
||||
send_level_event = auth_events.get(key)
|
||||
send_level = None
|
||||
if send_level_event:
|
||||
send_level = send_level_event.content.get("events", {}).get(
|
||||
event.type
|
||||
etype
|
||||
)
|
||||
if send_level is None:
|
||||
if hasattr(event, "state_key"):
|
||||
if state_key is not None:
|
||||
send_level = send_level_event.content.get(
|
||||
"state_default", 50
|
||||
)
|
||||
@@ -813,6 +832,13 @@ class Auth(object):
|
||||
else:
|
||||
send_level = 0
|
||||
|
||||
return send_level
|
||||
|
||||
@log_function
|
||||
def _can_send_event(self, event, auth_events):
|
||||
send_level = self._get_send_level(
|
||||
event.type, event.get("state_key", None), auth_events
|
||||
)
|
||||
user_level = self._get_user_power_level(event.user_id, auth_events)
|
||||
|
||||
if user_level < send_level:
|
||||
@@ -957,3 +983,43 @@ class Auth(object):
|
||||
"You don't have permission to add ops level greater "
|
||||
"than your own"
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def check_can_change_room_list(self, room_id, user):
|
||||
"""Check if the user is allowed to edit the room's entry in the
|
||||
published room list.
|
||||
|
||||
Args:
|
||||
room_id (str)
|
||||
user (UserID)
|
||||
"""
|
||||
|
||||
is_admin = yield self.is_server_admin(user)
|
||||
if is_admin:
|
||||
defer.returnValue(True)
|
||||
|
||||
user_id = user.to_string()
|
||||
yield self.check_joined_room(room_id, user_id)
|
||||
|
||||
# We currently require the user is a "moderator" in the room. We do this
|
||||
# by checking if they would (theoretically) be able to change the
|
||||
# m.room.aliases events
|
||||
power_level_event = yield self.state.get_current_state(
|
||||
room_id, EventTypes.PowerLevels, ""
|
||||
)
|
||||
|
||||
auth_events = {}
|
||||
if power_level_event:
|
||||
auth_events[(EventTypes.PowerLevels, "")] = power_level_event
|
||||
|
||||
send_level = self._get_send_level(
|
||||
EventTypes.Aliases, "", auth_events
|
||||
)
|
||||
user_level = self._get_user_power_level(user_id, auth_events)
|
||||
|
||||
if user_level < send_level:
|
||||
raise AuthError(
|
||||
403,
|
||||
"This server requires you to be a moderator in the room to"
|
||||
" edit its room list entry"
|
||||
)
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2014, 2015 OpenMarket Ltd
|
||||
# Copyright 2014-2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -32,7 +32,6 @@ class PresenceState(object):
|
||||
OFFLINE = u"offline"
|
||||
UNAVAILABLE = u"unavailable"
|
||||
ONLINE = u"online"
|
||||
FREE_FOR_CHAT = u"free_for_chat"
|
||||
|
||||
|
||||
class JoinRules(object):
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2014, 2015 OpenMarket Ltd
|
||||
# Copyright 2014-2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -29,6 +29,7 @@ class Codes(object):
|
||||
USER_IN_USE = "M_USER_IN_USE"
|
||||
ROOM_IN_USE = "M_ROOM_IN_USE"
|
||||
BAD_PAGINATION = "M_BAD_PAGINATION"
|
||||
BAD_STATE = "M_BAD_STATE"
|
||||
UNKNOWN = "M_UNKNOWN"
|
||||
NOT_FOUND = "M_NOT_FOUND"
|
||||
MISSING_TOKEN = "M_MISSING_TOKEN"
|
||||
@@ -42,6 +43,7 @@ class Codes(object):
|
||||
EXCLUSIVE = "M_EXCLUSIVE"
|
||||
THREEPID_AUTH_FAILED = "M_THREEPID_AUTH_FAILED"
|
||||
THREEPID_IN_USE = "THREEPID_IN_USE"
|
||||
INVALID_USERNAME = "M_INVALID_USERNAME"
|
||||
|
||||
|
||||
class CodeMessageException(RuntimeError):
|
||||
|
||||
+69
-55
@@ -1,5 +1,5 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2015 OpenMarket Ltd
|
||||
# Copyright 2015, 2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -15,6 +15,8 @@
|
||||
from synapse.api.errors import SynapseError
|
||||
from synapse.types import UserID, RoomID
|
||||
|
||||
import ujson as json
|
||||
|
||||
|
||||
class Filtering(object):
|
||||
|
||||
@@ -28,14 +30,14 @@ class Filtering(object):
|
||||
return result
|
||||
|
||||
def add_user_filter(self, user_localpart, user_filter):
|
||||
self._check_valid_filter(user_filter)
|
||||
self.check_valid_filter(user_filter)
|
||||
return self.store.add_user_filter(user_localpart, user_filter)
|
||||
|
||||
# TODO(paul): surely we should probably add a delete_user_filter or
|
||||
# replace_user_filter at some point? There's no REST API specified for
|
||||
# them however
|
||||
|
||||
def _check_valid_filter(self, user_filter_json):
|
||||
def check_valid_filter(self, user_filter_json):
|
||||
"""Check if the provided filter is valid.
|
||||
|
||||
This inspects all definitions contained within the filter.
|
||||
@@ -62,10 +64,29 @@ class Filtering(object):
|
||||
self._check_definition(user_filter_json[key])
|
||||
|
||||
if "room" in user_filter_json:
|
||||
self._check_definition_room_lists(user_filter_json["room"])
|
||||
for key in room_level_definitions:
|
||||
if key in user_filter_json["room"]:
|
||||
self._check_definition(user_filter_json["room"][key])
|
||||
|
||||
def _check_definition_room_lists(self, definition):
|
||||
"""Check that "rooms" and "not_rooms" are lists of room ids if they
|
||||
are present
|
||||
|
||||
Args:
|
||||
definition(dict): The filter definition
|
||||
Raises:
|
||||
SynapseError: If there was a problem with this definition.
|
||||
"""
|
||||
# check rooms are valid room IDs
|
||||
room_id_keys = ["rooms", "not_rooms"]
|
||||
for key in room_id_keys:
|
||||
if key in definition:
|
||||
if type(definition[key]) != list:
|
||||
raise SynapseError(400, "Expected %s to be a list." % key)
|
||||
for room_id in definition[key]:
|
||||
RoomID.from_string(room_id)
|
||||
|
||||
def _check_definition(self, definition):
|
||||
"""Check if the provided definition is valid.
|
||||
|
||||
@@ -85,14 +106,7 @@ class Filtering(object):
|
||||
400, "Expected JSON object, not %s" % (definition,)
|
||||
)
|
||||
|
||||
# check rooms are valid room IDs
|
||||
room_id_keys = ["rooms", "not_rooms"]
|
||||
for key in room_id_keys:
|
||||
if key in definition:
|
||||
if type(definition[key]) != list:
|
||||
raise SynapseError(400, "Expected %s to be a list." % key)
|
||||
for room_id in definition[key]:
|
||||
RoomID.from_string(room_id)
|
||||
self._check_definition_room_lists(definition)
|
||||
|
||||
# check senders are valid user IDs
|
||||
user_id_keys = ["senders", "not_senders"]
|
||||
@@ -117,62 +131,58 @@ class Filtering(object):
|
||||
|
||||
class FilterCollection(object):
|
||||
def __init__(self, filter_json):
|
||||
self.filter_json = filter_json
|
||||
self._filter_json = filter_json
|
||||
|
||||
self.room_timeline_filter = Filter(
|
||||
self.filter_json.get("room", {}).get("timeline", {})
|
||||
)
|
||||
room_filter_json = self._filter_json.get("room", {})
|
||||
|
||||
self.room_state_filter = Filter(
|
||||
self.filter_json.get("room", {}).get("state", {})
|
||||
)
|
||||
self._room_filter = Filter({
|
||||
k: v for k, v in room_filter_json.items()
|
||||
if k in ("rooms", "not_rooms")
|
||||
})
|
||||
|
||||
self.room_ephemeral_filter = Filter(
|
||||
self.filter_json.get("room", {}).get("ephemeral", {})
|
||||
)
|
||||
self._room_timeline_filter = Filter(room_filter_json.get("timeline", {}))
|
||||
self._room_state_filter = Filter(room_filter_json.get("state", {}))
|
||||
self._room_ephemeral_filter = Filter(room_filter_json.get("ephemeral", {}))
|
||||
self._room_account_data = Filter(room_filter_json.get("account_data", {}))
|
||||
self._presence_filter = Filter(filter_json.get("presence", {}))
|
||||
self._account_data = Filter(filter_json.get("account_data", {}))
|
||||
|
||||
self.room_account_data = Filter(
|
||||
self.filter_json.get("room", {}).get("account_data", {})
|
||||
)
|
||||
|
||||
self.presence_filter = Filter(
|
||||
self.filter_json.get("presence", {})
|
||||
)
|
||||
|
||||
self.account_data = Filter(
|
||||
self.filter_json.get("account_data", {})
|
||||
)
|
||||
|
||||
self.include_leave = self.filter_json.get("room", {}).get(
|
||||
self.include_leave = filter_json.get("room", {}).get(
|
||||
"include_leave", False
|
||||
)
|
||||
|
||||
def __repr__(self):
|
||||
return "<FilterCollection %s>" % (json.dumps(self._filter_json),)
|
||||
|
||||
def get_filter_json(self):
|
||||
return self._filter_json
|
||||
|
||||
def timeline_limit(self):
|
||||
return self.room_timeline_filter.limit()
|
||||
return self._room_timeline_filter.limit()
|
||||
|
||||
def presence_limit(self):
|
||||
return self.presence_filter.limit()
|
||||
return self._presence_filter.limit()
|
||||
|
||||
def ephemeral_limit(self):
|
||||
return self.room_ephemeral_filter.limit()
|
||||
return self._room_ephemeral_filter.limit()
|
||||
|
||||
def filter_presence(self, events):
|
||||
return self.presence_filter.filter(events)
|
||||
return self._presence_filter.filter(events)
|
||||
|
||||
def filter_account_data(self, events):
|
||||
return self.account_data.filter(events)
|
||||
return self._account_data.filter(events)
|
||||
|
||||
def filter_room_state(self, events):
|
||||
return self.room_state_filter.filter(events)
|
||||
return self._room_state_filter.filter(self._room_filter.filter(events))
|
||||
|
||||
def filter_room_timeline(self, events):
|
||||
return self.room_timeline_filter.filter(events)
|
||||
return self._room_timeline_filter.filter(self._room_filter.filter(events))
|
||||
|
||||
def filter_room_ephemeral(self, events):
|
||||
return self.room_ephemeral_filter.filter(events)
|
||||
return self._room_ephemeral_filter.filter(self._room_filter.filter(events))
|
||||
|
||||
def filter_room_account_data(self, events):
|
||||
return self.room_account_data.filter(events)
|
||||
return self._room_account_data.filter(self._room_filter.filter(events))
|
||||
|
||||
|
||||
class Filter(object):
|
||||
@@ -185,18 +195,19 @@ class Filter(object):
|
||||
Returns:
|
||||
bool: True if the event matches
|
||||
"""
|
||||
if isinstance(event, dict):
|
||||
return self.check_fields(
|
||||
event.get("room_id", None),
|
||||
event.get("sender", None),
|
||||
event.get("type", None),
|
||||
)
|
||||
else:
|
||||
return self.check_fields(
|
||||
getattr(event, "room_id", None),
|
||||
getattr(event, "sender", None),
|
||||
event.type,
|
||||
)
|
||||
sender = event.get("sender", None)
|
||||
if not sender:
|
||||
# Presence events have their 'sender' in content.user_id
|
||||
content = event.get("content")
|
||||
# account_data has been allowed to have non-dict content, so check type first
|
||||
if isinstance(content, dict):
|
||||
sender = content.get("user_id")
|
||||
|
||||
return self.check_fields(
|
||||
event.get("room_id", None),
|
||||
sender,
|
||||
event.get("type", None),
|
||||
)
|
||||
|
||||
def check_fields(self, room_id, sender, event_type):
|
||||
"""Checks whether the filter matches the given event fields.
|
||||
@@ -256,3 +267,6 @@ def _matches_wildcard(actual_value, filter_value):
|
||||
return actual_value.startswith(type_prefix)
|
||||
else:
|
||||
return actual_value == filter_value
|
||||
|
||||
|
||||
DEFAULT_FILTER_COLLECTION = FilterCollection({})
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# Copyright 2014, 2015 OpenMarket Ltd
|
||||
# Copyright 2014-2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
|
||||
+3
-2
@@ -1,5 +1,5 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2014, 2015 OpenMarket Ltd
|
||||
# Copyright 2014-2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -23,5 +23,6 @@ WEB_CLIENT_PREFIX = "/_matrix/client"
|
||||
CONTENT_REPO_PREFIX = "/_matrix/content"
|
||||
SERVER_KEY_PREFIX = "/_matrix/key/v1"
|
||||
SERVER_KEY_V2_PREFIX = "/_matrix/key/v2"
|
||||
MEDIA_PREFIX = "/_matrix/media/v1"
|
||||
MEDIA_PREFIX = "/_matrix/media/r0"
|
||||
LEGACY_MEDIA_PREFIX = "/_matrix/media/v1"
|
||||
APP_SERVICE_PREFIX = "/_matrix/appservice/v1"
|
||||
|
||||
+20
-1
@@ -1,5 +1,5 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2014, 2015 OpenMarket Ltd
|
||||
# Copyright 2014-2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -12,3 +12,22 @@
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import sys
|
||||
sys.dont_write_bytecode = True
|
||||
|
||||
from synapse.python_dependencies import (
|
||||
check_requirements, MissingRequirementError
|
||||
) # NOQA
|
||||
|
||||
try:
|
||||
check_requirements()
|
||||
except MissingRequirementError as e:
|
||||
message = "\n".join([
|
||||
"Missing Requirement: %s" % (e.message,),
|
||||
"To install run:",
|
||||
" pip install --upgrade --force \"%s\"" % (e.dependency,),
|
||||
"",
|
||||
])
|
||||
sys.stderr.writelines(message)
|
||||
sys.exit(1)
|
||||
|
||||
+168
-250
@@ -1,6 +1,6 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2014, 2015 OpenMarket Ltd
|
||||
# Copyright 2014-2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -14,153 +14,104 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import sys
|
||||
from synapse.rest import ClientRestResource
|
||||
|
||||
sys.dont_write_bytecode = True
|
||||
from synapse.python_dependencies import (
|
||||
check_requirements, DEPENDENCY_LINKS, MissingRequirementError
|
||||
)
|
||||
|
||||
if __name__ == '__main__':
|
||||
try:
|
||||
check_requirements()
|
||||
except MissingRequirementError as e:
|
||||
message = "\n".join([
|
||||
"Missing Requirement: %s" % (e.message,),
|
||||
"To install run:",
|
||||
" pip install --upgrade --force \"%s\"" % (e.dependency,),
|
||||
"",
|
||||
])
|
||||
sys.stderr.writelines(message)
|
||||
sys.exit(1)
|
||||
|
||||
from synapse.storage.engines import create_engine, IncorrectDatabaseSetup
|
||||
from synapse.storage import are_all_users_on_domain
|
||||
from synapse.storage.prepare_database import UpgradeDatabaseException
|
||||
|
||||
from synapse.server import HomeServer
|
||||
|
||||
|
||||
from twisted.internet import reactor, task, defer
|
||||
from twisted.application import service
|
||||
from twisted.enterprise import adbapi
|
||||
from twisted.web.resource import Resource, EncodingResourceWrapper
|
||||
from twisted.web.static import File
|
||||
from twisted.web.server import Site, GzipEncoderFactory, Request
|
||||
from synapse.http.server import JsonResource, RootRedirect
|
||||
from synapse.rest.media.v0.content_repository import ContentRepoResource
|
||||
from synapse.rest.media.v1.media_repository import MediaRepositoryResource
|
||||
from synapse.rest.key.v1.server_key_resource import LocalKey
|
||||
from synapse.rest.key.v2 import KeyApiV2Resource
|
||||
from synapse.http.matrixfederationclient import MatrixFederationHttpClient
|
||||
from synapse.api.urls import (
|
||||
FEDERATION_PREFIX, WEB_CLIENT_PREFIX, CONTENT_REPO_PREFIX,
|
||||
SERVER_KEY_PREFIX, MEDIA_PREFIX, STATIC_PREFIX,
|
||||
SERVER_KEY_V2_PREFIX,
|
||||
)
|
||||
from synapse.config.homeserver import HomeServerConfig
|
||||
from synapse.crypto import context_factory
|
||||
from synapse.util.logcontext import LoggingContext
|
||||
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
|
||||
|
||||
from synapse import events
|
||||
|
||||
from daemonize import Daemonize
|
||||
import twisted.manhole.telnet
|
||||
|
||||
import synapse
|
||||
|
||||
import contextlib
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import resource
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
from synapse.config._base import ConfigError
|
||||
|
||||
from synapse.python_dependencies import (
|
||||
check_requirements, DEPENDENCY_LINKS
|
||||
)
|
||||
|
||||
from synapse.rest import ClientRestResource
|
||||
from synapse.storage.engines import create_engine, IncorrectDatabaseSetup
|
||||
from synapse.storage import are_all_users_on_domain
|
||||
from synapse.storage.prepare_database import UpgradeDatabaseException, prepare_database
|
||||
|
||||
from synapse.server import HomeServer
|
||||
|
||||
|
||||
from twisted.conch.manhole import ColoredManhole
|
||||
from twisted.conch.insults import insults
|
||||
from twisted.conch import manhole_ssh
|
||||
from twisted.cred import checkers, portal
|
||||
|
||||
|
||||
from twisted.internet import reactor, task, defer
|
||||
from twisted.application import service
|
||||
from twisted.web.resource import Resource, EncodingResourceWrapper
|
||||
from twisted.web.static import File
|
||||
from twisted.web.server import Site, GzipEncoderFactory, Request
|
||||
from synapse.http.server import RootRedirect
|
||||
from synapse.rest.media.v0.content_repository import ContentRepoResource
|
||||
from synapse.rest.media.v1.media_repository import MediaRepositoryResource
|
||||
from synapse.rest.key.v1.server_key_resource import LocalKey
|
||||
from synapse.rest.key.v2 import KeyApiV2Resource
|
||||
from synapse.api.urls import (
|
||||
FEDERATION_PREFIX, WEB_CLIENT_PREFIX, CONTENT_REPO_PREFIX,
|
||||
SERVER_KEY_PREFIX, LEGACY_MEDIA_PREFIX, MEDIA_PREFIX, STATIC_PREFIX,
|
||||
SERVER_KEY_V2_PREFIX,
|
||||
)
|
||||
from synapse.config.homeserver import HomeServerConfig
|
||||
from synapse.crypto import context_factory
|
||||
from synapse.util.logcontext import LoggingContext
|
||||
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
|
||||
from synapse.replication.resource import ReplicationResource, REPLICATION_PREFIX
|
||||
from synapse.federation.transport.server import TransportLayerServer
|
||||
|
||||
from synapse.util.rlimit import change_resource_limit
|
||||
from synapse.util.versionstring import get_version_string
|
||||
|
||||
from synapse import events
|
||||
|
||||
from daemonize import Daemonize
|
||||
|
||||
logger = logging.getLogger("synapse.app.homeserver")
|
||||
|
||||
|
||||
ACCESS_TOKEN_RE = re.compile(r'(\?.*access(_|%5[Ff])token=)[^&]*(.*)$')
|
||||
|
||||
|
||||
def gz_wrap(r):
|
||||
return EncodingResourceWrapper(r, [GzipEncoderFactory()])
|
||||
|
||||
|
||||
def build_resource_for_web_client(hs):
|
||||
webclient_path = hs.get_config().web_client_location
|
||||
if not webclient_path:
|
||||
try:
|
||||
import syweb
|
||||
except ImportError:
|
||||
quit_with_error(
|
||||
"Could not find a webclient.\n\n"
|
||||
"Please either install the matrix-angular-sdk or configure\n"
|
||||
"the location of the source to serve via the configuration\n"
|
||||
"option `web_client_location`\n\n"
|
||||
"To install the `matrix-angular-sdk` via pip, run:\n\n"
|
||||
" pip install '%(dep)s'\n"
|
||||
"\n"
|
||||
"You can also disable hosting of the webclient via the\n"
|
||||
"configuration option `web_client`\n"
|
||||
% {"dep": DEPENDENCY_LINKS["matrix-angular-sdk"]}
|
||||
)
|
||||
syweb_path = os.path.dirname(syweb.__file__)
|
||||
webclient_path = os.path.join(syweb_path, "webclient")
|
||||
# GZip is disabled here due to
|
||||
# https://twistedmatrix.com/trac/ticket/7678
|
||||
# (It can stay enabled for the API resources: they call
|
||||
# write() with the whole body and then finish() straight
|
||||
# after and so do not trigger the bug.
|
||||
# GzipFile was removed in commit 184ba09
|
||||
# return GzipFile(webclient_path) # TODO configurable?
|
||||
return File(webclient_path) # TODO configurable?
|
||||
|
||||
|
||||
class SynapseHomeServer(HomeServer):
|
||||
|
||||
def build_http_client(self):
|
||||
return MatrixFederationHttpClient(self)
|
||||
|
||||
def build_client_resource(self):
|
||||
return ClientRestResource(self)
|
||||
|
||||
def build_resource_for_federation(self):
|
||||
return JsonResource(self)
|
||||
|
||||
def build_resource_for_web_client(self):
|
||||
webclient_path = self.get_config().web_client_location
|
||||
if not webclient_path:
|
||||
try:
|
||||
import syweb
|
||||
except ImportError:
|
||||
quit_with_error(
|
||||
"Could not find a webclient.\n\n"
|
||||
"Please either install the matrix-angular-sdk or configure\n"
|
||||
"the location of the source to serve via the configuration\n"
|
||||
"option `web_client_location`\n\n"
|
||||
"To install the `matrix-angular-sdk` via pip, run:\n\n"
|
||||
" pip install '%(dep)s'\n"
|
||||
"\n"
|
||||
"You can also disable hosting of the webclient via the\n"
|
||||
"configuration option `web_client`\n"
|
||||
% {"dep": DEPENDENCY_LINKS["matrix-angular-sdk"]}
|
||||
)
|
||||
syweb_path = os.path.dirname(syweb.__file__)
|
||||
webclient_path = os.path.join(syweb_path, "webclient")
|
||||
# GZip is disabled here due to
|
||||
# https://twistedmatrix.com/trac/ticket/7678
|
||||
# (It can stay enabled for the API resources: they call
|
||||
# write() with the whole body and then finish() straight
|
||||
# after and so do not trigger the bug.
|
||||
# GzipFile was removed in commit 184ba09
|
||||
# return GzipFile(webclient_path) # TODO configurable?
|
||||
return File(webclient_path) # TODO configurable?
|
||||
|
||||
def build_resource_for_static_content(self):
|
||||
# This is old and should go away: not going to bother adding gzip
|
||||
return File(
|
||||
os.path.join(os.path.dirname(synapse.__file__), "static")
|
||||
)
|
||||
|
||||
def build_resource_for_content_repo(self):
|
||||
return ContentRepoResource(
|
||||
self, self.config.uploads_path, self.auth, self.content_addr
|
||||
)
|
||||
|
||||
def build_resource_for_media_repository(self):
|
||||
return MediaRepositoryResource(self)
|
||||
|
||||
def build_resource_for_server_key(self):
|
||||
return LocalKey(self)
|
||||
|
||||
def build_resource_for_server_key_v2(self):
|
||||
return KeyApiV2Resource(self)
|
||||
|
||||
def build_resource_for_metrics(self):
|
||||
if self.get_config().enable_metrics:
|
||||
return MetricsResource(self)
|
||||
else:
|
||||
return None
|
||||
|
||||
def build_db_pool(self):
|
||||
name = self.db_config["name"]
|
||||
|
||||
return adbapi.ConnectionPool(
|
||||
name,
|
||||
**self.db_config.get("args", {})
|
||||
)
|
||||
|
||||
def _listener_http(self, config, listener_config):
|
||||
port = listener_config["port"]
|
||||
bind_address = listener_config.get("bind_address", "")
|
||||
@@ -170,13 +121,11 @@ class SynapseHomeServer(HomeServer):
|
||||
if tls and config.no_tls:
|
||||
return
|
||||
|
||||
metrics_resource = self.get_resource_for_metrics()
|
||||
|
||||
resources = {}
|
||||
for res in listener_config["resources"]:
|
||||
for name in res["names"]:
|
||||
if name == "client":
|
||||
client_resource = self.get_client_resource()
|
||||
client_resource = ClientRestResource(self)
|
||||
if res["compress"]:
|
||||
client_resource = gz_wrap(client_resource)
|
||||
|
||||
@@ -185,35 +134,45 @@ class SynapseHomeServer(HomeServer):
|
||||
"/_matrix/client/r0": client_resource,
|
||||
"/_matrix/client/unstable": client_resource,
|
||||
"/_matrix/client/v2_alpha": client_resource,
|
||||
"/_matrix/client/versions": client_resource,
|
||||
})
|
||||
|
||||
if name == "federation":
|
||||
resources.update({
|
||||
FEDERATION_PREFIX: self.get_resource_for_federation(),
|
||||
FEDERATION_PREFIX: TransportLayerServer(self),
|
||||
})
|
||||
|
||||
if name in ["static", "client"]:
|
||||
resources.update({
|
||||
STATIC_PREFIX: self.get_resource_for_static_content(),
STATIC_PREFIX: File(
os.path.join(os.path.dirname(synapse.__file__), "static")
),
})

if name in ["media", "federation", "client"]:
media_repo = MediaRepositoryResource(self)
resources.update({
MEDIA_PREFIX: self.get_resource_for_media_repository(),
CONTENT_REPO_PREFIX: self.get_resource_for_content_repo(),
MEDIA_PREFIX: media_repo,
LEGACY_MEDIA_PREFIX: media_repo,
CONTENT_REPO_PREFIX: ContentRepoResource(
self, self.config.uploads_path, self.auth, self.content_addr
),
})

if name in ["keys", "federation"]:
resources.update({
SERVER_KEY_PREFIX: self.get_resource_for_server_key(),
SERVER_KEY_V2_PREFIX: self.get_resource_for_server_key_v2(),
SERVER_KEY_PREFIX: LocalKey(self),
SERVER_KEY_V2_PREFIX: KeyApiV2Resource(self),
})

if name == "webclient":
resources[WEB_CLIENT_PREFIX] = self.get_resource_for_web_client()
resources[WEB_CLIENT_PREFIX] = build_resource_for_web_client(self)

if name == "metrics" and metrics_resource:
resources[METRICS_PREFIX] = metrics_resource
if name == "metrics" and self.get_config().enable_metrics:
resources[METRICS_PREFIX] = MetricsResource(self)

if name == "replication":
resources[REPLICATION_PREFIX] = ReplicationResource(self)

root_resource = create_resource_tree(resources)
if tls:

@@ -248,10 +207,21 @@ class SynapseHomeServer(HomeServer):
if listener["type"] == "http":
self._listener_http(config, listener)
elif listener["type"] == "manhole":
f = twisted.manhole.telnet.ShellFactory()
f.username = "matrix"
f.password = "rabbithole"
f.namespace['hs'] = self
checker = checkers.InMemoryUsernamePasswordDatabaseDontUse(
matrix="rabbithole"
)

rlm = manhole_ssh.TerminalRealm()
rlm.chainedProtocolFactory = lambda: insults.ServerProtocol(
ColoredManhole,
{
"__name__": "__console__",
"hs": self,
}
)

f = manhole_ssh.ConchFactory(portal.Portal(rlm, [checker]))

reactor.listenTCP(
listener["port"],
f,

@@ -276,6 +246,19 @@ class SynapseHomeServer(HomeServer):
except IncorrectDatabaseSetup as e:
quit_with_error(e.message)

def get_db_conn(self, run_new_connection=True):
# Any param beginning with cp_ is a parameter for adbapi, and should
# not be passed to the database engine.
db_params = {
k: v for k, v in self.db_config.get("args", {}).items()
if not k.startswith("cp_")
}
db_conn = self.database_engine.module.connect(**db_params)

if run_new_connection:
self.database_engine.on_new_connection(db_conn)
return db_conn


def quit_with_error(error_string):
message_lines = error_string.split("\n")

@@ -287,83 +270,6 @@ def quit_with_error(error_string):
sys.exit(1)


def get_version_string():
try:
null = open(os.devnull, 'w')
cwd = os.path.dirname(os.path.abspath(__file__))
try:
git_branch = subprocess.check_output(
['git', 'rev-parse', '--abbrev-ref', 'HEAD'],
stderr=null,
cwd=cwd,
).strip()
git_branch = "b=" + git_branch
except subprocess.CalledProcessError:
git_branch = ""

try:
git_tag = subprocess.check_output(
['git', 'describe', '--exact-match'],
stderr=null,
cwd=cwd,
).strip()
git_tag = "t=" + git_tag
except subprocess.CalledProcessError:
git_tag = ""

try:
git_commit = subprocess.check_output(
['git', 'rev-parse', '--short', 'HEAD'],
stderr=null,
cwd=cwd,
).strip()
except subprocess.CalledProcessError:
git_commit = ""

try:
dirty_string = "-this_is_a_dirty_checkout"
is_dirty = subprocess.check_output(
['git', 'describe', '--dirty=' + dirty_string],
stderr=null,
cwd=cwd,
).strip().endswith(dirty_string)

git_dirty = "dirty" if is_dirty else ""
except subprocess.CalledProcessError:
git_dirty = ""

if git_branch or git_tag or git_commit or git_dirty:
git_version = ",".join(
s for s in
(git_branch, git_tag, git_commit, git_dirty,)
if s
)

return (
"Synapse/%s (%s)" % (
synapse.__version__, git_version,
)
).encode("ascii")
except Exception as e:
logger.info("Failed to check for git repository: %s", e)

return ("Synapse/%s" % (synapse.__version__,)).encode("ascii")


def change_resource_limit(soft_file_no):
try:
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)

if not soft_file_no:
soft_file_no = hard

resource.setrlimit(resource.RLIMIT_NOFILE, (soft_file_no, hard))

logger.info("Set file limit to: %d", soft_file_no)
except (ValueError, resource.error) as e:
logger.warn("Failed to set file limit: %s", e)


def setup(config_options):
"""
Args:
@@ -373,18 +279,27 @@ def setup(config_options):
Returns:
HomeServer
"""
config = HomeServerConfig.load_config(
"Synapse Homeserver",
config_options,
generate_section="Homeserver"
)
try:
config = HomeServerConfig.load_config(
"Synapse Homeserver",
config_options,
generate_section="Homeserver"
)
except ConfigError as e:
sys.stderr.write("\n" + e.message + "\n")
sys.exit(1)

if not config:
# If a config isn't returned, and an exception isn't raised, we're just
# generating config files and shouldn't try to continue.
sys.exit(0)

config.setup_logging()

# check any extra requirements we have now we have a config
check_requirements(config)

version_string = get_version_string()
version_string = get_version_string("Synapse", synapse)

logger.info("Server hostname: %s", config.server_name)
logger.info("Server version: %s", version_string)
@@ -393,7 +308,7 @@ def setup(config_options):

tls_server_context_factory = context_factory.ServerContextFactory(config)

database_engine = create_engine(config.database_config["name"])
database_engine = create_engine(config.database_config)
config.database_config["args"]["cp_openfun"] = database_engine.on_new_connection

hs = SynapseHomeServer(
@@ -409,14 +324,10 @@ def setup(config_options):
logger.info("Preparing database: %s...", config.database_config['name'])

try:
db_conn = database_engine.module.connect(
**{
k: v for k, v in config.database_config.get("args", {}).items()
if not k.startswith("cp_")
}
)
db_conn = hs.get_db_conn(run_new_connection=False)
prepare_database(db_conn, database_engine, config=config)
database_engine.on_new_connection(db_conn)

database_engine.prepare_database(db_conn)
hs.run_startup_checks(db_conn, database_engine)

db_conn.commit()
@@ -430,13 +341,17 @@ def setup(config_options):

logger.info("Database prepared in %s.", config.database_config['name'])

hs.setup()
hs.start_listening()

hs.get_pusherpool().start()
hs.get_state_handler().start_caching()
hs.get_datastore().start_profiling()
hs.get_datastore().start_doing_background_updates()
hs.get_replication_layer().start_get_pdu_cache()
def start():
hs.get_pusherpool().start()
hs.get_state_handler().start_caching()
hs.get_datastore().start_profiling()
hs.get_datastore().start_doing_background_updates()
hs.get_replication_layer().start_get_pdu_cache()

reactor.callWhenRunning(start)

return hs

@@ -475,9 +390,8 @@ class SynapseRequest(Request):
)

def get_redacted_uri(self):
return re.sub(
r'(\?.*access_token=)[^&]*(.*)$',
r'\1<redacted>\2',
return ACCESS_TOKEN_RE.sub(
r'\1<redacted>\3',
self.uri
)
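Note on the hunk above: the inline regular expression is replaced by a precompiled ACCESS_TOKEN_RE whose substitution now references a third capture group. The pattern itself is defined elsewhere in the file and is not part of this diff, so the following is only a minimal, hypothetical sketch of a three-group pattern that would behave the way the new call expects.

import re

# Hypothetical sketch (not the actual ACCESS_TOKEN_RE from the codebase):
# group 1 = everything up to the token value, group 2 = the token value,
# group 3 = the rest of the query string.
ACCESS_TOKEN_RE = re.compile(r'(\?.*access_token=)([^&]*)(.*)$')

def get_redacted_uri(uri):
    # Drop the token value (group 2) and keep groups 1 and 3.
    return ACCESS_TOKEN_RE.sub(r'\1<redacted>\3', uri)

print(get_redacted_uri("/sync?access_token=secret&since=s1"))
# -> /sync?access_token=<redacted>&since=s1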
@@ -653,7 +567,7 @@ def _resource_id(resource, path_seg):
the mapping should looks like _resource_id(A,C) = B.

Args:
resource (Resource): The *parent* Resource
resource (Resource): The *parent* Resourceb
path_seg (str): The name of the child Resource to be attached.
Returns:
str: A unique string which can be a key to the child Resource.
@@ -688,6 +602,7 @@ def run(hs):

@defer.inlineCallbacks
def phone_stats_home():
logger.info("Gathering stats for reporting")
now = int(hs.get_clock().time())
uptime = int(now - start_time)
if uptime < 0:
@@ -699,8 +614,8 @@ def run(hs):
stats["uptime_seconds"] = uptime
stats["total_users"] = yield hs.get_datastore().count_all_users()

all_rooms = yield hs.get_datastore().get_rooms(False)
stats["total_room_count"] = len(all_rooms)
room_count = yield hs.get_datastore().get_room_count()
stats["total_room_count"] = room_count

stats["daily_active_users"] = yield hs.get_datastore().count_daily_users()
daily_messages = yield hs.get_datastore().count_daily_messages()
@@ -718,9 +633,12 @@ def run(hs):

if hs.config.report_stats:
phone_home_task = task.LoopingCall(phone_stats_home)
logger.info("Scheduling stats reporting for 24 hour intervals")
phone_home_task.start(60 * 60 * 24, now=False)

def in_thread():
# Uncomment to enable tracing of log context changes.
# sys.settrace(logcontext_tracer)
with LoggingContext("run"):
change_resource_limit(hs.config.soft_file_limit)
reactor.run()
@@ -728,7 +646,7 @@ def run(hs):
if hs.config.daemonize:

if hs.config.print_pidfile:
print hs.config.pid_file
print (hs.config.pid_file)

daemon = Daemonize(
app="synapse-homeserver",
@@ -0,0 +1,206 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import synapse

from synapse.server import HomeServer
from synapse.util.versionstring import get_version_string
from synapse.config._base import ConfigError
from synapse.config.database import DatabaseConfig
from synapse.config.logger import LoggingConfig
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.pushers import SlavedPusherStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.storage.engines import create_engine
from synapse.storage import DataStore
from synapse.util.async import sleep
from synapse.util.logcontext import (LoggingContext, preserve_fn)

from twisted.internet import reactor, defer

import sys
import logging

logger = logging.getLogger("synapse.app.pusher")


class SlaveConfig(DatabaseConfig):
def read_config(self, config):
self.replication_url = config["replication_url"]
self.server_name = config["server_name"]
self.use_insecure_ssl_client_just_for_testing_do_not_use = True
self.user_agent_suffix = None
self.start_pushers = True

def default_config(self, **kwargs):
return """\
## Slave ##
#replication_url: https://localhost:{replication_port}/_synapse/replication

report_stats: False
"""


class PusherSlaveConfig(SlaveConfig, LoggingConfig):
pass


class PusherSlaveStore(
SlavedEventStore, SlavedPusherStore, SlavedReceiptsStore
):
update_pusher_last_stream_ordering_and_success = (
DataStore.update_pusher_last_stream_ordering_and_success.__func__
)


class PusherServer(HomeServer):

def get_db_conn(self, run_new_connection=True):
# Any param beginning with cp_ is a parameter for adbapi, and should
# not be passed to the database engine.
db_params = {
k: v for k, v in self.db_config.get("args", {}).items()
if not k.startswith("cp_")
}
db_conn = self.database_engine.module.connect(**db_params)

if run_new_connection:
self.database_engine.on_new_connection(db_conn)
return db_conn

def setup(self):
logger.info("Setting up.")
self.datastore = PusherSlaveStore(self.get_db_conn(), self)
logger.info("Finished setting up.")

def remove_pusher(self, app_id, push_key, user_id):
http_client = self.get_simple_http_client()
replication_url = self.config.replication_url
url = replication_url + "/remove_pushers"
return http_client.post_json_get_json(url, {
"remove": [{
"app_id": app_id,
"push_key": push_key,
"user_id": user_id,
}]
})

@defer.inlineCallbacks
def replicate(self):
http_client = self.get_simple_http_client()
store = self.get_datastore()
replication_url = self.config.replication_url
pusher_pool = self.get_pusherpool()

def stop_pusher(user_id, app_id, pushkey):
key = "%s:%s" % (app_id, pushkey)
pushers_for_user = pusher_pool.pushers.get(user_id, {})
pusher = pushers_for_user.pop(key, None)
if pusher is None:
return
logger.info("Stopping pusher %r / %r", user_id, key)
pusher.on_stop()

def start_pusher(user_id, app_id, pushkey):
key = "%s:%s" % (app_id, pushkey)
logger.info("Starting pusher %r / %r", user_id, key)
return pusher_pool._refresh_pusher(app_id, pushkey, user_id)

@defer.inlineCallbacks
def poke_pushers(results):
pushers_rows = set(
map(tuple, results.get("pushers", {}).get("rows", []))
)
deleted_pushers_rows = set(
map(tuple, results.get("deleted_pushers", {}).get("rows", []))
)
for row in sorted(pushers_rows | deleted_pushers_rows):
if row in deleted_pushers_rows:
user_id, app_id, pushkey = row[1:4]
stop_pusher(user_id, app_id, pushkey)
elif row in pushers_rows:
user_id = row[1]
app_id = row[5]
pushkey = row[8]
yield start_pusher(user_id, app_id, pushkey)

stream = results.get("events")
if stream:
min_stream_id = stream["rows"][0][0]
max_stream_id = stream["position"]
preserve_fn(pusher_pool.on_new_notifications)(
min_stream_id, max_stream_id
)

stream = results.get("receipts")
if stream:
rows = stream["rows"]
affected_room_ids = set(row[1] for row in rows)
min_stream_id = rows[0][0]
max_stream_id = stream["position"]
preserve_fn(pusher_pool.on_new_receipts)(
min_stream_id, max_stream_id, affected_room_ids
)

while True:
try:
args = store.stream_positions()
args["timeout"] = 30000
result = yield http_client.get_json(replication_url, args=args)
yield store.process_replication(result)
poke_pushers(result)
except:
logger.exception("Error replicating from %r", replication_url)
sleep(30)


def setup(config_options):
try:
config = PusherSlaveConfig.load_config(
"Synapse pusher", config_options
)
except ConfigError as e:
sys.stderr.write("\n" + e.message + "\n")
sys.exit(1)

config.setup_logging()

database_engine = create_engine(config.database_config)

ps = PusherServer(
config.server_name,
db_config=config.database_config,
config=config,
version_string=get_version_string("Synapse", synapse),
database_engine=database_engine,
)

ps.setup()

def start():
ps.replicate()
ps.get_pusherpool().start()
ps.get_datastore().start_profiling()

reactor.callWhenRunning(start)

return ps


if __name__ == '__main__':
with LoggingContext("main"):
ps = setup(sys.argv[1:])
reactor.run()
@@ -1,6 +1,6 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -29,13 +29,13 @@ NORMAL = "\x1b[m"


def start(configfile):
print "Starting ...",
print ("Starting ...")
args = SYNAPSE
args.extend(["--daemonize", "-c", configfile])

try:
subprocess.check_call(args)
print GREEN + "started" + NORMAL
print (GREEN + "started" + NORMAL)
except subprocess.CalledProcessError as e:
print (
RED +
@@ -48,7 +48,7 @@ def stop(pidfile):
if os.path.exists(pidfile):
pid = int(open(pidfile).read())
os.kill(pid, signal.SIGTERM)
print GREEN + "stopped" + NORMAL
print (GREEN + "stopped" + NORMAL)


def main():

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -29,7 +29,7 @@ class ApplicationServiceApi(SimpleHttpClient):
pushing.
"""

def __init__(self, hs):
def __init__(self, hs):
super(ApplicationServiceApi, self).__init__(hs)
self.clock = hs.get_clock()

@@ -100,11 +100,6 @@ class ApplicationServiceApi(SimpleHttpClient):
logger.warning("push_bulk to %s threw exception %s", uri, ex)
defer.returnValue(False)

@defer.inlineCallbacks
def push(self, service, event, txn_id=None):
response = yield self.push_bulk(service, [event], txn_id)
defer.returnValue(response)

def _serialize(self, events):
time_now = self.clock.time_msec()
return [

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,6 +12,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.config._base import ConfigError

if __name__ == "__main__":
import sys
@@ -21,9 +22,13 @@ if __name__ == "__main__":

if action == "read":
key = sys.argv[2]
config = HomeServerConfig.load_config("", sys.argv[3:])
try:
config = HomeServerConfig.load_config("", sys.argv[3:])
except ConfigError as e:
sys.stderr.write("\n" + e.message + "\n")
sys.exit(1)

print getattr(config, key)
print (getattr(config, key))
sys.exit(0)
else:
sys.stderr.write("Unknown command %r\n" % (action,))
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -17,7 +17,6 @@ import argparse
import errno
import os
import yaml
import sys
from textwrap import dedent


@@ -105,7 +104,7 @@ class Config(object):
dir_path = cls.abspath(dir_path)
try:
os.makedirs(dir_path)
except OSError, e:
except OSError as e:
if e.errno != errno.EEXIST:
raise
if not os.path.isdir(dir_path):
@@ -136,13 +135,20 @@ class Config(object):
results.append(getattr(cls, name)(self, *args, **kargs))
return results

def generate_config(self, config_dir_path, server_name, report_stats=None):
def generate_config(
self,
config_dir_path,
server_name,
is_generating_file,
report_stats=None,
):
default_config = "# vim:ft=yaml\n"

default_config += "\n\n".join(dedent(conf) for conf in self.invoke_all(
"default_config",
config_dir_path=config_dir_path,
server_name=server_name,
is_generating_file=is_generating_file,
report_stats=report_stats,
))

@@ -244,8 +250,10 @@ class Config(object):

server_name = config_args.server_name
if not server_name:
print "Must specify a server_name to a generate config for."
sys.exit(1)
raise ConfigError(
"Must specify a server_name to a generate config for."
" Pass -H server.name."
)
if not os.path.exists(config_dir_path):
os.makedirs(config_dir_path)
with open(config_path, "wb") as config_file:
@@ -253,6 +261,7 @@ class Config(object):
config_dir_path=config_dir_path,
server_name=server_name,
report_stats=(config_args.report_stats == "yes"),
is_generating_file=True
)
obj.invoke_all("generate_files", config)
config_file.write(config_bytes)
@@ -266,7 +275,7 @@ class Config(object):
"If this server name is incorrect, you will need to"
" regenerate the SSL certificates"
)
sys.exit(0)
return
else:
print (
"Config file %r already exists. Generating any missing key"
@@ -302,25 +311,25 @@ class Config(object):
specified_config.update(yaml_config)

if "server_name" not in specified_config:
sys.stderr.write("\n" + MISSING_SERVER_NAME + "\n")
sys.exit(1)
raise ConfigError(MISSING_SERVER_NAME)

server_name = specified_config["server_name"]
_, config = obj.generate_config(
config_dir_path=config_dir_path,
server_name=server_name
server_name=server_name,
is_generating_file=False,
)
config.pop("log_config")
config.update(specified_config)
if "report_stats" not in config:
sys.stderr.write(
"\n" + MISSING_REPORT_STATS_CONFIG_INSTRUCTIONS + "\n" +
MISSING_REPORT_STATS_SPIEL + "\n")
sys.exit(1)
raise ConfigError(
MISSING_REPORT_STATS_CONFIG_INSTRUCTIONS + "\n" +
MISSING_REPORT_STATS_SPIEL
)

if generate_keys:
obj.invoke_all("generate_files", config)
sys.exit(0)
return

obj.invoke_all("read_config", config)


@@ -0,0 +1,40 @@
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ._base import Config

from synapse.api.constants import EventTypes


class ApiConfig(Config):

def read_config(self, config):
self.room_invite_state_types = config.get("room_invite_state_types", [
EventTypes.JoinRules,
EventTypes.CanonicalAlias,
EventTypes.RoomAvatar,
EventTypes.Name,
])

def default_config(cls, **kwargs):
return """\
## API Configuration ##

# A list of event types that will be included in the room_invite_state
room_invite_state_types:
- "{JoinRules}"
- "{CanonicalAlias}"
- "{RoomAvatar}"
- "{Name}"
""".format(**vars(EventTypes))
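For context on the ApiConfig default above: str.format is called with vars(EventTypes), so the "{JoinRules}"-style placeholders expand to the literal Matrix event type strings. A small, self-contained illustration of the mechanism (the stand-in class below is an assumption; the real constants live in synapse.api.constants):

# Simplified stand-in for synapse.api.constants.EventTypes
class EventTypes(object):
    JoinRules = "m.room.join_rules"
    CanonicalAlias = "m.room.canonical_alias"
    RoomAvatar = "m.room.avatar"
    Name = "m.room.name"

template = 'room_invite_state_types:\n- "{JoinRules}"\n- "{Name}"\n'
# format(**vars(...)) substitutes the class attributes into the YAML default.
print(template.format(**vars(EventTypes)))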
@@ -1,4 +1,4 @@
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -1,4 +1,4 @@
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -29,10 +29,10 @@ class CaptchaConfig(Config):
## Captcha ##

# This Home Server's ReCAPTCHA public key.
recaptcha_private_key: "YOUR_PRIVATE_KEY"
recaptcha_public_key: "YOUR_PUBLIC_KEY"

# This Home Server's ReCAPTCHA private key.
recaptcha_public_key: "YOUR_PUBLIC_KEY"
recaptcha_private_key: "YOUR_PRIVATE_KEY"

# Enables ReCaptcha checks when registering, preventing signup
# unless a captcha is answered. Requires a valid ReCaptcha

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -23,18 +23,21 @@ from .captcha import CaptchaConfig
from .voip import VoipConfig
from .registration import RegistrationConfig
from .metrics import MetricsConfig
from .api import ApiConfig
from .appservice import AppServiceConfig
from .key import KeyConfig
from .saml2 import SAML2Config
from .cas import CasConfig
from .password import PasswordConfig
from .jwt import JWTConfig
from .ldap import LDAPConfig


class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig,
RatelimitConfig, ContentRepositoryConfig, CaptchaConfig,
VoipConfig, RegistrationConfig, MetricsConfig,
VoipConfig, RegistrationConfig, MetricsConfig, ApiConfig,
AppServiceConfig, KeyConfig, SAML2Config, CasConfig,
PasswordConfig,):
JWTConfig, LDAPConfig, PasswordConfig,):
pass

@@ -0,0 +1,37 @@
# -*- coding: utf-8 -*-
# Copyright 2015 Niklas Riekenbrauck
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ._base import Config


class JWTConfig(Config):
def read_config(self, config):
jwt_config = config.get("jwt_config", None)
if jwt_config:
self.jwt_enabled = jwt_config.get("enabled", False)
self.jwt_secret = jwt_config["secret"]
self.jwt_algorithm = jwt_config["algorithm"]
else:
self.jwt_enabled = False
self.jwt_secret = None
self.jwt_algorithm = None

def default_config(self, **kwargs):
return """\
# jwt_config:
#    enabled: true
#    secret: "a secret"
#    algorithm: "HS256"
"""
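The JWTConfig above only reads the shared secret and algorithm; how a token is minted is outside this diff. A minimal sketch of producing and checking such a token with the PyJWT library (the library choice and claim name here are assumptions, not part of the change):

import jwt  # pip install PyJWT (assumed for this illustration)

secret, algorithm = "a secret", "HS256"  # values from jwt_config above

# A client-side tool could mint a token for a user and submit it to the
# homeserver's login endpoint; the server would verify it with the same
# secret and algorithm.
token = jwt.encode({"sub": "alice"}, secret, algorithm=algorithm)
claims = jwt.decode(token, secret, algorithms=[algorithm])
print(claims["sub"])  # -> alice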
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -22,8 +22,14 @@ from signedjson.key import (
read_signing_keys, write_signing_keys, NACL_ED25519
)
from unpaddedbase64 import decode_base64
from synapse.util.stringutils import random_string_with_symbols

import os
import hashlib
import logging


logger = logging.getLogger(__name__)


class KeyConfig(Config):
@@ -40,9 +46,29 @@ class KeyConfig(Config):
config["perspectives"]
)

def default_config(self, config_dir_path, server_name, **kwargs):
self.macaroon_secret_key = config.get(
"macaroon_secret_key", self.registration_shared_secret
)

if not self.macaroon_secret_key:
# Unfortunately, there are people out there that don't have this
# set. Lets just be "nice" and derive one from their secret key.
logger.warn("Config is missing missing macaroon_secret_key")
seed = self.signing_key[0].seed
self.macaroon_secret_key = hashlib.sha256(seed)

def default_config(self, config_dir_path, server_name, is_generating_file=False,
**kwargs):
base_key_name = os.path.join(config_dir_path, server_name)

if is_generating_file:
macaroon_secret_key = random_string_with_symbols(50)
else:
macaroon_secret_key = None

return """\
macaroon_secret_key: "%(macaroon_secret_key)s"

## Signing Keys ##

# Path to the signing key to sign messages with

@@ -0,0 +1,52 @@
# -*- coding: utf-8 -*-
# Copyright 2015 Niklas Riekenbrauck
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ._base import Config


class LDAPConfig(Config):
def read_config(self, config):
ldap_config = config.get("ldap_config", None)
if ldap_config:
self.ldap_enabled = ldap_config.get("enabled", False)
self.ldap_server = ldap_config["server"]
self.ldap_port = ldap_config["port"]
self.ldap_tls = ldap_config.get("tls", False)
self.ldap_search_base = ldap_config["search_base"]
self.ldap_search_property = ldap_config["search_property"]
self.ldap_email_property = ldap_config["email_property"]
self.ldap_full_name_property = ldap_config["full_name_property"]
else:
self.ldap_enabled = False
self.ldap_server = None
self.ldap_port = None
self.ldap_tls = False
self.ldap_search_base = None
self.ldap_search_property = None
self.ldap_email_property = None
self.ldap_full_name_property = None

def default_config(self, **kwargs):
return """\
# ldap_config:
#   enabled: true
#   server: "ldap://localhost"
#   port: 389
#   tls: false
#   search_base: "ou=Users,dc=example,dc=com"
#   search_property: "cn"
#   email_property: "email"
#   full_name_property: "givenName"
"""
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -1,4 +1,4 @@
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -23,22 +23,27 @@ from distutils.util import strtobool
class RegistrationConfig(Config):

def read_config(self, config):
self.disable_registration = not bool(
self.enable_registration = bool(
strtobool(str(config["enable_registration"]))
)
if "disable_registration" in config:
self.disable_registration = bool(
self.enable_registration = not bool(
strtobool(str(config["disable_registration"]))
)

self.registration_shared_secret = config.get("registration_shared_secret")
self.macaroon_secret_key = config.get("macaroon_secret_key")

self.bcrypt_rounds = config.get("bcrypt_rounds", 12)
self.trusted_third_party_id_servers = config["trusted_third_party_id_servers"]
self.allow_guest_access = config.get("allow_guest_access", False)

self.invite_3pid_guest = (
self.allow_guest_access and config.get("invite_3pid_guest", False)
)

def default_config(self, **kwargs):
registration_shared_secret = random_string_with_symbols(50)
macaroon_secret_key = random_string_with_symbols(50)

return """\
## Registration ##

@@ -49,8 +54,6 @@ class RegistrationConfig(Config):
# secret, even if registration is otherwise disabled.
registration_shared_secret: "%(registration_shared_secret)s"

macaroon_secret_key: "%(macaroon_secret_key)s"

# Set the number of bcrypt rounds used to generate password hash.
# Larger numbers increase the work factor needed to generate the hash.
# The default number of rounds is 12.
@@ -60,6 +63,12 @@ class RegistrationConfig(Config):
# participate in rooms hosted on this server which have been made
# accessible to anonymous users.
allow_guest_access: False

# The list of identity servers trusted to verify third party
# identifiers by this server.
trusted_third_party_id_servers:
- matrix.org
- vector.im
""" % locals()

def add_arguments(self, parser):
@@ -71,6 +80,6 @@ class RegistrationConfig(Config):

def read_arguments(self, args):
if args.enable_registration is not None:
self.disable_registration = not bool(
self.enable_registration = bool(
strtobool(str(args.enable_registration))
)

@@ -13,9 +13,25 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from ._base import Config
from ._base import Config, ConfigError
from collections import namedtuple


MISSING_NETADDR = (
"Missing netaddr library. This is required for URL preview API."
)

MISSING_LXML = (
"""Missing lxml library. This is required for URL preview API.

Install by running:
pip install lxml

Requires libxslt1-dev system package.
"""
)


ThumbnailRequirement = namedtuple(
"ThumbnailRequirement", ["width", "height", "method", "media_type"]
)
@@ -23,7 +39,7 @@ ThumbnailRequirement = namedtuple(

def parse_thumbnail_requirements(thumbnail_sizes):
""" Takes a list of dictionaries with "width", "height", and "method" keys
and creates a map from image media types to the thumbnail size, thumnailing
and creates a map from image media types to the thumbnail size, thumbnailing
method, and thumbnail media type to precalculate

Args:
@@ -53,12 +69,39 @@ class ContentRepositoryConfig(Config):
def read_config(self, config):
self.max_upload_size = self.parse_size(config["max_upload_size"])
self.max_image_pixels = self.parse_size(config["max_image_pixels"])
self.max_spider_size = self.parse_size(config["max_spider_size"])
self.media_store_path = self.ensure_directory(config["media_store_path"])
self.uploads_path = self.ensure_directory(config["uploads_path"])
self.dynamic_thumbnails = config["dynamic_thumbnails"]
self.thumbnail_requirements = parse_thumbnail_requirements(
config["thumbnail_sizes"]
)
self.url_preview_enabled = config.get("url_preview_enabled", False)
if self.url_preview_enabled:
try:
import lxml
lxml  # To stop unused lint.
except ImportError:
raise ConfigError(MISSING_LXML)

try:
from netaddr import IPSet
except ImportError:
raise ConfigError(MISSING_NETADDR)

if "url_preview_ip_range_blacklist" in config:
self.url_preview_ip_range_blacklist = IPSet(
config["url_preview_ip_range_blacklist"]
)
else:
raise ConfigError(
"For security, you must specify an explicit target IP address "
"blacklist in url_preview_ip_range_blacklist for url previewing "
"to work"
)

if "url_preview_url_blacklist" in config:
self.url_preview_url_blacklist = config["url_preview_url_blacklist"]

def default_config(self, **kwargs):
media_store = self.default_path("media_store")
@@ -80,7 +123,7 @@ class ContentRepositoryConfig(Config):
# the resolution requested by the client. If true then whenever
# a new resolution is requested by the client the server will
# generate a new thumbnail. If false the server will pick a thumbnail
# from a precalcualted list.
# from a precalculated list.
dynamic_thumbnails: false

# List of thumbnail to precalculate when an image is uploaded.
@@ -97,4 +140,65 @@ class ContentRepositoryConfig(Config):
- width: 640
height: 480
method: scale
- width: 800
height: 600
method: scale

# Is the preview URL API enabled? If enabled, you *must* specify
# an explicit url_preview_ip_range_blacklist of IPs that the spider is
# denied from accessing.
url_preview_enabled: False

# List of IP address CIDR ranges that the URL preview spider is denied
# from accessing. There are no defaults: you must explicitly
# specify a list for URL previewing to work. You should specify any
# internal services in your network that you do not want synapse to try
# to connect to, otherwise anyone in any Matrix room could cause your
# synapse to issue arbitrary GET requests to your internal services,
# causing serious security issues.
#
# url_preview_ip_range_blacklist:
# - '127.0.0.0/8'
# - '10.0.0.0/8'
# - '172.16.0.0/12'
# - '192.168.0.0/16'

# Optional list of URL matches that the URL preview spider is
# denied from accessing. You should use url_preview_ip_range_blacklist
# in preference to this, otherwise someone could define a public DNS
# entry that points to a private IP address and circumvent the blacklist.
# This is more useful if you know there is an entire shape of URL that
# you know that will never want synapse to try to spider.
#
# Each list entry is a dictionary of url component attributes as returned
# by urlparse.urlsplit as applied to the absolute form of the URL. See
# https://docs.python.org/2/library/urlparse.html#urlparse.urlsplit
# The values of the dictionary are treated as an filename match pattern
# applied to that component of URLs, unless they start with a ^ in which
# case they are treated as a regular expression match. If all the
# specified component matches for a given list item succeed, the URL is
# blacklisted.
#
# url_preview_url_blacklist:
# # blacklist any URL with a username in its URI
# - username: '*'
#
# # blacklist all *.google.com URLs
# - netloc: 'google.com'
# - netloc: '*.google.com'
#
# # blacklist all plain HTTP URLs
# - scheme: 'http'
#
# # blacklist http(s)://www.acme.com/foo
# - netloc: 'www.acme.com'
#   path: '/foo'
#
# # blacklist any URL with a literal IPv4 address
# - netloc: '^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$'

# The largest allowed URL preview spidering size in bytes
max_spider_size: "10M"


""" % locals()
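The comments above describe the url_preview_url_blacklist matching rule: each entry maps urlsplit() attributes to either a filename-style pattern or, when prefixed with ^, a regular expression, and an entry matches only if all of its components match. A minimal sketch of that rule as described (this is an illustration of the documented semantics, not Synapse's own implementation, which lives in the preview resource):

import re
from fnmatch import fnmatch
try:
    from urlparse import urlsplit        # Python 2, as referenced above
except ImportError:
    from urllib.parse import urlsplit    # Python 3

def url_is_blacklisted(url, blacklist):
    parts = urlsplit(url)
    for entry in blacklist:
        # Every component named in the entry must match for it to apply.
        if all(
            re.match(pattern, getattr(parts, attr) or "")
            if pattern.startswith("^")
            else fnmatch(getattr(parts, attr) or "", pattern)
            for attr, pattern in entry.items()
        ):
            return True
    return False

print(url_is_blacklisted(
    "http://www.acme.com/foo",
    [{"netloc": "www.acme.com", "path": "/foo"}],
))  # -> True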
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -28,6 +28,7 @@ class ServerConfig(Config):
self.print_pidfile = config.get("print_pidfile")
self.user_agent_suffix = config.get("user_agent_suffix")
self.use_frozen_dicts = config.get("use_frozen_dicts", True)
self.start_pushers = config.get("start_pushers", True)

self.listeners = config.get("listeners", [])

@@ -200,7 +201,7 @@ class ServerConfig(Config):
- names: [federation]
compress: false

# Turn on the twisted telnet manhole service on localhost on the given
# Turn on the twisted ssh manhole service on localhost on the given
# port.
# - port: 9000
#   bind_address: 127.0.0.1

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -1,4 +1,4 @@
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -1,4 +1,4 @@
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-

# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -36,6 +36,7 @@ def fetch_server_key(server_name, ssl_context_factory, path=KEY_API_V1):

factory = SynapseKeyClientFactory()
factory.path = path
factory.host = server_name
endpoint = matrix_federation_endpoint(
reactor, server_name, ssl_context_factory, timeout=30
)
@@ -81,6 +82,8 @@ class SynapseKeyClientProtocol(HTTPClient):
self.host = self.transport.getHost()
logger.debug("Connected to %s", self.host)
self.sendCommand(b"GET", self.path)
if self.host:
self.sendHeader(b"Host", self.host)
self.endHeaders()
self.timer = reactor.callLater(
self.timeout,
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -18,6 +18,10 @@ from synapse.api.errors import SynapseError, Codes
from synapse.util.retryutils import get_retry_limiter
from synapse.util import unwrapFirstError
from synapse.util.async import ObservableDeferred
from synapse.util.logcontext import (
preserve_context_over_deferred, preserve_context_over_fn, PreserveLoggingContext,
preserve_fn
)

from twisted.internet import defer

@@ -142,40 +146,43 @@ class Keyring(object):
for server_name, _ in server_and_json
}

# We want to wait for any previous lookups to complete before
# proceeding.
wait_on_deferred = self.wait_for_previous_lookups(
[server_name for server_name, _ in server_and_json],
server_to_deferred,
)
with PreserveLoggingContext():

# Actually start fetching keys.
wait_on_deferred.addBoth(
lambda _: self.get_server_verify_keys(group_id_to_group, deferreds)
)
# We want to wait for any previous lookups to complete before
# proceeding.
wait_on_deferred = self.wait_for_previous_lookups(
[server_name for server_name, _ in server_and_json],
server_to_deferred,
)

# When we've finished fetching all the keys for a given server_name,
# resolve the deferred passed to `wait_for_previous_lookups` so that
# any lookups waiting will proceed.
server_to_gids = {}
# Actually start fetching keys.
wait_on_deferred.addBoth(
lambda _: self.get_server_verify_keys(group_id_to_group, deferreds)
)

def remove_deferreds(res, server_name, group_id):
server_to_gids[server_name].discard(group_id)
if not server_to_gids[server_name]:
d = server_to_deferred.pop(server_name, None)
if d:
d.callback(None)
return res
# When we've finished fetching all the keys for a given server_name,
# resolve the deferred passed to `wait_for_previous_lookups` so that
# any lookups waiting will proceed.
server_to_gids = {}

for g_id, deferred in deferreds.items():
server_name = group_id_to_group[g_id].server_name
server_to_gids.setdefault(server_name, set()).add(g_id)
deferred.addBoth(remove_deferreds, server_name, g_id)
def remove_deferreds(res, server_name, group_id):
server_to_gids[server_name].discard(group_id)
if not server_to_gids[server_name]:
d = server_to_deferred.pop(server_name, None)
if d:
d.callback(None)
return res

for g_id, deferred in deferreds.items():
server_name = group_id_to_group[g_id].server_name
server_to_gids.setdefault(server_name, set()).add(g_id)
deferred.addBoth(remove_deferreds, server_name, g_id)

# Pass those keys to handle_key_deferred so that the json object
# signatures can be verified
return [
handle_key_deferred(
preserve_context_over_fn(
handle_key_deferred,
group_id_to_group[g_id],
deferreds[g_id],
)
@@ -198,12 +205,13 @@ class Keyring(object):
if server_name in self.key_downloads
]
if wait_on:
yield defer.DeferredList(wait_on)
with PreserveLoggingContext():
yield defer.DeferredList(wait_on)
else:
break

for server_name, deferred in server_to_deferred.items():
d = ObservableDeferred(deferred)
d = ObservableDeferred(preserve_context_over_deferred(deferred))
self.key_downloads[server_name] = d

def rm(r, server_name):
@@ -230,7 +238,9 @@ class Keyring(object):

missing_keys = {}
for group in group_id_to_group.values():
missing_keys.setdefault(group.server_name, set()).union(group.key_ids)
missing_keys.setdefault(group.server_name, set()).update(
group.key_ids
)

for fn in key_fetch_fns:
results = yield fn(missing_keys.items())
@@ -242,12 +252,13 @@ class Keyring(object):
for group in group_id_to_group.values():
for key_id in group.key_ids:
if key_id in merged_results[group.server_name]:
group_id_to_deferred[group.group_id].callback((
group.group_id,
group.server_name,
key_id,
merged_results[group.server_name][key_id],
))
with PreserveLoggingContext():
group_id_to_deferred[group.group_id].callback((
group.group_id,
group.server_name,
key_id,
merged_results[group.server_name][key_id],
))
break
else:
missing_groups.setdefault(
@@ -502,7 +513,7 @@ class Keyring(object):

yield defer.gatherResults(
[
self.store_keys(
preserve_fn(self.store_keys)(
server_name=key_server_name,
from_server=server_name,
verify_keys=verify_keys,
@@ -571,7 +582,7 @@ class Keyring(object):

yield defer.gatherResults(
[
self.store.store_server_keys_json(
preserve_fn(self.store.store_server_keys_json)(
server_name=server_name,
key_id=key_id,
from_server=server_name,
@@ -673,7 +684,7 @@ class Keyring(object):
# TODO(markjh): Store whether the keys have expired.
yield defer.gatherResults(
[
self.store.store_server_verify_key(
preserve_fn(self.store.store_server_verify_key)(
server_name, server_name, key.time_added, key
)
for key_id, key in verify_keys.items()
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,6 +14,7 @@
# limitations under the License.

from synapse.util.frozenutils import freeze
from synapse.util.caches import intern_dict


# Whether we should use frozen_dict in FrozenEvent. Using frozen_dicts prevents
@@ -30,7 +31,10 @@ class _EventInternalMetadata(object):
return dict(self.__dict__)

def is_outlier(self):
return hasattr(self, "outlier") and self.outlier
return getattr(self, "outlier", False)

def is_invite_from_remote(self):
return getattr(self, "invite_from_remote", False)


def _event_dict_property(key):
@@ -117,6 +121,15 @@ class EventBase(object):
def __set__(self, instance, value):
raise AttributeError("Unrecognized attribute %s" % (instance,))

def __getitem__(self, field):
return self._event_dict[field]

def __contains__(self, field):
return field in self._event_dict

def items(self):
return self._event_dict.items()


class FrozenEvent(EventBase):
def __init__(self, event_dict, internal_metadata_dict={}, rejected_reason=None):
@@ -131,6 +144,10 @@ class FrozenEvent(EventBase):

unsigned = dict(event_dict.pop("unsigned", {}))

# We intern these strings because they turn up a lot (especially when
# caching).
event_dict = intern_dict(event_dict)

if USE_FROZEN_DICTS:
frozen_dict = freeze(event_dict)
else:
@@ -159,5 +176,7 @@ class FrozenEvent(EventBase):

def __repr__(self):
return "<FrozenEvent event_id='%s', type='%s', state_key='%s'>" % (
self.event_id, self.type, self.get("state_key", None),
self.get("event_id", None),
self.get("type", None),
self.get("state_key", None),
)

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -20,3 +20,4 @@ class EventContext(object):
self.current_state = current_state
self.state_group = None
self.rejected = False
self.push_actions = []

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -17,15 +17,10 @@
"""

from .replication import ReplicationLayer
from .transport import TransportLayer
from .transport.client import TransportLayerClient


def initialize_http_replication(homeserver):
transport = TransportLayer(
homeserver,
homeserver.hostname,
server=homeserver.get_resource_for_federation(),
client=homeserver.get_http_client()
)
transport = TransportLayerClient(homeserver)

return ReplicationLayer(homeserver, transport)

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -57,7 +57,7 @@ class FederationClient(FederationBase):
cache_name="get_pdu_cache",
clock=self._clock,
max_len=1000,
expiry_ms=120*1000,
expiry_ms=120 * 1000,
reset_expiry_on_get=False,
)

@@ -114,7 +114,7 @@ class FederationClient(FederationBase):

@log_function
def make_query(self, destination, query_type, args,
retry_on_dns_fail=True):
retry_on_dns_fail=False):
"""Sends a federation Query to a remote homeserver of the given type
and arguments.

@@ -418,6 +418,7 @@ class FederationClient(FederationBase):
"Failed to make_%s via %s: %s",
membership, destination, e.message
)
raise

raise RuntimeError("Failed to send to any server.")
@@ -1,5 +1,5 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2015 OpenMarket Ltd
|
||||
# Copyright 2015, 2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -126,10 +126,8 @@ class FederationServer(FederationBase):
|
||||
results = []
|
||||
|
||||
for pdu in pdu_list:
|
||||
d = self._handle_new_pdu(transaction.origin, pdu)
|
||||
|
||||
try:
|
||||
yield d
|
||||
yield self._handle_new_pdu(transaction.origin, pdu)
|
||||
results.append({})
|
||||
except FederationError as e:
|
||||
self.send_failure(e, transaction.origin)
|
||||
@@ -139,8 +137,8 @@ class FederationServer(FederationBase):
|
||||
logger.exception("Failed to handle PDU")
|
||||
|
||||
if hasattr(transaction, "edus"):
|
||||
for edu in [Edu(**x) for x in transaction.edus]:
|
||||
self.received_edu(
|
||||
for edu in (Edu(**x) for x in transaction.edus):
|
||||
yield self.received_edu(
|
||||
transaction.origin,
|
||||
edu.edu_type,
|
||||
edu.content
|
||||
@@ -163,11 +161,17 @@ class FederationServer(FederationBase):
|
||||
)
|
||||
defer.returnValue((200, response))
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def received_edu(self, origin, edu_type, content):
|
||||
received_edus_counter.inc()
|
||||
|
||||
if edu_type in self.edu_handlers:
|
||||
self.edu_handlers[edu_type](origin, content)
|
||||
try:
|
||||
yield self.edu_handlers[edu_type](origin, content)
|
||||
except SynapseError as e:
|
||||
logger.info("Failed to handle edu %r: %r", edu_type, e)
|
||||
except Exception as e:
|
||||
logger.exception("Failed to handle edu %r", edu_type, e)
|
||||
else:
|
||||
logger.warn("Received EDU of type %s with no handler", edu_type)
|
||||
|
||||
@@ -527,7 +531,6 @@ class FederationServer(FederationBase):
|
||||
yield self.handler.on_receive_pdu(
|
||||
origin,
|
||||
pdu,
|
||||
backfilled=False,
|
||||
state=state,
|
||||
auth_chain=auth_chain,
|
||||
)
|
||||
@@ -545,8 +548,19 @@ class FederationServer(FederationBase):
|
||||
return event
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def exchange_third_party_invite(self, invite):
|
||||
ret = yield self.handler.exchange_third_party_invite(invite)
|
||||
def exchange_third_party_invite(
|
||||
self,
|
||||
sender_user_id,
|
||||
target_user_id,
|
||||
room_id,
|
||||
signed,
|
||||
):
|
||||
ret = yield self.handler.exchange_third_party_invite(
|
||||
sender_user_id,
|
||||
target_user_id,
|
||||
room_id,
|
||||
signed,
|
||||
)
|
||||
defer.returnValue(ret)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2014, 2015 OpenMarket Ltd
|
||||
# Copyright 2014-2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2014, 2015 OpenMarket Ltd
|
||||
# Copyright 2014-2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -54,8 +54,6 @@ class ReplicationLayer(FederationClient, FederationServer):
|
||||
self.keyring = hs.get_keyring()
|
||||
|
||||
self.transport_layer = transport_layer
|
||||
self.transport_layer.register_received_handler(self)
|
||||
self.transport_layer.register_request_handler(self)
|
||||
|
||||
self.federation_client = self
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2014, 2015 OpenMarket Ltd
|
||||
# Copyright 2014-2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -103,7 +103,6 @@ class TransactionQueue(object):
|
||||
else:
|
||||
return not destination.startswith("localhost")
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def enqueue_pdu(self, pdu, destinations, order):
|
||||
# We loop through all destinations to see whether we already have
|
||||
# a transaction in progress. If we do, stick it in the pending_pdus
|
||||
@@ -141,8 +140,6 @@ class TransactionQueue(object):
|
||||
|
||||
deferreds.append(deferred)
|
||||
|
||||
yield defer.DeferredList(deferreds, consumeErrors=True)
|
||||
|
||||
# NO inlineCallbacks
|
||||
def enqueue_edu(self, edu):
|
||||
destination = edu.destination
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2014, 2015 OpenMarket Ltd
|
||||
# Copyright 2014-2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -20,55 +20,3 @@ By default this is done over HTTPS (and all home servers are required to
|
||||
support HTTPS), however individual pairings of servers may decide to
|
||||
communicate over a different (albeit still reliable) protocol.
|
||||
"""
|
||||
|
||||
from .server import TransportLayerServer
|
||||
from .client import TransportLayerClient
|
||||
|
||||
from synapse.util.ratelimitutils import FederationRateLimiter
|
||||
|
||||
|
||||
class TransportLayer(TransportLayerServer, TransportLayerClient):
|
||||
"""This is a basic implementation of the transport layer that translates
|
||||
transactions and other requests to/from HTTP.
|
||||
|
||||
Attributes:
|
||||
server_name (str): Local home server host
|
||||
|
||||
server (synapse.http.server.HttpServer): the http server to
|
||||
register listeners on
|
||||
|
||||
client (synapse.http.client.HttpClient): the http client used to
|
||||
send requests
|
||||
|
||||
request_handler (TransportRequestHandler): The handler to fire when we
|
||||
receive requests for data.
|
||||
|
||||
received_handler (TransportReceivedHandler): The handler to fire when
|
||||
we receive data.
|
||||
"""
|
||||
|
||||
def __init__(self, homeserver, server_name, server, client):
|
||||
"""
|
||||
Args:
|
||||
server_name (str): Local home server host
|
||||
server (synapse.protocol.http.HttpServer): the http server to
|
||||
register listeners on
|
||||
client (synapse.protocol.http.HttpClient): the http client used to
|
||||
send requests
|
||||
"""
|
||||
self.keyring = homeserver.get_keyring()
|
||||
self.clock = homeserver.get_clock()
|
||||
self.server_name = server_name
|
||||
self.server = server
|
||||
self.client = client
|
||||
self.request_handler = None
|
||||
self.received_handler = None
|
||||
|
||||
self.ratelimiter = FederationRateLimiter(
|
||||
self.clock,
|
||||
window_size=homeserver.config.federation_rc_window_size,
|
||||
sleep_limit=homeserver.config.federation_rc_sleep_limit,
|
||||
sleep_msec=homeserver.config.federation_rc_sleep_delay,
|
||||
reject_limit=homeserver.config.federation_rc_reject_limit,
|
||||
concurrent_requests=homeserver.config.federation_rc_concurrent,
|
||||
)
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2014, 2015 OpenMarket Ltd
|
||||
# Copyright 2014-2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -28,6 +28,10 @@ logger = logging.getLogger(__name__)
|
||||
class TransportLayerClient(object):
|
||||
"""Sends federation HTTP requests to other servers"""
|
||||
|
||||
def __init__(self, hs):
|
||||
self.server_name = hs.hostname
|
||||
self.client = hs.get_http_client()
|
||||
|
||||
@log_function
|
||||
def get_room_state(self, destination, room_id, event_id):
|
||||
""" Requests all state for a given room from the given server at the
|
||||
@@ -156,6 +160,7 @@ class TransportLayerClient(object):
|
||||
path=path,
|
||||
args=args,
|
||||
retry_on_dns_fail=retry_on_dns_fail,
|
||||
timeout=10000,
|
||||
)
|
||||
|
||||
defer.returnValue(content)
|
||||
@@ -174,7 +179,8 @@ class TransportLayerClient(object):
|
||||
content = yield self.client.get_json(
|
||||
destination=destination,
|
||||
path=path,
|
||||
retry_on_dns_fail=True,
|
||||
retry_on_dns_fail=False,
|
||||
timeout=20000,
|
||||
)
|
||||
|
||||
defer.returnValue(content)
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2014, 2015 OpenMarket Ltd
|
||||
# Copyright 2014-2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -17,7 +17,9 @@ from twisted.internet import defer
|
||||
|
||||
from synapse.api.urls import FEDERATION_PREFIX as PREFIX
|
||||
from synapse.api.errors import Codes, SynapseError
|
||||
from synapse.util.logutils import log_function
|
||||
from synapse.http.server import JsonResource
|
||||
from synapse.http.servlet import parse_json_object_from_request
|
||||
from synapse.util.ratelimitutils import FederationRateLimiter
|
||||
|
||||
import functools
|
||||
import logging
|
||||
@@ -28,9 +30,41 @@ import re
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class TransportLayerServer(object):
|
||||
class TransportLayerServer(JsonResource):
|
||||
"""Handles incoming federation HTTP requests"""
|
||||
|
||||
def __init__(self, hs):
|
||||
self.hs = hs
|
||||
self.clock = hs.get_clock()
|
||||
|
||||
super(TransportLayerServer, self).__init__(hs)
|
||||
|
||||
self.authenticator = Authenticator(hs)
|
||||
self.ratelimiter = FederationRateLimiter(
|
||||
self.clock,
|
||||
window_size=hs.config.federation_rc_window_size,
|
||||
sleep_limit=hs.config.federation_rc_sleep_limit,
|
||||
sleep_msec=hs.config.federation_rc_sleep_delay,
|
||||
reject_limit=hs.config.federation_rc_reject_limit,
|
||||
concurrent_requests=hs.config.federation_rc_concurrent,
|
||||
)
|
||||
|
||||
self.register_servlets()
|
||||
|
||||
def register_servlets(self):
|
||||
register_servlets(
|
||||
self.hs,
|
||||
resource=self,
|
||||
ratelimiter=self.ratelimiter,
|
||||
authenticator=self.authenticator,
|
||||
)
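Because TransportLayerServer now subclasses JsonResource and registers its own servlets in __init__, it can be dropped straight into the homeserver's HTTP resource tree. A hedged sketch of that wiring (the resource-map style, the module path and the hs variable are assumptions, not part of this diff; FEDERATION_PREFIX is the import added above):

# Hedged sketch only: mounting the JsonResource-based federation server.
from synapse.api.urls import FEDERATION_PREFIX
from synapse.federation.transport.server import TransportLayerServer

def build_federation_resources(hs):
    # hs is a configured HomeServer instance (assumed to exist elsewhere)
    return {
        FEDERATION_PREFIX: TransportLayerServer(hs),
    }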
|
||||
|
||||
|
||||
class Authenticator(object):
|
||||
def __init__(self, hs):
|
||||
self.keyring = hs.get_keyring()
|
||||
self.server_name = hs.hostname
|
||||
|
||||
# A method just so we can pass 'self' as the authenticator to the Servlets
|
||||
@defer.inlineCallbacks
|
||||
def authenticate_request(self, request):
|
||||
@@ -98,37 +132,9 @@ class TransportLayerServer(object):
|
||||
|
||||
defer.returnValue((origin, content))
|
||||
|
||||
@log_function
|
||||
def register_received_handler(self, handler):
|
||||
""" Register a handler that will be fired when we receive data.
|
||||
|
||||
Args:
|
||||
handler (TransportReceivedHandler)
|
||||
"""
|
||||
FederationSendServlet(
|
||||
handler,
|
||||
authenticator=self,
|
||||
ratelimiter=self.ratelimiter,
|
||||
server_name=self.server_name,
|
||||
).register(self.server)
|
||||
|
||||
@log_function
|
||||
def register_request_handler(self, handler):
|
||||
""" Register a handler that will be fired when we get asked for data.
|
||||
|
||||
Args:
|
||||
handler (TransportRequestHandler)
|
||||
"""
|
||||
for servletclass in SERVLET_CLASSES:
|
||||
servletclass(
|
||||
handler,
|
||||
authenticator=self,
|
||||
ratelimiter=self.ratelimiter,
|
||||
).register(self.server)
|
||||
|
||||
|
||||
class BaseFederationServlet(object):
|
||||
def __init__(self, handler, authenticator, ratelimiter):
|
||||
def __init__(self, handler, authenticator, ratelimiter, server_name):
|
||||
self.handler = handler
|
||||
self.authenticator = authenticator
|
||||
self.ratelimiter = ratelimiter
|
||||
@@ -169,10 +175,12 @@ class BaseFederationServlet(object):
|
||||
|
||||
|
||||
class FederationSendServlet(BaseFederationServlet):
|
||||
PATH = "/send/([^/]*)/"
|
||||
PATH = "/send/(?P<transaction_id>[^/]*)/"
|
||||
|
||||
def __init__(self, handler, server_name, **kwargs):
|
||||
super(FederationSendServlet, self).__init__(handler, **kwargs)
|
||||
super(FederationSendServlet, self).__init__(
|
||||
handler, server_name=server_name, **kwargs
|
||||
)
|
||||
self.server_name = server_name
|
||||
|
||||
# This is when someone is trying to send us a bunch of data.
|
||||
@@ -242,7 +250,7 @@ class FederationPullServlet(BaseFederationServlet):
|
||||
|
||||
|
||||
class FederationEventServlet(BaseFederationServlet):
|
||||
PATH = "/event/([^/]*)/"
|
||||
PATH = "/event/(?P<event_id>[^/]*)/"
|
||||
|
||||
# This is when someone asks for a data item for a given server data_id pair.
|
||||
def on_GET(self, origin, content, query, event_id):
|
||||
@@ -250,7 +258,7 @@ class FederationEventServlet(BaseFederationServlet):
|
||||
|
||||
|
||||
class FederationStateServlet(BaseFederationServlet):
|
||||
PATH = "/state/([^/]*)/"
|
||||
PATH = "/state/(?P<context>[^/]*)/"
|
||||
|
||||
# This is when someone asks for all data for a given context.
|
||||
def on_GET(self, origin, content, query, context):
|
||||
@@ -262,7 +270,7 @@ class FederationStateServlet(BaseFederationServlet):
|
||||
|
||||
|
||||
class FederationBackfillServlet(BaseFederationServlet):
|
||||
PATH = "/backfill/([^/]*)/"
|
||||
PATH = "/backfill/(?P<context>[^/]*)/"
|
||||
|
||||
def on_GET(self, origin, content, query, context):
|
||||
versions = query["v"]
|
||||
@@ -277,7 +285,7 @@ class FederationBackfillServlet(BaseFederationServlet):
|
||||
|
||||
|
||||
class FederationQueryServlet(BaseFederationServlet):
|
||||
PATH = "/query/([^/]*)"
|
||||
PATH = "/query/(?P<query_type>[^/]*)"
|
||||
|
||||
# This is when we receive a server-server Query
|
||||
def on_GET(self, origin, content, query, query_type):
|
||||
@@ -288,7 +296,7 @@ class FederationQueryServlet(BaseFederationServlet):
|
||||
|
||||
|
||||
class FederationMakeJoinServlet(BaseFederationServlet):
|
||||
PATH = "/make_join/([^/]*)/([^/]*)"
|
||||
PATH = "/make_join/(?P<context>[^/]*)/(?P<user_id>[^/]*)"
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_GET(self, origin, content, query, context, user_id):
|
||||
@@ -297,7 +305,7 @@ class FederationMakeJoinServlet(BaseFederationServlet):
|
||||
|
||||
|
||||
class FederationMakeLeaveServlet(BaseFederationServlet):
|
||||
PATH = "/make_leave/([^/]*)/([^/]*)"
|
||||
PATH = "/make_leave/(?P<context>[^/]*)/(?P<user_id>[^/]*)"
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_GET(self, origin, content, query, context, user_id):
|
||||
@@ -306,7 +314,7 @@ class FederationMakeLeaveServlet(BaseFederationServlet):
|
||||
|
||||
|
||||
class FederationSendLeaveServlet(BaseFederationServlet):
|
||||
PATH = "/send_leave/([^/]*)/([^/]*)"
|
||||
PATH = "/send_leave/(?P<room_id>[^/]*)/(?P<txid>[^/]*)"
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_PUT(self, origin, content, query, room_id, txid):
|
||||
@@ -315,14 +323,14 @@ class FederationSendLeaveServlet(BaseFederationServlet):
|
||||
|
||||
|
||||
class FederationEventAuthServlet(BaseFederationServlet):
|
||||
PATH = "/event_auth/([^/]*)/([^/]*)"
|
||||
PATH = "/event_auth(?P<context>[^/]*)/(?P<event_id>[^/]*)"
|
||||
|
||||
def on_GET(self, origin, content, query, context, event_id):
|
||||
return self.handler.on_event_auth(origin, context, event_id)
|
||||
|
||||
|
||||
class FederationSendJoinServlet(BaseFederationServlet):
|
||||
PATH = "/send_join/([^/]*)/([^/]*)"
|
||||
PATH = "/send_join/(?P<context>[^/]*)/(?P<event_id>[^/]*)"
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_PUT(self, origin, content, query, context, event_id):
|
||||
@@ -333,7 +341,7 @@ class FederationSendJoinServlet(BaseFederationServlet):
|
||||
|
||||
|
||||
class FederationInviteServlet(BaseFederationServlet):
|
||||
PATH = "/invite/([^/]*)/([^/]*)"
|
||||
PATH = "/invite/(?P<context>[^/]*)/(?P<event_id>[^/]*)"
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_PUT(self, origin, content, query, context, event_id):
|
||||
@@ -344,7 +352,7 @@ class FederationInviteServlet(BaseFederationServlet):
|
||||
|
||||
|
||||
class FederationThirdPartyInviteExchangeServlet(BaseFederationServlet):
|
||||
PATH = "/exchange_third_party_invite/([^/]*)"
|
||||
PATH = "/exchange_third_party_invite/(?P<room_id>[^/]*)"
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_PUT(self, origin, content, query, room_id):
|
||||
@@ -373,7 +381,7 @@ class FederationClientKeysClaimServlet(BaseFederationServlet):
|
||||
|
||||
|
||||
class FederationQueryAuthServlet(BaseFederationServlet):
|
||||
PATH = "/query_auth/([^/]*)/([^/]*)"
|
||||
PATH = "/query_auth/(?P<context>[^/]*)/(?P<event_id>[^/]*)"
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_POST(self, origin, content, query, context, event_id):
|
||||
@@ -386,7 +394,7 @@ class FederationQueryAuthServlet(BaseFederationServlet):
|
||||
|
||||
class FederationGetMissingEventsServlet(BaseFederationServlet):
|
||||
# TODO(paul): Why does this path alone end with "/?" optional?
|
||||
PATH = "/get_missing_events/([^/]*)/?"
|
||||
PATH = "/get_missing_events/(?P<room_id>[^/]*)/?"
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_POST(self, origin, content, query, room_id):
|
||||
@@ -412,13 +420,22 @@ class On3pidBindServlet(BaseFederationServlet):
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def on_POST(self, request):
|
||||
content_bytes = request.content.read()
|
||||
content = json.loads(content_bytes)
|
||||
content = parse_json_object_from_request(request)
|
||||
if "invites" in content:
|
||||
last_exception = None
|
||||
for invite in content["invites"]:
|
||||
try:
|
||||
yield self.handler.exchange_third_party_invite(invite)
|
||||
if "signed" not in invite or "token" not in invite["signed"]:
|
||||
message = ("Rejecting received notification of third-"
|
||||
"party invite without signed: %s" % (invite,))
|
||||
logger.info(message)
|
||||
raise SynapseError(400, message)
|
||||
yield self.handler.exchange_third_party_invite(
|
||||
invite["sender"],
|
||||
invite["mxid"],
|
||||
invite["room_id"],
|
||||
invite["signed"],
|
||||
)
|
||||
except Exception as e:
|
||||
last_exception = e
|
||||
if last_exception:
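The reworked servlet body above now insists on a signed block and pulls sender, mxid, room_id and signed out of each entry of content["invites"]. A hypothetical payload entry illustrating the shape it expects (only the key names come from the code above; all values are made up):

# Hypothetical example of one element of content["invites"].
invite = {
    "sender": "@alice:example.org",      # user who issued the 3pid invite
    "mxid": "@bob:example.org",          # Matrix ID the third-party ID mapped to
    "room_id": "!hkfUjpsCMGKGqBSpKS:example.org",
    "signed": {
        "mxid": "@bob:example.org",
        "token": "sometoken",
        "signatures": {},                # identity-server signatures go here
    },
}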
|
||||
@@ -432,6 +449,7 @@ class On3pidBindServlet(BaseFederationServlet):
|
||||
|
||||
|
||||
SERVLET_CLASSES = (
|
||||
FederationSendServlet,
|
||||
FederationPullServlet,
|
||||
FederationEventServlet,
|
||||
FederationStateServlet,
|
||||
@@ -451,3 +469,13 @@ SERVLET_CLASSES = (
|
||||
FederationThirdPartyInviteExchangeServlet,
|
||||
On3pidBindServlet,
|
||||
)
|
||||
|
||||
|
||||
def register_servlets(hs, resource, authenticator, ratelimiter):
|
||||
for servletclass in SERVLET_CLASSES:
|
||||
servletclass(
|
||||
handler=hs.get_replication_layer(),
|
||||
authenticator=authenticator,
|
||||
ratelimiter=ratelimiter,
|
||||
server_name=hs.hostname,
|
||||
).register(resource)
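All the PATH regexes above gain named groups, so the values captured from the URL can be handed to on_GET/on_PUT/on_POST as keyword arguments instead of relying on positional order. A small standalone example of the mechanism (the handler body is invented for illustration):

# Standalone illustration: groupdict() yields keyword arguments that line up
# with the servlet method signatures.
import re

PATH = "/send/(?P<transaction_id>[^/]*)/"

def on_PUT(origin, content, query, transaction_id):
    return "handled transaction %s from %s" % (transaction_id, origin)

match = re.match(PATH, "/send/12345/")
print(match.groupdict())                       # {'transaction_id': '12345'}
print(on_PUT("remote.example.org", {}, {}, **match.groupdict()))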
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2014, 2015 OpenMarket Ltd
|
||||
# Copyright 2014-2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2014, 2015 OpenMarket Ltd
|
||||
# Copyright 2014-2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -17,8 +17,9 @@ from synapse.appservice.scheduler import AppServiceScheduler
|
||||
from synapse.appservice.api import ApplicationServiceApi
|
||||
from .register import RegistrationHandler
|
||||
from .room import (
|
||||
RoomCreationHandler, RoomMemberHandler, RoomListHandler, RoomContextHandler,
|
||||
RoomCreationHandler, RoomListHandler, RoomContextHandler,
|
||||
)
|
||||
from .room_member import RoomMemberHandler
|
||||
from .message import MessageHandler
|
||||
from .events import EventStreamHandler, EventHandler
|
||||
from .federation import FederationHandler
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2014, 2015 OpenMarket Ltd
|
||||
# Copyright 2014 - 2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -18,9 +18,10 @@ from twisted.internet import defer
|
||||
from synapse.api.errors import LimitExceededError, SynapseError, AuthError
|
||||
from synapse.crypto.event_signing import add_hashes_and_signatures
|
||||
from synapse.api.constants import Membership, EventTypes
|
||||
from synapse.types import UserID, RoomAlias
|
||||
from synapse.types import UserID, RoomAlias, Requester
|
||||
from synapse.push.action_generator import ActionGenerator
|
||||
|
||||
from synapse.util.logcontext import PreserveLoggingContext
|
||||
from synapse.util.logcontext import PreserveLoggingContext, preserve_fn
|
||||
|
||||
import logging
|
||||
|
||||
@@ -28,12 +29,30 @@ import logging
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
VISIBILITY_PRIORITY = (
|
||||
"world_readable",
|
||||
"shared",
|
||||
"invited",
|
||||
"joined",
|
||||
)
|
||||
|
||||
|
||||
MEMBERSHIP_PRIORITY = (
|
||||
Membership.JOIN,
|
||||
Membership.INVITE,
|
||||
Membership.KNOCK,
|
||||
Membership.LEAVE,
|
||||
Membership.BAN,
|
||||
)
|
||||
|
||||
|
||||
class BaseHandler(object):
|
||||
"""
|
||||
Common base class for the event handlers.
|
||||
|
||||
:type store: synapse.storage.events.StateStore
|
||||
:type state_handler: synapse.state.StateHandler
|
||||
Attributes:
|
||||
store (synapse.storage.events.StateStore):
|
||||
state_handler (synapse.state.StateHandler):
|
||||
"""
|
||||
|
||||
def __init__(self, hs):
|
||||
@@ -52,105 +71,187 @@ class BaseHandler(object):
|
||||
self.event_builder_factory = hs.get_event_builder_factory()
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def _filter_events_for_client(self, user_id, events, is_guest=False,
|
||||
require_all_visible_for_guests=True):
|
||||
# Assumes that user has at some point joined the room if not is_guest.
|
||||
def filter_events_for_clients(self, user_tuples, events, event_id_to_state):
|
||||
""" Returns dict of user_id -> list of events that user is allowed to
|
||||
see.
|
||||
|
||||
def allowed(event, membership, visibility):
|
||||
if visibility == "world_readable":
|
||||
return True
|
||||
|
||||
if is_guest:
|
||||
return False
|
||||
|
||||
if membership == Membership.JOIN:
|
||||
return True
|
||||
|
||||
if event.type == EventTypes.RoomHistoryVisibility:
|
||||
return not is_guest
|
||||
|
||||
if visibility == "shared":
|
||||
return True
|
||||
elif visibility == "joined":
|
||||
return membership == Membership.JOIN
|
||||
elif visibility == "invited":
|
||||
return membership == Membership.INVITE
|
||||
|
||||
return True
|
||||
|
||||
event_id_to_state = yield self.store.get_state_for_events(
|
||||
frozenset(e.event_id for e in events),
|
||||
types=(
|
||||
(EventTypes.RoomHistoryVisibility, ""),
|
||||
(EventTypes.Member, user_id),
|
||||
Args:
|
||||
user_tuples (str, bool): (user id, is_peeking) for each user to be
|
||||
checked. is_peeking should be true if:
|
||||
* the user is not currently a member of the room, and:
|
||||
* the user has not been a member of the room since the
|
||||
given events
|
||||
events ([synapse.events.EventBase]): list of events to filter
|
||||
"""
|
||||
forgotten = yield defer.gatherResults([
|
||||
self.store.who_forgot_in_room(
|
||||
room_id,
|
||||
)
|
||||
for room_id in frozenset(e.room_id for e in events)
|
||||
], consumeErrors=True)
|
||||
|
||||
# Set of membership event_ids that have been forgotten
|
||||
event_id_forgotten = frozenset(
|
||||
row["event_id"] for rows in forgotten for row in rows
|
||||
)
|
||||
|
||||
events_to_return = []
|
||||
for event in events:
|
||||
def allowed(event, user_id, is_peeking):
|
||||
"""
|
||||
Args:
|
||||
event (synapse.events.EventBase): event to check
|
||||
user_id (str)
|
||||
is_peeking (bool)
|
||||
"""
|
||||
state = event_id_to_state[event.event_id]
|
||||
|
||||
membership_event = state.get((EventTypes.Member, user_id), None)
|
||||
if membership_event:
|
||||
was_forgotten_at_event = yield self.store.was_forgotten_at(
|
||||
membership_event.state_key,
|
||||
membership_event.room_id,
|
||||
membership_event.event_id
|
||||
)
|
||||
if was_forgotten_at_event:
|
||||
membership = None
|
||||
else:
|
||||
membership = membership_event.membership
|
||||
else:
|
||||
membership = None
|
||||
|
||||
# get the room_visibility at the time of the event.
|
||||
visibility_event = state.get((EventTypes.RoomHistoryVisibility, ""), None)
|
||||
if visibility_event:
|
||||
visibility = visibility_event.content.get("history_visibility", "shared")
|
||||
else:
|
||||
visibility = "shared"
|
||||
|
||||
should_include = allowed(event, membership, visibility)
|
||||
if should_include:
|
||||
events_to_return.append(event)
|
||||
if visibility not in VISIBILITY_PRIORITY:
|
||||
visibility = "shared"
|
||||
|
||||
if (require_all_visible_for_guests
|
||||
and is_guest
|
||||
and len(events_to_return) < len(events)):
|
||||
# This indicates that some events in the requested range were not
|
||||
# visible to guest users. To be safe, we reject the entire request,
|
||||
# so that we don't have to worry about interpreting visibility
|
||||
# boundaries.
|
||||
raise AuthError(403, "User %s does not have permission" % (
|
||||
user_id
|
||||
))
|
||||
# if it was world_readable, it's easy: everyone can read it
|
||||
if visibility == "world_readable":
|
||||
return True
|
||||
|
||||
defer.returnValue(events_to_return)
|
||||
# Always allow history visibility events on boundaries. This is done
|
||||
# by setting the effective visibility to the least restrictive
|
||||
# of the old vs new.
|
||||
if event.type == EventTypes.RoomHistoryVisibility:
|
||||
prev_content = event.unsigned.get("prev_content", {})
|
||||
prev_visibility = prev_content.get("history_visibility", None)
|
||||
|
||||
def ratelimit(self, user_id):
|
||||
if prev_visibility not in VISIBILITY_PRIORITY:
|
||||
prev_visibility = "shared"
|
||||
|
||||
new_priority = VISIBILITY_PRIORITY.index(visibility)
|
||||
old_priority = VISIBILITY_PRIORITY.index(prev_visibility)
|
||||
if old_priority < new_priority:
|
||||
visibility = prev_visibility
|
||||
|
||||
# likewise, if the event is the user's own membership event, use
|
||||
# the 'most joined' membership
|
||||
membership = None
|
||||
if event.type == EventTypes.Member and event.state_key == user_id:
|
||||
membership = event.content.get("membership", None)
|
||||
if membership not in MEMBERSHIP_PRIORITY:
|
||||
membership = "leave"
|
||||
|
||||
prev_content = event.unsigned.get("prev_content", {})
|
||||
prev_membership = prev_content.get("membership", None)
|
||||
if prev_membership not in MEMBERSHIP_PRIORITY:
|
||||
prev_membership = "leave"
|
||||
|
||||
new_priority = MEMBERSHIP_PRIORITY.index(membership)
|
||||
old_priority = MEMBERSHIP_PRIORITY.index(prev_membership)
|
||||
if old_priority < new_priority:
|
||||
membership = prev_membership
|
||||
|
||||
# otherwise, get the user's membership at the time of the event.
|
||||
if membership is None:
|
||||
membership_event = state.get((EventTypes.Member, user_id), None)
|
||||
if membership_event:
|
||||
if membership_event.event_id not in event_id_forgotten:
|
||||
membership = membership_event.membership
|
||||
|
||||
# if the user was a member of the room at the time of the event,
|
||||
# they can see it.
|
||||
if membership == Membership.JOIN:
|
||||
return True
|
||||
|
||||
if visibility == "joined":
|
||||
# we weren't a member at the time of the event, so we can't
|
||||
# see this event.
|
||||
return False
|
||||
|
||||
elif visibility == "invited":
|
||||
# user can also see the event if they were *invited* at the time
|
||||
# of the event.
|
||||
return membership == Membership.INVITE
|
||||
|
||||
else:
|
||||
# visibility is shared: user can also see the event if they have
|
||||
# become a member since the event
|
||||
#
|
||||
# XXX: if the user has subsequently joined and then left again,
|
||||
# ideally we would share history up to the point they left. But
|
||||
# we don't know when they left.
|
||||
return not is_peeking
|
||||
|
||||
defer.returnValue({
|
||||
user_id: [
|
||||
event
|
||||
for event in events
|
||||
if allowed(event, user_id, is_peeking)
|
||||
]
|
||||
for user_id, is_peeking in user_tuples
|
||||
})
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def _filter_events_for_client(self, user_id, events, is_peeking=False):
|
||||
"""
|
||||
Check which events a user is allowed to see
|
||||
|
||||
Args:
|
||||
user_id(str): user id to be checked
|
||||
events([synapse.events.EventBase]): list of events to be checked
|
||||
is_peeking(bool): should be True if:
|
||||
* the user is not currently a member of the room, and:
|
||||
* the user has not been a member of the room since the given
|
||||
events
|
||||
|
||||
Returns:
|
||||
[synapse.events.EventBase]
|
||||
"""
|
||||
types = (
|
||||
(EventTypes.RoomHistoryVisibility, ""),
|
||||
(EventTypes.Member, user_id),
|
||||
)
|
||||
event_id_to_state = yield self.store.get_state_for_events(
|
||||
frozenset(e.event_id for e in events),
|
||||
types=types
|
||||
)
|
||||
res = yield self.filter_events_for_clients(
|
||||
[(user_id, is_peeking)], events, event_id_to_state
|
||||
)
|
||||
defer.returnValue(res.get(user_id, []))
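The refactor above turns the old single-user filter into filter_events_for_clients, which checks many (user_id, is_peeking) pairs against one batch of state, keeping _filter_events_for_client as a thin single-user wrapper. A hedged sketch of how a handler built on BaseHandler might call that wrapper (the method name, events and user_id are assumptions about the caller, not part of this diff):

# Hedged sketch, not from the diff: a caller inside another handler method.
from twisted.internet import defer

@defer.inlineCallbacks
def _visible_events_for(self, user_id, events, is_peeking):
    # is_peeking=True means the user is not currently in the room and has
    # not been in it since these events, so only world_readable history
    # will normally survive the filter.
    filtered = yield self._filter_events_for_client(
        user_id, events, is_peeking=is_peeking
    )
    defer.returnValue(filtered)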
|
||||
|
||||
def ratelimit(self, requester):
|
||||
time_now = self.clock.time()
|
||||
allowed, time_allowed = self.ratelimiter.send_message(
|
||||
user_id, time_now,
|
||||
requester.user.to_string(), time_now,
|
||||
msg_rate_hz=self.hs.config.rc_messages_per_second,
|
||||
burst_count=self.hs.config.rc_message_burst_count,
|
||||
)
|
||||
if not allowed:
|
||||
raise LimitExceededError(
|
||||
retry_after_ms=int(1000*(time_allowed - time_now)),
|
||||
retry_after_ms=int(1000 * (time_allowed - time_now)),
|
||||
)
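The whitespace fix above does not change the arithmetic: the limiter reports the wall-clock time at which the next message is allowed, and the difference is converted to milliseconds for the client. A worked example with invented clock values:

# Worked example of the retry_after_ms computation (times in seconds).
time_now = 1000.0          # current clock reading
time_allowed = 1002.5      # when the ratelimiter will next allow a message
retry_after_ms = int(1000 * (time_allowed - time_now))
print(retry_after_ms)      # 2500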
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def _create_new_client_event(self, builder):
|
||||
latest_ret = yield self.store.get_latest_events_in_room(
|
||||
builder.room_id,
|
||||
)
|
||||
|
||||
if latest_ret:
|
||||
depth = max([d for _, _, d in latest_ret]) + 1
|
||||
def _create_new_client_event(self, builder, prev_event_ids=None):
|
||||
if prev_event_ids:
|
||||
prev_events = yield self.store.add_event_hashes(prev_event_ids)
|
||||
prev_max_depth = yield self.store.get_max_depth_of_events(prev_event_ids)
|
||||
depth = prev_max_depth + 1
|
||||
else:
|
||||
depth = 1
|
||||
latest_ret = yield self.store.get_latest_event_ids_and_hashes_in_room(
|
||||
builder.room_id,
|
||||
)
|
||||
|
||||
prev_events = [(e, h) for e, h, _ in latest_ret]
|
||||
if latest_ret:
|
||||
depth = max([d for _, _, d in latest_ret]) + 1
|
||||
else:
|
||||
depth = 1
|
||||
|
||||
prev_events = [
|
||||
(event_id, prev_hashes)
|
||||
for event_id, prev_hashes, _ in latest_ret
|
||||
]
|
||||
|
||||
builder.prev_events = prev_events
|
||||
builder.depth = depth
|
||||
@@ -181,13 +282,45 @@ class BaseHandler(object):
|
||||
(event, context,)
|
||||
)
|
||||
|
||||
def is_host_in_room(self, current_state):
|
||||
room_members = [
|
||||
(state_key, event.membership)
|
||||
for ((event_type, state_key), event) in current_state.items()
|
||||
if event_type == EventTypes.Member
|
||||
]
|
||||
if len(room_members) == 0:
|
||||
# Have we just created the room, and is this about to be the very
|
||||
# first member event?
|
||||
create_event = current_state.get(("m.room.create", ""))
|
||||
if create_event:
|
||||
return True
|
||||
for (state_key, membership) in room_members:
|
||||
if (
|
||||
UserID.from_string(state_key).domain == self.hs.hostname
|
||||
and membership == Membership.JOIN
|
||||
):
|
||||
return True
|
||||
return False
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def handle_new_client_event(self, event, context, extra_destinations=[],
|
||||
extra_users=[], suppress_auth=False):
|
||||
def handle_new_client_event(
|
||||
self,
|
||||
requester,
|
||||
event,
|
||||
context,
|
||||
ratelimit=True,
|
||||
extra_users=[]
|
||||
):
|
||||
# We now need to go and hit out to wherever we need to hit out to.
|
||||
|
||||
if not suppress_auth:
|
||||
if ratelimit:
|
||||
self.ratelimit(requester)
|
||||
|
||||
try:
|
||||
self.auth.check(event, auth_events=context.current_state)
|
||||
except AuthError as err:
|
||||
logger.warn("Denying new event %r because %s", event, err)
|
||||
raise err
|
||||
|
||||
yield self.maybe_kick_guest_users(event, context.current_state.values())
|
||||
|
||||
@@ -211,6 +344,12 @@ class BaseHandler(object):
|
||||
|
||||
if event.type == EventTypes.Member:
|
||||
if event.content["membership"] == Membership.INVITE:
|
||||
def is_inviter_member_event(e):
|
||||
return (
|
||||
e.type == EventTypes.Member and
|
||||
e.sender == event.sender
|
||||
)
|
||||
|
||||
event.unsigned["invite_room_state"] = [
|
||||
{
|
||||
"type": e.type,
|
||||
@@ -219,12 +358,8 @@ class BaseHandler(object):
|
||||
"sender": e.sender,
|
||||
}
|
||||
for k, e in context.current_state.items()
|
||||
if e.type in (
|
||||
EventTypes.JoinRules,
|
||||
EventTypes.CanonicalAlias,
|
||||
EventTypes.RoomAvatar,
|
||||
EventTypes.Name,
|
||||
)
|
||||
if e.type in self.hs.config.room_invite_state_types
|
||||
or is_inviter_member_event(e)
|
||||
]
|
||||
|
||||
invitee = UserID.from_string(event.state_key)
|
||||
@@ -260,11 +395,28 @@ class BaseHandler(object):
|
||||
"You don't have permission to redact events"
|
||||
)
|
||||
|
||||
if event.type == EventTypes.Create and context.current_state:
|
||||
raise AuthError(
|
||||
403,
|
||||
"Changing the room create event is forbidden",
|
||||
)
|
||||
|
||||
action_generator = ActionGenerator(self.hs)
|
||||
yield action_generator.handle_push_actions_for_event(
|
||||
event, context, self
|
||||
)
|
||||
|
||||
(event_stream_id, max_stream_id) = yield self.store.persist_event(
|
||||
event, context=context
|
||||
)
|
||||
|
||||
destinations = set(extra_destinations)
|
||||
# this intentionally does not yield: we don't care about the result
|
||||
# and don't need to wait for it.
|
||||
preserve_fn(self.hs.get_pusherpool().on_new_notifications)(
|
||||
event_stream_id, max_stream_id
|
||||
)
|
||||
|
||||
destinations = set()
|
||||
for k, s in context.current_state.items():
|
||||
try:
|
||||
if k[0] == EventTypes.Member:
|
||||
@@ -279,19 +431,11 @@ class BaseHandler(object):
|
||||
|
||||
with PreserveLoggingContext():
|
||||
# Don't block waiting on waking up all the listeners.
|
||||
notify_d = self.notifier.on_new_room_event(
|
||||
self.notifier.on_new_room_event(
|
||||
event, event_stream_id, max_stream_id,
|
||||
extra_users=extra_users
|
||||
)
|
||||
|
||||
def log_failure(f):
|
||||
logger.warn(
|
||||
"Failed to notify about %s: %s",
|
||||
event.event_id, f.value
|
||||
)
|
||||
|
||||
notify_d.addErrback(log_failure)
|
||||
|
||||
# If invite, remove room_state from unsigned before sending.
|
||||
event.unsigned.pop("invite_room_state", None)
|
||||
|
||||
@@ -315,7 +459,8 @@ class BaseHandler(object):
|
||||
if member_event.type != EventTypes.Member:
|
||||
continue
|
||||
|
||||
if not self.hs.is_mine(UserID.from_string(member_event.state_key)):
|
||||
target_user = UserID.from_string(member_event.state_key)
|
||||
if not self.hs.is_mine(target_user):
|
||||
continue
|
||||
|
||||
if member_event.content["membership"] not in {
|
||||
@@ -337,18 +482,13 @@ class BaseHandler(object):
|
||||
# and having homeservers have their own users leave keeps more
|
||||
# of that decision-making and control local to the guest-having
|
||||
# homeserver.
|
||||
message_handler = self.hs.get_handlers().message_handler
|
||||
yield message_handler.create_and_send_event(
|
||||
{
|
||||
"type": EventTypes.Member,
|
||||
"state_key": member_event.state_key,
|
||||
"content": {
|
||||
"membership": Membership.LEAVE,
|
||||
"kind": "guest"
|
||||
},
|
||||
"room_id": member_event.room_id,
|
||||
"sender": member_event.state_key
|
||||
},
|
||||
requester = Requester(target_user, "", True)
|
||||
handler = self.hs.get_handlers().room_member_handler
|
||||
yield handler.update_membership(
|
||||
requester,
|
||||
target_user,
|
||||
member_event.room_id,
|
||||
"leave",
|
||||
ratelimit=False,
|
||||
)
|
||||
except Exception as e:
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2015 OpenMarket Ltd
|
||||
# Copyright 2015, 2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2014, 2015 OpenMarket Ltd
|
||||
# Copyright 2014-2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2015 OpenMarket Ltd
|
||||
# Copyright 2015, 2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2014, 2015 OpenMarket Ltd
|
||||
# Copyright 2014 - 2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -35,6 +35,7 @@ logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class AuthHandler(BaseHandler):
|
||||
SESSION_EXPIRE_MS = 48 * 60 * 60 * 1000
|
||||
|
||||
def __init__(self, hs):
|
||||
super(AuthHandler, self).__init__(hs)
|
||||
@@ -48,6 +49,21 @@ class AuthHandler(BaseHandler):
|
||||
self.sessions = {}
|
||||
self.INVALID_TOKEN_HTTP_STATUS = 401
|
||||
|
||||
self.ldap_enabled = hs.config.ldap_enabled
|
||||
self.ldap_server = hs.config.ldap_server
|
||||
self.ldap_port = hs.config.ldap_port
|
||||
self.ldap_tls = hs.config.ldap_tls
|
||||
self.ldap_search_base = hs.config.ldap_search_base
|
||||
self.ldap_search_property = hs.config.ldap_search_property
|
||||
self.ldap_email_property = hs.config.ldap_email_property
|
||||
self.ldap_full_name_property = hs.config.ldap_full_name_property
|
||||
|
||||
if self.ldap_enabled is True:
|
||||
import ldap
|
||||
logger.info("Import ldap version: %s", ldap.__version__)
|
||||
|
||||
self.hs = hs # FIXME better possibility to access registrationHandler later?
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def check_auth(self, flows, clientdict, clientip):
|
||||
"""
|
||||
@@ -66,15 +82,18 @@ class AuthHandler(BaseHandler):
|
||||
'auth' key: this method prompts for auth if none is sent.
|
||||
clientip (str): The IP address of the client.
|
||||
Returns:
|
||||
A tuple of (authed, dict, dict) where authed is true if the client
|
||||
has successfully completed an auth flow. If it is true, the first
|
||||
dict contains the authenticated credentials of each stage.
|
||||
A tuple of (authed, dict, dict, session_id) where authed is true if
|
||||
the client has successfully completed an auth flow. If it is true
|
||||
the first dict contains the authenticated credentials of each stage.
|
||||
|
||||
If authed is false, the first dictionary is the server response to
|
||||
the login request and should be passed back to the client.
|
||||
|
||||
In either case, the second dict contains the parameters for this
|
||||
request (which may have been given only in a previous call).
|
||||
|
||||
session_id is the ID of this session, either passed in by the client
|
||||
or assigned by the call to check_auth
|
||||
"""
|
||||
|
||||
authdict = None
|
||||
@@ -103,7 +122,10 @@ class AuthHandler(BaseHandler):
|
||||
|
||||
if not authdict:
|
||||
defer.returnValue(
|
||||
(False, self._auth_dict_for_flows(flows, session), clientdict)
|
||||
(
|
||||
False, self._auth_dict_for_flows(flows, session),
|
||||
clientdict, session['id']
|
||||
)
|
||||
)
|
||||
|
||||
if 'creds' not in session:
|
||||
@@ -122,12 +144,11 @@ class AuthHandler(BaseHandler):
|
||||
for f in flows:
|
||||
if len(set(f) - set(creds.keys())) == 0:
|
||||
logger.info("Auth completed with creds: %r", creds)
|
||||
self._remove_session(session)
|
||||
defer.returnValue((True, creds, clientdict))
|
||||
defer.returnValue((True, creds, clientdict, session['id']))
|
||||
|
||||
ret = self._auth_dict_for_flows(flows, session)
|
||||
ret['completed'] = creds.keys()
|
||||
defer.returnValue((False, ret, clientdict))
|
||||
defer.returnValue((False, ret, clientdict, session['id']))
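check_auth now returns a 4-tuple ending with the session ID, so REST handlers can thread the same user-interactive auth session through later calls. A hedged sketch of a caller unpacking it (LoginType, body and the client-IP accessor are assumptions about the surrounding servlet, not shown in this diff):

# Hedged sketch of a servlet consuming the new return shape.
authed, result, params, session_id = yield self.auth_handler.check_auth(
    [[LoginType.PASSWORD]],              # acceptable flows
    body,                                # the client's request dict
    self.hs.get_ip_from_request(request)
)
if not authed:
    # 'result' is the flows/error dict to return to the client verbatim;
    # session_id identifies this half-finished auth session.
    defer.returnValue((401, result))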
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def add_oob_auth(self, stagetype, authdict, clientip):
|
||||
@@ -154,6 +175,51 @@ class AuthHandler(BaseHandler):
|
||||
defer.returnValue(True)
|
||||
defer.returnValue(False)
|
||||
|
||||
def get_session_id(self, clientdict):
|
||||
"""
|
||||
Gets the session ID for a client given the client dictionary
|
||||
|
||||
Args:
|
||||
clientdict: The dictionary sent by the client in the request
|
||||
|
||||
Returns:
|
||||
str|None: The string session ID the client sent. If the client did
|
||||
not send a session ID, returns None.
|
||||
"""
|
||||
sid = None
|
||||
if clientdict and 'auth' in clientdict:
|
||||
authdict = clientdict['auth']
|
||||
if 'session' in authdict:
|
||||
sid = authdict['session']
|
||||
return sid
|
||||
|
||||
def set_session_data(self, session_id, key, value):
|
||||
"""
|
||||
Store a key-value pair into the sessions data associated with this
|
||||
request. This data is stored server-side and cannot be modified by
|
||||
the client.
|
||||
|
||||
Args:
|
||||
session_id (string): The ID of this session as returned from check_auth
|
||||
key (string): The key to store the data under
|
||||
value (any): The data to store
|
||||
"""
|
||||
sess = self._get_session_info(session_id)
|
||||
sess.setdefault('serverdict', {})[key] = value
|
||||
self._save_session(sess)
|
||||
|
||||
def get_session_data(self, session_id, key, default=None):
|
||||
"""
|
||||
Retrieve data stored with set_session_data
|
||||
|
||||
Args:
|
||||
session_id (string): The ID of this session as returned from check_auth
|
||||
key (string): The key to store the data under
|
||||
default (any): Value to return if the key has not been set
|
||||
"""
|
||||
sess = self._get_session_info(session_id)
|
||||
return sess.setdefault('serverdict', {}).get(key, default)
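get_session_id, set_session_data and get_session_data give servlets a server-side scratch space keyed by the user-interactive auth session. A short sketch of the intended round trip (the key name and stored value are made up; the call shapes follow the docstrings above):

# Sketch of the intended usage of the three helpers defined above.
sid = auth_handler.get_session_id(clientdict)
if sid is not None:
    # Stash data server-side; the client cannot tamper with it.
    auth_handler.set_session_data(sid, "registration_params", {"bind_email": True})

# ...later, possibly in a different request for the same session:
params = auth_handler.get_session_data(sid, "registration_params", default={})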
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def _check_password_auth(self, authdict, _):
|
||||
if "user" not in authdict or "password" not in authdict:
|
||||
@@ -164,8 +230,10 @@ class AuthHandler(BaseHandler):
|
||||
if not user_id.startswith('@'):
|
||||
user_id = UserID.create(user_id, self.hs.hostname).to_string()
|
||||
|
||||
user_id, password_hash = yield self._find_user_id_and_pwd_hash(user_id)
|
||||
self._check_password(user_id, password, password_hash)
|
||||
if not (yield self._check_password(user_id, password)):
|
||||
logger.warn("Failed password login for user %s", user_id)
|
||||
raise LoginError(403, "", errcode=Codes.FORBIDDEN)
|
||||
|
||||
defer.returnValue(user_id)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
@@ -289,8 +357,10 @@ class AuthHandler(BaseHandler):
|
||||
StoreError if there was a problem storing the token.
|
||||
LoginError if there was an authentication problem.
|
||||
"""
|
||||
user_id, password_hash = yield self._find_user_id_and_pwd_hash(user_id)
|
||||
self._check_password(user_id, password, password_hash)
|
||||
|
||||
if not (yield self._check_password(user_id, password)):
|
||||
logger.warn("Failed password login for user %s", user_id)
|
||||
raise LoginError(403, "", errcode=Codes.FORBIDDEN)
|
||||
|
||||
logger.info("Logging in user %s", user_id)
|
||||
access_token = yield self.issue_access_token(user_id)
|
||||
@@ -356,11 +426,67 @@ class AuthHandler(BaseHandler):
|
||||
else:
|
||||
defer.returnValue(user_infos.popitem())
|
||||
|
||||
def _check_password(self, user_id, password, stored_hash):
|
||||
"""Checks that user_id has passed password, raises LoginError if not."""
|
||||
if not self.validate_hash(password, stored_hash):
|
||||
logger.warn("Failed password login for user %s", user_id)
|
||||
raise LoginError(403, "", errcode=Codes.FORBIDDEN)
|
||||
@defer.inlineCallbacks
|
||||
def _check_password(self, user_id, password):
|
||||
"""
|
||||
Returns:
|
||||
True if the user_id successfully authenticated
|
||||
"""
|
||||
valid_ldap = yield self._check_ldap_password(user_id, password)
|
||||
if valid_ldap:
|
||||
defer.returnValue(True)
|
||||
|
||||
valid_local_password = yield self._check_local_password(user_id, password)
|
||||
if valid_local_password:
|
||||
defer.returnValue(True)
|
||||
|
||||
defer.returnValue(False)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def _check_local_password(self, user_id, password):
|
||||
try:
|
||||
user_id, password_hash = yield self._find_user_id_and_pwd_hash(user_id)
|
||||
defer.returnValue(self.validate_hash(password, password_hash))
|
||||
except LoginError:
|
||||
defer.returnValue(False)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def _check_ldap_password(self, user_id, password):
|
||||
if not self.ldap_enabled:
|
||||
logger.debug("LDAP not configured")
|
||||
defer.returnValue(False)
|
||||
|
||||
import ldap
|
||||
|
||||
logger.info("Authenticating %s with LDAP" % user_id)
|
||||
try:
|
||||
ldap_url = "%s:%s" % (self.ldap_server, self.ldap_port)
|
||||
logger.debug("Connecting LDAP server at %s" % ldap_url)
|
||||
l = ldap.initialize(ldap_url)
|
||||
if self.ldap_tls:
|
||||
logger.debug("Initiating TLS")
|
||||
self._connection.start_tls_s()
|
||||
|
||||
local_name = UserID.from_string(user_id).localpart
|
||||
|
||||
dn = "%s=%s, %s" % (
|
||||
self.ldap_search_property,
|
||||
local_name,
|
||||
self.ldap_search_base)
|
||||
logger.debug("DN for LDAP authentication: %s" % dn)
|
||||
|
||||
l.simple_bind_s(dn.encode('utf-8'), password.encode('utf-8'))
|
||||
|
||||
if not (yield self.does_user_exist(user_id)):
|
||||
handler = self.hs.get_handlers().registration_handler
|
||||
user_id, access_token = (
|
||||
yield handler.register(localpart=local_name)
|
||||
)
|
||||
|
||||
defer.returnValue(True)
|
||||
except ldap.LDAPError, e:
|
||||
logger.warn("LDAP error: %s", e)
|
||||
defer.returnValue(False)
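The bind above authenticates against a DN built from the configured search property, the localpart of the Matrix ID and the search base. A standalone illustration of the string that gets constructed (the config values here are invented examples):

# Standalone example of the DN format used by the LDAP bind above.
ldap_search_property = "uid"                      # e.g. hs.config.ldap_search_property
ldap_search_base = "ou=users,dc=example,dc=org"   # e.g. hs.config.ldap_search_base
local_name = "alice"                              # localpart of @alice:example.org

dn = "%s=%s, %s" % (ldap_search_property, local_name, ldap_search_base)
print(dn)   # uid=alice, ou=users,dc=example,dc=org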
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def issue_access_token(self, user_id):
|
||||
@@ -408,7 +534,7 @@ class AuthHandler(BaseHandler):
|
||||
macaroon = pymacaroons.Macaroon.deserialize(login_token)
|
||||
auth_api = self.hs.get_auth()
|
||||
auth_api.validate_macaroon(macaroon, "login", True)
|
||||
return self._get_user_from_macaroon(macaroon)
|
||||
return self.get_user_from_macaroon(macaroon)
|
||||
except (pymacaroons.exceptions.MacaroonException, TypeError, ValueError):
|
||||
raise AuthError(401, "Invalid token", errcode=Codes.UNKNOWN_TOKEN)
|
||||
|
||||
@@ -421,7 +547,7 @@ class AuthHandler(BaseHandler):
|
||||
macaroon.add_first_party_caveat("user_id = %s" % (user_id,))
|
||||
return macaroon
|
||||
|
||||
def _get_user_from_macaroon(self, macaroon):
|
||||
def get_user_from_macaroon(self, macaroon):
|
||||
user_prefix = "user_id = "
|
||||
for caveat in macaroon.caveats:
|
||||
if caveat.caveat_id.startswith(user_prefix):
|
||||
@@ -432,13 +558,18 @@ class AuthHandler(BaseHandler):
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def set_password(self, user_id, newpassword):
|
||||
def set_password(self, user_id, newpassword, requester=None):
|
||||
password_hash = self.hash(newpassword)
|
||||
|
||||
except_access_token_ids = [requester.access_token_id] if requester else []
|
||||
|
||||
yield self.store.user_set_password_hash(user_id, password_hash)
|
||||
yield self.store.user_delete_access_tokens(user_id)
|
||||
yield self.hs.get_pusherpool().remove_pushers_by_user(user_id)
|
||||
yield self.store.flush_user(user_id)
|
||||
yield self.store.user_delete_access_tokens(
|
||||
user_id, except_access_token_ids
|
||||
)
|
||||
yield self.hs.get_pusherpool().remove_pushers_by_user(
|
||||
user_id, except_access_token_ids
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def add_threepid(self, user_id, medium, address, validated_at):
|
||||
@@ -450,11 +581,18 @@ class AuthHandler(BaseHandler):
|
||||
def _save_session(self, session):
|
||||
# TODO: Persistent storage
|
||||
logger.debug("Saving session %s", session)
|
||||
session["last_used"] = self.hs.get_clock().time_msec()
|
||||
self.sessions[session["id"]] = session
|
||||
self._prune_sessions()
|
||||
|
||||
def _remove_session(self, session):
|
||||
logger.debug("Removing session %s", session)
|
||||
del self.sessions[session["id"]]
|
||||
def _prune_sessions(self):
|
||||
for sid, sess in self.sessions.items():
|
||||
last_used = 0
|
||||
if 'last_used' in sess:
|
||||
last_used = sess['last_used']
|
||||
now = self.hs.get_clock().time_msec()
|
||||
if last_used < now - AuthHandler.SESSION_EXPIRE_MS:
|
||||
del self.sessions[sid]
|
||||
|
||||
def hash(self, password):
|
||||
"""Computes a secure hash of password.
|
||||
@@ -477,4 +615,4 @@ class AuthHandler(BaseHandler):
|
||||
Returns:
|
||||
Whether self.hash(password) == stored_hash (bool).
|
||||
"""
|
||||
return bcrypt.checkpw(password, stored_hash)
|
||||
return bcrypt.hashpw(password, stored_hash) == stored_hash
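The last hunk swaps bcrypt.checkpw for a manual comparison; this works because bcrypt embeds the salt in its output, so hashing the candidate password with the stored hash as the salt reproduces the stored hash exactly when the password matches. A standalone check of that property (requires the bcrypt package; the passwords are invented):

# Standalone demonstration of the property validate_hash now relies on.
import bcrypt

stored_hash = bcrypt.hashpw(b"correct horse", bcrypt.gensalt())
print(bcrypt.hashpw(b"correct horse", stored_hash) == stored_hash)   # True
print(bcrypt.hashpw(b"wrong password", stored_hash) == stored_hash)  # False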
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2014, 2015 OpenMarket Ltd
|
||||
# Copyright 2014-2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -17,9 +17,9 @@
|
||||
from twisted.internet import defer
|
||||
from ._base import BaseHandler
|
||||
|
||||
from synapse.api.errors import SynapseError, Codes, CodeMessageException
|
||||
from synapse.api.errors import SynapseError, Codes, CodeMessageException, AuthError
|
||||
from synapse.api.constants import EventTypes
|
||||
from synapse.types import RoomAlias
|
||||
from synapse.types import RoomAlias, UserID
|
||||
|
||||
import logging
|
||||
import string
|
||||
@@ -32,13 +32,15 @@ class DirectoryHandler(BaseHandler):
|
||||
def __init__(self, hs):
|
||||
super(DirectoryHandler, self).__init__(hs)
|
||||
|
||||
self.state = hs.get_state_handler()
|
||||
|
||||
self.federation = hs.get_replication_layer()
|
||||
self.federation.register_query_handler(
|
||||
"directory", self.on_directory_query
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def _create_association(self, room_alias, room_id, servers=None):
|
||||
def _create_association(self, room_alias, room_id, servers=None, creator=None):
|
||||
# general association creation for both human users and app services
|
||||
|
||||
for wchar in string.whitespace:
|
||||
@@ -60,7 +62,8 @@ class DirectoryHandler(BaseHandler):
|
||||
yield self.store.create_room_alias_association(
|
||||
room_alias,
|
||||
room_id,
|
||||
servers
|
||||
servers,
|
||||
creator=creator,
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
@@ -77,7 +80,7 @@ class DirectoryHandler(BaseHandler):
|
||||
400, "This alias is reserved by an application service.",
|
||||
errcode=Codes.EXCLUSIVE
|
||||
)
|
||||
yield self._create_association(room_alias, room_id, servers)
|
||||
yield self._create_association(room_alias, room_id, servers, creator=user_id)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def create_appservice_association(self, service, room_alias, room_id,
|
||||
@@ -92,10 +95,14 @@ class DirectoryHandler(BaseHandler):
|
||||
yield self._create_association(room_alias, room_id, servers)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def delete_association(self, user_id, room_alias):
|
||||
def delete_association(self, requester, user_id, room_alias):
|
||||
# association deletion for human users
|
||||
|
||||
# TODO Check if server admin
|
||||
can_delete = yield self._user_can_delete_alias(room_alias, user_id)
|
||||
if not can_delete:
|
||||
raise AuthError(
|
||||
403, "You don't have permission to delete the alias.",
|
||||
)
|
||||
|
||||
can_delete = yield self.can_modify_alias(
|
||||
room_alias,
|
||||
@@ -107,7 +114,25 @@ class DirectoryHandler(BaseHandler):
|
||||
errcode=Codes.EXCLUSIVE
|
||||
)
|
||||
|
||||
yield self._delete_association(room_alias)
|
||||
room_id = yield self._delete_association(room_alias)
|
||||
|
||||
try:
|
||||
yield self.send_room_alias_update_event(
|
||||
requester,
|
||||
requester.user.to_string(),
|
||||
room_id
|
||||
)
|
||||
|
||||
yield self._update_canonical_alias(
|
||||
requester,
|
||||
requester.user.to_string(),
|
||||
room_id,
|
||||
room_alias,
|
||||
)
|
||||
except AuthError as e:
|
||||
logger.info("Failed to update alias events: %s", e)
|
||||
|
||||
defer.returnValue(room_id)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def delete_appservice_association(self, service, room_alias):
|
||||
@@ -124,11 +149,9 @@ class DirectoryHandler(BaseHandler):
|
||||
if not self.hs.is_mine(room_alias):
|
||||
raise SynapseError(400, "Room alias must be local")
|
||||
|
||||
yield self.store.delete_room_alias(room_alias)
|
||||
room_id = yield self.store.delete_room_alias(room_alias)
|
||||
|
||||
# TODO - Looks like _update_room_alias_event has never been implemented
|
||||
# if room_id:
|
||||
# yield self._update_room_alias_events(user_id, room_id)
defer.returnValue(room_id)

@defer.inlineCallbacks
def get_association(self, room_alias):
@@ -175,8 +198,8 @@ class DirectoryHandler(BaseHandler):
# If this server is in the list of servers, return it first.
if self.server_name in servers:
servers = (
[self.server_name]
+ [s for s in servers if s != self.server_name]
[self.server_name] +
[s for s in servers if s != self.server_name]
)
else:
servers = list(servers)
@@ -212,17 +235,44 @@ class DirectoryHandler(BaseHandler):
)

@defer.inlineCallbacks
def send_room_alias_update_event(self, user_id, room_id):
def send_room_alias_update_event(self, requester, user_id, room_id):
aliases = yield self.store.get_aliases_for_room(room_id)

msg_handler = self.hs.get_handlers().message_handler
yield msg_handler.create_and_send_event({
"type": EventTypes.Aliases,
"state_key": self.hs.hostname,
"room_id": room_id,
"sender": user_id,
"content": {"aliases": aliases},
}, ratelimit=False)
yield msg_handler.create_and_send_nonmember_event(
requester,
{
"type": EventTypes.Aliases,
"state_key": self.hs.hostname,
"room_id": room_id,
"sender": user_id,
"content": {"aliases": aliases},
},
ratelimit=False
)

@defer.inlineCallbacks
def _update_canonical_alias(self, requester, user_id, room_id, room_alias):
alias_event = yield self.state.get_current_state(
room_id, EventTypes.CanonicalAlias, ""
)

alias_str = room_alias.to_string()
if not alias_event or alias_event.content.get("alias", "") != alias_str:
return

msg_handler = self.hs.get_handlers().message_handler
yield msg_handler.create_and_send_nonmember_event(
requester,
{
"type": EventTypes.CanonicalAlias,
"state_key": "",
"room_id": room_id,
"sender": user_id,
"content": {},
},
ratelimit=False
)

@defer.inlineCallbacks
def get_association_from_room_alias(self, room_alias):
@@ -257,3 +307,35 @@ class DirectoryHandler(BaseHandler):
return
# either no interested services, or no service with an exclusive lock
defer.returnValue(True)

@defer.inlineCallbacks
def _user_can_delete_alias(self, alias, user_id):
creator = yield self.store.get_room_alias_creator(alias.to_string())

if creator and creator == user_id:
defer.returnValue(True)

is_admin = yield self.auth.is_server_admin(UserID.from_string(user_id))
defer.returnValue(is_admin)

@defer.inlineCallbacks
def edit_published_room_list(self, requester, room_id, visibility):
"""Edit the entry of the room in the published room list.

requester
room_id (str)
visibility (str): "public" or "private"
"""
if requester.is_guest:
raise AuthError(403, "Guests cannot edit the published room list")

if visibility not in ["public", "private"]:
raise SynapseError(400, "Invalid visibility setting")

room = yield self.store.get_room(room_id)
if room is None:
raise SynapseError(400, "Unknown room")

yield self.auth.check_can_change_room_list(room_id, requester.user)

yield self.store.set_room_is_public(room_id, visibility == "public")
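The `edit_published_room_list` hunk above validates the requester and the visibility value before flipping the room's published flag. A minimal sketch of the same validation order, using hypothetical `store` and `auth` stand-ins rather than Synapse's actual classes:

```python
class PublishedListEditor(object):
    """Illustrative only: mirrors the checks in edit_published_room_list above."""

    def __init__(self, store, auth):
        self.store = store  # hypothetical storage layer with get_room()/set_room_is_public()
        self.auth = auth    # hypothetical auth checker

    def edit_published_room_list(self, requester, room_id, visibility):
        if requester.is_guest:
            raise PermissionError("Guests cannot edit the published room list")
        if visibility not in ("public", "private"):
            raise ValueError("Invalid visibility setting")
        if self.store.get_room(room_id) is None:
            raise ValueError("Unknown room")
        self.auth.check_can_change_room_list(room_id, requester.user)
        self.store.set_room_is_public(room_id, visibility == "public")
```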
+41
-79
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -18,6 +18,8 @@ from twisted.internet import defer
from synapse.util.logutils import log_function
from synapse.types import UserID
from synapse.events.utils import serialize_event
from synapse.api.constants import Membership, EventTypes
from synapse.events import EventBase

from ._base import BaseHandler

@@ -28,18 +30,6 @@ import random
logger = logging.getLogger(__name__)


def started_user_eventstream(distributor, user):
return distributor.fire("started_user_eventstream", user)


def stopped_user_eventstream(distributor, user):
return distributor.fire("stopped_user_eventstream", user)


def user_joined_room(distributor, user, room_id):
return distributor.fire("user_joined_room", user, room_id)


class EventStreamHandler(BaseHandler):

def __init__(self, hs):
@@ -58,88 +48,64 @@ class EventStreamHandler(BaseHandler):

self.notifier = hs.get_notifier()

@defer.inlineCallbacks
def started_stream(self, user):
"""Tells the presence handler that we have started an eventstream for
the user:

Args:
user (User): The user who started a stream.
Returns:
A deferred that completes once their presence has been updated.
"""
if user not in self._streams_per_user:
self._streams_per_user[user] = 0
if user in self._stop_timer_per_user:
try:
self.clock.cancel_call_later(
self._stop_timer_per_user.pop(user)
)
except:
logger.exception("Failed to cancel event timer")
else:
yield started_user_eventstream(self.distributor, user)

self._streams_per_user[user] += 1

def stopped_stream(self, user):
"""If there are no streams for a user this starts a timer that will
notify the presence handler that we haven't got an event stream for
the user unless the user starts a new stream in 30 seconds.

Args:
user (User): The user who stopped a stream.
"""
self._streams_per_user[user] -= 1
if not self._streams_per_user[user]:
del self._streams_per_user[user]

# 30 seconds of grace to allow the client to reconnect again
# before we think they're gone
def _later():
logger.debug("_later stopped_user_eventstream %s", user)

self._stop_timer_per_user.pop(user, None)

return stopped_user_eventstream(self.distributor, user)

logger.debug("Scheduling _later: for %s", user)
self._stop_timer_per_user[user] = (
self.clock.call_later(30, _later)
)

@defer.inlineCallbacks
@log_function
def get_stream(self, auth_user_id, pagin_config, timeout=0,
as_client_event=True, affect_presence=True,
only_room_events=False, room_id=None, is_guest=False):
only_keys=None, room_id=None, is_guest=False):
"""Fetches the events stream for a given user.

If `only_room_events` is `True` only room events will be returned.
If `only_keys` is not None, events from keys will be sent down.
"""
auth_user = UserID.from_string(auth_user_id)
presence_handler = self.hs.get_handlers().presence_handler

try:
if affect_presence:
yield self.started_stream(auth_user)

context = yield presence_handler.user_syncing(
auth_user_id, affect_presence=affect_presence,
)
with context:
if timeout:
# If they've set a timeout set a minimum limit.
timeout = max(timeout, 500)

# Add some randomness to this value to try and mitigate against
# thundering herds on restart.
timeout = random.randint(int(timeout*0.9), int(timeout*1.1))

if is_guest:
yield user_joined_room(self.distributor, auth_user, room_id)
timeout = random.randint(int(timeout * 0.9), int(timeout * 1.1))

events, tokens = yield self.notifier.get_events_for(
auth_user, pagin_config, timeout,
only_room_events=only_room_events,
is_guest=is_guest, guest_room_id=room_id
only_keys=only_keys,
is_guest=is_guest, explicit_room_id=room_id
)

# When the user joins a new room, or another user joins a currently
# joined room, we need to send down presence for those users.
to_add = []
for event in events:
if not isinstance(event, EventBase):
continue
if event.type == EventTypes.Member:
if event.membership != Membership.JOIN:
continue
# Send down presence.
if event.state_key == auth_user_id:
# Send down presence for everyone in the room.
users = yield self.store.get_users_in_room(event.room_id)
states = yield presence_handler.get_states(
users,
as_event=True,
)
to_add.extend(states)
else:

ev = yield presence_handler.get_state(
UserID.from_string(event.state_key),
as_event=True,
)
to_add.append(ev)

events.extend(to_add)

time_now = self.clock.time_msec()

chunks = [
@@ -154,10 +120,6 @@ class EventStreamHandler(BaseHandler):

defer.returnValue(chunk)

finally:
if affect_presence:
self.stopped_stream(auth_user)


class EventHandler(BaseHandler):

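In `get_stream` above the wait timeout is clamped to a minimum and then jittered by roughly ±10% to avoid a thundering herd of clients all re-polling at once after a restart. A standalone sketch of that calculation:

```python
import random

def jitter_timeout(timeout_ms, minimum_ms=500, spread=0.1):
    # Clamp to a sensible minimum, then randomise by +/- spread (10% here),
    # as the get_stream() hunk above does before waiting on the notifier.
    if timeout_ms:
        timeout_ms = max(timeout_ms, minimum_ms)
        timeout_ms = random.randint(int(timeout_ms * (1 - spread)),
                                    int(timeout_ms * (1 + spread)))
    return timeout_ms

# e.g. jitter_timeout(30000) returns a value between 27000 and 33000
```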
+268
-146
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,6 +14,9 @@
# limitations under the License.

"""Contains handlers for federation events."""
from signedjson.key import decode_verify_key_bytes
from signedjson.sign import verify_signed_json
from unpaddedbase64 import decode_base64

from ._base import BaseHandler

@@ -23,7 +26,7 @@ from synapse.api.errors import (
from synapse.api.constants import EventTypes, Membership, RejectedReason
from synapse.events.validator import EventValidator
from synapse.util import unwrapFirstError
from synapse.util.logcontext import PreserveLoggingContext
from synapse.util.logcontext import PreserveLoggingContext, preserve_fn
from synapse.util.logutils import log_function
from synapse.util.async import run_on_reactor
from synapse.util.frozenutils import unfreeze
@@ -36,6 +39,9 @@ from synapse.events.utils import prune_event

from synapse.util.retryutils import NotRetryingDestination

from synapse.push.action_generator import ActionGenerator
from synapse.util.distributor import user_joined_room

from twisted.internet import defer

import itertools
@@ -44,10 +50,6 @@ import logging
logger = logging.getLogger(__name__)


def user_joined_room(distributor, user, room_id):
return distributor.fire("user_joined_room", user, room_id)


class FederationHandler(BaseHandler):
"""Handles events that originated from federation.
Responsible for:
@@ -97,8 +99,7 @@ class FederationHandler(BaseHandler):

@log_function
@defer.inlineCallbacks
def on_receive_pdu(self, origin, pdu, backfilled, state=None,
auth_chain=None):
def on_receive_pdu(self, origin, pdu, state=None, auth_chain=None):
""" Called by the ReplicationLayer when we have a new pdu. We need to
do auth checks and put it through the StateHandler.
"""
@@ -118,7 +119,6 @@ class FederationHandler(BaseHandler):

# FIXME (erikj): Awful hack to make the case where we are not currently
# in the room work
current_state = None
is_in_room = yield self.auth.check_host_in_room(
event.room_id,
self.server_name
@@ -170,19 +170,13 @@ class FederationHandler(BaseHandler):
})
seen_ids.add(e.event_id)

yield self._handle_new_events(
origin,
event_infos,
outliers=True
)
yield self._handle_new_events(origin, event_infos)

try:
context, event_stream_id, max_stream_id = yield self._handle_new_event(
origin,
event,
state=state,
backfilled=backfilled,
current_state=current_state,
)
except AuthError as e:
raise FederationError(
@@ -211,26 +205,17 @@ class FederationHandler(BaseHandler):
except StoreError:
logger.exception("Failed to store room.")

if not backfilled:
extra_users = []
if event.type == EventTypes.Member:
target_user_id = event.state_key
target_user = UserID.from_string(target_user_id)
extra_users.append(target_user)
extra_users = []
if event.type == EventTypes.Member:
target_user_id = event.state_key
target_user = UserID.from_string(target_user_id)
extra_users.append(target_user)

with PreserveLoggingContext():
d = self.notifier.on_new_room_event(
event, event_stream_id, max_stream_id,
extra_users=extra_users
)

def log_failure(f):
logger.warn(
"Failed to notify about %s: %s",
event.event_id, f.value
)

d.addErrback(log_failure)
with PreserveLoggingContext():
self.notifier.on_new_room_event(
event, event_stream_id, max_stream_id,
extra_users=extra_users
)

if event.type == EventTypes.Member:
if event.membership == Membership.JOIN:
@@ -295,7 +280,14 @@ class FederationHandler(BaseHandler):
@defer.inlineCallbacks
def backfill(self, dest, room_id, limit, extremities=[]):
""" Trigger a backfill request to `dest` for the given `room_id`

This will attempt to get more events from the remote. This may return
be successfull and still return no events if the other side has no new
events to offer.
"""
if dest == self.server_name:
raise SynapseError(400, "Can't backfill from self.")

if not extremities:
extremities = yield self.store.get_oldest_events_in_room(room_id)

@@ -306,6 +298,16 @@ class FederationHandler(BaseHandler):
extremities=extremities,
)

# Don't bother processing events we already have.
seen_events = yield self.store.have_events_in_timeline(
set(e.event_id for e in events)
)

events = [e for e in events if e.event_id not in seen_events]

if not events:
defer.returnValue([])

event_map = {e.event_id: e for e in events}

event_ids = set(e.event_id for e in events)
@@ -365,6 +367,7 @@ class FederationHandler(BaseHandler):
for a in auth_events.values():
if a.event_id in seen_events:
continue
a.internal_metadata.outlier = True
ev_infos.append({
"event": a,
"auth_events": {
@@ -385,20 +388,23 @@ class FederationHandler(BaseHandler):
}
})

yield self._handle_new_events(
dest, ev_infos,
backfilled=True,
)

events.sort(key=lambda e: e.depth)

for event in events:
if event in events_to_state:
continue

ev_infos.append({
"event": event,
})

yield self._handle_new_events(
dest, ev_infos,
backfilled=True,
)
# We store these one at a time since each event depends on the
# previous to work out the state.
# TODO: We can probably do something more clever here.
yield self._handle_new_event(
dest, event
)

defer.returnValue(events)

@@ -462,7 +468,7 @@ class FederationHandler(BaseHandler):

likely_domains = [
domain for domain, depth in curr_domains
if domain is not self.server_name
if domain != self.server_name
]

@defer.inlineCallbacks
@@ -470,12 +476,16 @@ class FederationHandler(BaseHandler):
# TODO: Should we try multiple of these at a time?
for dom in domains:
try:
events = yield self.backfill(
yield self.backfill(
dom, room_id,
limit=100,
extremities=[e for e in extremities.keys()]
)
except SynapseError:
# If this succeeded then we probably already have the
# appropriate stuff.
# TODO: We can probably do something more intelligent here.
defer.returnValue(True)
except SynapseError as e:
logger.info(
"Failed to backfill from %s because %s",
dom, e,
@@ -500,8 +510,6 @@ class FederationHandler(BaseHandler):
)
continue

if events:
defer.returnValue(True)
defer.returnValue(False)

success = yield try_backfill(likely_domains)
@@ -596,7 +604,7 @@ class FederationHandler(BaseHandler):
handled_events = set()

try:
new_event = self._sign_event(event)
event = self._sign_event(event)
# Try the host we successfully got a response to /make_join/
# request first.
try:
@@ -604,7 +612,7 @@ class FederationHandler(BaseHandler):
target_hosts.insert(0, origin)
except ValueError:
pass
ret = yield self.replication_layer.send_join(target_hosts, new_event)
ret = yield self.replication_layer.send_join(target_hosts, event)

origin = ret["origin"]
state = ret["state"]
@@ -613,12 +621,12 @@ class FederationHandler(BaseHandler):

handled_events.update([s.event_id for s in state])
handled_events.update([a.event_id for a in auth_chain])
handled_events.add(new_event.event_id)
handled_events.add(event.event_id)

logger.debug("do_invite_join auth_chain: %s", auth_chain)
logger.debug("do_invite_join state: %s", state)

logger.debug("do_invite_join event: %s", new_event)
logger.debug("do_invite_join event: %s", event)

try:
yield self.store.store_room(
@@ -635,19 +643,11 @@ class FederationHandler(BaseHandler):
)

with PreserveLoggingContext():
d = self.notifier.on_new_room_event(
new_event, event_stream_id, max_stream_id,
self.notifier.on_new_room_event(
event, event_stream_id, max_stream_id,
extra_users=[joinee]
)

def log_failure(f):
logger.warn(
"Failed to notify about %s: %s",
new_event.event_id, f.value
)

d.addErrback(log_failure)

logger.debug("Finished joining %s to %s", joinee, room_id)
finally:
room_queue = self.room_queues[room_id]
@@ -658,7 +658,7 @@ class FederationHandler(BaseHandler):
continue

try:
self.on_receive_pdu(origin, p, backfilled=False)
self.on_receive_pdu(origin, p)
except:
logger.exception("Couldn't handle pdu")

@@ -681,9 +681,13 @@ class FederationHandler(BaseHandler):
"state_key": user_id,
})

event, context = yield self._create_new_client_event(
builder=builder,
)
try:
event, context = yield self._create_new_client_event(
builder=builder,
)
except AuthError as e:
logger.warn("Failed to create join %r because %s", event, e)
raise e

self.auth.check(event, auth_events=context.current_state)

@@ -722,18 +726,10 @@ class FederationHandler(BaseHandler):
extra_users.append(target_user)

with PreserveLoggingContext():
d = self.notifier.on_new_room_event(
self.notifier.on_new_room_event(
event, event_stream_id, max_stream_id, extra_users=extra_users
)

def log_failure(f):
logger.warn(
"Failed to notify about %s: %s",
event.event_id, f.value
)

d.addErrback(log_failure)

if event.type == EventTypes.Member:
if event.content["membership"] == Membership.JOIN:
user = UserID.from_string(event.state_key)
@@ -784,6 +780,7 @@ class FederationHandler(BaseHandler):
event = pdu

event.internal_metadata.outlier = True
event.internal_metadata.invite_from_remote = True

event.signatures.update(
compute_event_signature(
@@ -798,35 +795,32 @@ class FederationHandler(BaseHandler):
event_stream_id, max_stream_id = yield self.store.persist_event(
event,
context=context,
backfilled=False,
)

target_user = UserID.from_string(event.state_key)
with PreserveLoggingContext():
d = self.notifier.on_new_room_event(
self.notifier.on_new_room_event(
event, event_stream_id, max_stream_id,
extra_users=[target_user],
)

def log_failure(f):
logger.warn(
"Failed to notify about %s: %s",
event.event_id, f.value
)

d.addErrback(log_failure)

defer.returnValue(event)

@defer.inlineCallbacks
def do_remotely_reject_invite(self, target_hosts, room_id, user_id):
origin, event = yield self._make_and_verify_event(
target_hosts,
room_id,
user_id,
"leave"
)
signed_event = self._sign_event(event)
try:
origin, event = yield self._make_and_verify_event(
target_hosts,
room_id,
user_id,
"leave"
)
signed_event = self._sign_event(event)
except SynapseError:
raise
except CodeMessageException as e:
logger.warn("Failed to reject invite: %s", e)
raise SynapseError(500, "Failed to reject invite")

# Try the host we successfully got a response to /make_join/
# request first.
@@ -836,11 +830,31 @@ class FederationHandler(BaseHandler):
except ValueError:
pass

yield self.replication_layer.send_leave(
target_hosts,
signed_event
try:
yield self.replication_layer.send_leave(
target_hosts,
signed_event
)
except SynapseError:
raise
except CodeMessageException as e:
logger.warn("Failed to reject invite: %s", e)
raise SynapseError(500, "Failed to reject invite")

context = yield self.state_handler.compute_event_context(event)

event_stream_id, max_stream_id = yield self.store.persist_event(
event,
context=context,
)
defer.returnValue(None)

target_user = UserID.from_string(event.state_key)
self.notifier.on_new_room_event(
event, event_stream_id, max_stream_id,
extra_users=[target_user],
)

defer.returnValue(event)

@defer.inlineCallbacks
def _make_and_verify_event(self, target_hosts, room_id, user_id, membership,
@@ -905,7 +919,11 @@ class FederationHandler(BaseHandler):
builder=builder,
)

self.auth.check(event, auth_events=context.current_state)
try:
self.auth.check(event, auth_events=context.current_state)
except AuthError as e:
logger.warn("Failed to create new leave %r because %s", event, e)
raise e

defer.returnValue(event)

@@ -940,18 +958,10 @@ class FederationHandler(BaseHandler):
extra_users.append(target_user)

with PreserveLoggingContext():
d = self.notifier.on_new_room_event(
self.notifier.on_new_room_event(
event, event_stream_id, max_stream_id, extra_users=extra_users
)

def log_failure(f):
logger.warn(
"Failed to notify about %s: %s",
event.event_id, f.value
)

d.addErrback(log_failure)

new_pdu = event

destinations = set()
@@ -1094,30 +1104,41 @@ class FederationHandler(BaseHandler):

@defer.inlineCallbacks
@log_function
def _handle_new_event(self, origin, event, state=None, backfilled=False,
current_state=None, auth_events=None):

outlier = event.internal_metadata.is_outlier()

def _handle_new_event(self, origin, event, state=None, auth_events=None,
backfilled=False):
context = yield self._prep_event(
origin, event,
state=state,
auth_events=auth_events,
)

if not event.internal_metadata.is_outlier():
action_generator = ActionGenerator(self.hs)
yield action_generator.handle_push_actions_for_event(
event, context, self
)

event_stream_id, max_stream_id = yield self.store.persist_event(
event,
context=context,
backfilled=backfilled,
is_new_state=(not outlier and not backfilled),
current_state=current_state,
)

# this intentionally does not yield: we don't care about the result
# and don't need to wait for it.
preserve_fn(self.hs.get_pusherpool().on_new_notifications)(
event_stream_id, max_stream_id
)

defer.returnValue((context, event_stream_id, max_stream_id))

@defer.inlineCallbacks
def _handle_new_events(self, origin, event_infos, backfilled=False,
outliers=False):
def _handle_new_events(self, origin, event_infos, backfilled=False):
"""Creates the appropriate contexts and persists events. The events
should not depend on one another, e.g. this should be used to persist
a bunch of outliers, but not a chunk of individual events that depend
on each other for state calculations.
"""
contexts = yield defer.gatherResults(
[
self._prep_event(
@@ -1136,7 +1157,6 @@ class FederationHandler(BaseHandler):
for ev_info, context in itertools.izip(event_infos, contexts)
],
backfilled=backfilled,
is_new_state=(not outliers and not backfilled),
)

@defer.inlineCallbacks
@@ -1151,11 +1171,9 @@ class FederationHandler(BaseHandler):
"""
events_to_context = {}
for e in itertools.chain(auth_events, state):
ctx = yield self.state_handler.compute_event_context(
e, outlier=True,
)
events_to_context[e.event_id] = ctx
e.internal_metadata.outlier = True
ctx = yield self.state_handler.compute_event_context(e)
events_to_context[e.event_id] = ctx

event_map = {
e.event_id: e
@@ -1178,7 +1196,13 @@ class FederationHandler(BaseHandler):

try:
self.auth.check(e, auth_events=auth_for_e)
except AuthError as err:
except SynapseError as err:
# we may get SynapseErrors here as well as AuthErrors. For
# instance, there are a couple of (ancient) events in some
# rooms whose senders do not have the correct sigil; these
# cause SynapseErrors in auth.check. We don't want to give up
# the attempt to federate altogether in such cases.

logger.warn(
"Rejecting %s because %s",
e.event_id, err.msg
@@ -1193,17 +1217,14 @@ class FederationHandler(BaseHandler):
(e, events_to_context[e.event_id])
for e in itertools.chain(auth_events, state)
],
is_new_state=False,
)

new_event_context = yield self.state_handler.compute_event_context(
event, old_state=state, outlier=False,
event, old_state=state
)

event_stream_id, max_stream_id = yield self.store.persist_event(
event, new_event_context,
backfilled=False,
is_new_state=True,
current_state=state,
)

@@ -1211,10 +1232,9 @@ class FederationHandler(BaseHandler):

@defer.inlineCallbacks
def _prep_event(self, origin, event, state=None, auth_events=None):
outlier = event.internal_metadata.is_outlier()

context = yield self.state_handler.compute_event_context(
event, old_state=state, outlier=outlier,
event, old_state=state,
)

if not auth_events:
@@ -1500,8 +1520,9 @@ class FederationHandler(BaseHandler):

try:
self.auth.check(event, auth_events=auth_events)
except AuthError:
raise
except AuthError as e:
logger.warn("Failed auth resolution for %r because %s", event, e)
raise e

@defer.inlineCallbacks
def construct_auth_difference(self, local_auth, remote_auth):
@@ -1646,31 +1667,48 @@ class FederationHandler(BaseHandler):

@defer.inlineCallbacks
@log_function
def exchange_third_party_invite(self, invite):
sender = invite["sender"]
room_id = invite["room_id"]
def exchange_third_party_invite(
self,
sender_user_id,
target_user_id,
room_id,
signed,
):
third_party_invite = {
"signed": signed,
}

event_dict = {
"type": EventTypes.Member,
"content": {
"membership": Membership.INVITE,
"third_party_invite": invite,
"third_party_invite": third_party_invite,
},
"room_id": room_id,
"sender": sender,
"state_key": invite["mxid"],
"sender": sender_user_id,
"state_key": target_user_id,
}

if (yield self.auth.check_host_in_room(room_id, self.hs.hostname)):
builder = self.event_builder_factory.new(event_dict)
EventValidator().validate_new(builder)
event, context = yield self._create_new_client_event(builder=builder)
self.auth.check(event, context.current_state)
yield self._validate_keyserver(event, auth_events=context.current_state)

event, context = yield self.add_display_name_to_third_party_invite(
event_dict, event, context
)

try:
self.auth.check(event, context.current_state)
except AuthError as e:
logger.warn("Denying new third party invite %r because %s", event, e)
raise e

yield self._check_signature(event, auth_events=context.current_state)
member_handler = self.hs.get_handlers().room_member_handler
yield member_handler.change_membership(event, context)
yield member_handler.send_membership_event(None, event, context)
else:
destinations = set([x.split(":", 1)[-1] for x in (sender, room_id)])
destinations = set(x.split(":", 1)[-1] for x in (sender_user_id, room_id))
yield self.replication_layer.forward_third_party_invite(
destinations,
room_id,
@@ -1686,27 +1724,111 @@ class FederationHandler(BaseHandler):
builder=builder,
)

self.auth.check(event, auth_events=context.current_state)
yield self._validate_keyserver(event, auth_events=context.current_state)
event, context = yield self.add_display_name_to_third_party_invite(
event_dict, event, context
)

try:
self.auth.check(event, auth_events=context.current_state)
except AuthError as e:
logger.warn("Denying third party invite %r because %s", event, e)
raise e
yield self._check_signature(event, auth_events=context.current_state)

returned_invite = yield self.send_invite(origin, event)
# TODO: Make sure the signatures actually are correct.
event.signatures.update(returned_invite.signatures)
member_handler = self.hs.get_handlers().room_member_handler
yield member_handler.change_membership(event, context)
yield member_handler.send_membership_event(None, event, context)

@defer.inlineCallbacks
def _validate_keyserver(self, event, auth_events):
token = event.content["third_party_invite"]["signed"]["token"]
def add_display_name_to_third_party_invite(self, event_dict, event, context):
key = (
EventTypes.ThirdPartyInvite,
event.content["third_party_invite"]["signed"]["token"]
)
original_invite = context.current_state.get(key)
if not original_invite:
logger.info(
"Could not find invite event for third_party_invite - "
"discarding: %s" % (event_dict,)
)
return

display_name = original_invite.content["display_name"]
event_dict["content"]["third_party_invite"]["display_name"] = display_name
builder = self.event_builder_factory.new(event_dict)
EventValidator().validate_new(builder)
event, context = yield self._create_new_client_event(builder=builder)
defer.returnValue((event, context))

@defer.inlineCallbacks
def _check_signature(self, event, auth_events):
"""
Checks that the signature in the event is consistent with its invite.

Args:
event (Event): The m.room.member event to check
auth_events (dict<(event type, state_key), event>):

Raises:
AuthError: if signature didn't match any keys, or key has been
revoked,
SynapseError: if a transient error meant a key couldn't be checked
for revocation.
"""
signed = event.content["third_party_invite"]["signed"]
token = signed["token"]

invite_event = auth_events.get(
(EventTypes.ThirdPartyInvite, token,)
)

if not invite_event:
raise AuthError(403, "Could not find invite")

last_exception = None
for public_key_object in self.hs.get_auth().get_public_keys(invite_event):
try:
for server, signature_block in signed["signatures"].items():
for key_name, encoded_signature in signature_block.items():
if not key_name.startswith("ed25519:"):
continue

public_key = public_key_object["public_key"]
verify_key = decode_verify_key_bytes(
key_name,
decode_base64(public_key)
)
verify_signed_json(signed, server, verify_key)
if "key_validity_url" in public_key_object:
yield self._check_key_revocation(
public_key,
public_key_object["key_validity_url"]
)
return
except Exception as e:
last_exception = e
raise last_exception

@defer.inlineCallbacks
def _check_key_revocation(self, public_key, url):
"""
Checks whether public_key has been revoked.

Args:
public_key (str): base-64 encoded public key.
url (str): Key revocation URL.

Raises:
AuthError: if they key has been revoked.
SynapseError: if a transient error meant a key couldn't be checked
for revocation.
"""
try:
response = yield self.hs.get_simple_http_client().get_json(
invite_event.content["key_validity_url"],
{"public_key": invite_event.content["public_key"]}
url,
{"public_key": public_key}
)
except Exception:
raise SynapseError(

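The new `_check_signature` helper above verifies a third-party-invite signature with the `signedjson` and `unpaddedbase64` helpers imported at the top of the hunk. A minimal, hedged sketch of just that verification step (`check_invite_signature` is an illustrative wrapper, not part of the handler):

```python
from signedjson.key import decode_verify_key_bytes
from signedjson.sign import verify_signed_json
from unpaddedbase64 import decode_base64

def check_invite_signature(signed, server_name, key_name, public_key_base64):
    # Rebuild the ed25519 verify key from its unpadded-base64 form and check
    # the signed JSON object against it; verify_signed_json() raises if the
    # signature does not match, mirroring the loop in _check_signature above.
    verify_key = decode_verify_key_bytes(key_name, decode_base64(public_key_base64))
    verify_signed_json(signed, server_name, verify_key)
```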
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -36,14 +36,15 @@ class IdentityHandler(BaseHandler):

self.http_client = hs.get_simple_http_client()

self.trusted_id_servers = set(hs.config.trusted_third_party_id_servers)
self.trust_any_id_server_just_for_testing_do_not_use = (
hs.config.use_insecure_ssl_client_just_for_testing_do_not_use
)

@defer.inlineCallbacks
def threepid_from_creds(self, creds):
yield run_on_reactor()

# XXX: make this configurable!
# trustedIdServers = ['matrix.org', 'localhost:8090']
trustedIdServers = ['matrix.org', 'vector.im']

if 'id_server' in creds:
id_server = creds['id_server']
elif 'idServer' in creds:
@@ -58,10 +59,19 @@ class IdentityHandler(BaseHandler):
else:
raise SynapseError(400, "No client_secret in creds")

if id_server not in trustedIdServers:
logger.warn('%s is not a trusted ID server: rejecting 3pid ' +
'credentials', id_server)
defer.returnValue(None)
if id_server not in self.trusted_id_servers:
if self.trust_any_id_server_just_for_testing_do_not_use:
logger.warn(
"Trusting untrustworthy ID server %r even though it isn't"
" in the trusted id list for testing because"
" 'use_insecure_ssl_client_just_for_testing_do_not_use'"
" is set in the config",
id_server,
)
else:
logger.warn('%s is not a trusted ID server: rejecting 3pid ' +
'credentials', id_server)
defer.returnValue(None)

data = {}
try:

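The identity-handler hunk above replaces a hard-coded list of trusted identity servers with the configured `trusted_third_party_id_servers` set, plus an insecure testing override. A toy sketch of the resulting decision (standalone, not Synapse code):

```python
def accept_id_server(id_server, trusted_id_servers, trust_any_for_testing=False):
    # Accept only servers on the configured allow-list, unless the insecure
    # testing override (use_insecure_ssl_client_just_for_testing_do_not_use)
    # is set, in which case everything is accepted with a loud warning.
    if id_server in trusted_id_servers:
        return True
    return bool(trust_any_for_testing)

# accept_id_server("matrix.org", {"matrix.org", "vector.im"}) -> True
# accept_id_server("example.com", {"matrix.org", "vector.im"}) -> False
```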
+180
-141
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
# Copyright 2014 - 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,12 +16,13 @@
from twisted.internet import defer

from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import SynapseError, AuthError, Codes
from synapse.api.errors import AuthError, Codes, SynapseError
from synapse.streams.config import PaginationConfig
from synapse.events.utils import serialize_event
from synapse.events.validator import EventValidator
from synapse.util import unwrapFirstError
from synapse.util.logcontext import PreserveLoggingContext
from synapse.util.async import concurrently_execute
from synapse.util.caches.snapshot_cache import SnapshotCache
from synapse.types import UserID, RoomStreamToken, StreamToken

from ._base import BaseHandler

@@ -33,10 +34,6 @@ import logging
logger = logging.getLogger(__name__)


def collect_presencelike_data(distributor, user, content):
return distributor.fire("collect_presencelike_data", user, content)


class MessageHandler(BaseHandler):

def __init__(self, hs):
@@ -45,52 +42,23 @@ class MessageHandler(BaseHandler):
self.state = hs.get_state_handler()
self.clock = hs.get_clock()
self.validator = EventValidator()
self.snapshot_cache = SnapshotCache()

@defer.inlineCallbacks
def get_message(self, msg_id=None, room_id=None, sender_id=None,
user_id=None):
""" Retrieve a message.

Args:
msg_id (str): The message ID to obtain.
room_id (str): The room where the message resides.
sender_id (str): The user ID of the user who sent the message.
user_id (str): The user ID of the user making this request.
Returns:
The message, or None if no message exists.
Raises:
SynapseError if something went wrong.
"""
yield self.auth.check_joined_room(room_id, user_id)

# Pull out the message from the db
# msg = yield self.store.get_message(
# room_id=room_id,
# msg_id=msg_id,
# user_id=sender_id
# )

# TODO (erikj): Once we work out the correct c-s api we need to think
# on how to do this.

defer.returnValue(None)

@defer.inlineCallbacks
def get_messages(self, user_id=None, room_id=None, pagin_config=None,
as_client_event=True, is_guest=False):
def get_messages(self, requester, room_id=None, pagin_config=None,
as_client_event=True):
"""Get messages in a room.

Args:
user_id (str): The user requesting messages.
requester (Requester): The user requesting messages.
room_id (str): The room they want messages from.
pagin_config (synapse.api.streams.PaginationConfig): The pagination
config rules to apply, if any.
as_client_event (bool): True to get events in client-server format.
is_guest (bool): Whether the requesting user is a guest (as opposed
to a fully registered user).
Returns:
dict: Pagination API results
"""
user_id = requester.user.to_string()
data_source = self.hs.get_event_sources().sources["room"]

if pagin_config.from_token:
@@ -104,8 +72,6 @@ class MessageHandler(BaseHandler):
room_token = pagin_config.from_token.room_key

room_token = RoomStreamToken.parse(room_token)
if room_token.topological is None:
raise SynapseError(400, "Invalid token")

pagin_config.from_token = pagin_config.from_token.copy_and_replace(
"room_key", str(room_token)
@@ -113,36 +79,37 @@ class MessageHandler(BaseHandler):

source_config = pagin_config.get_source_config("room")

if not is_guest:
member_event = yield self.auth.check_user_was_in_room(room_id, user_id)
if member_event.membership == Membership.LEAVE:
# If they have left the room then clamp the token to be before
# they left the room.
# If they're a guest, we'll just 403 them if they're asking for
# events they can't see.
leave_token = yield self.store.get_topological_token_for_event(
member_event.event_id
)
leave_token = RoomStreamToken.parse(leave_token)
if leave_token.topological < room_token.topological:
source_config.from_key = str(leave_token)

if source_config.direction == "f":
if source_config.to_key is None:
source_config.to_key = str(leave_token)
else:
to_token = RoomStreamToken.parse(source_config.to_key)
if leave_token.topological < to_token.topological:
source_config.to_key = str(leave_token)

yield self.hs.get_handlers().federation_handler.maybe_backfill(
room_id, room_token.topological
membership, member_event_id = yield self._check_in_room_or_world_readable(
room_id, user_id
)

user = UserID.from_string(user_id)
if source_config.direction == 'b':
# if we're going backwards, we might need to backfill. This
# requires that we have a topo token.
if room_token.topological:
max_topo = room_token.topological
else:
max_topo = yield self.store.get_max_topological_token_for_stream_and_room(
room_id, room_token.stream
)

if membership == Membership.LEAVE:
# If they have left the room then clamp the token to be before
# they left the room, to save the effort of loading from the
# database.
leave_token = yield self.store.get_topological_token_for_event(
member_event_id
)
leave_token = RoomStreamToken.parse(leave_token)
if leave_token.topological < max_topo:
source_config.from_key = str(leave_token)

yield self.hs.get_handlers().federation_handler.maybe_backfill(
room_id, max_topo
)

events, next_key = yield data_source.get_pagination_rows(
user, source_config, room_id
requester.user, source_config, room_id
)

next_token = pagin_config.from_token.copy_and_replace(
@@ -156,7 +123,11 @@ class MessageHandler(BaseHandler):
"end": next_token.to_string(),
})

events = yield self._filter_events_for_client(user_id, events, is_guest=is_guest)
events = yield self._filter_events_for_client(
user_id,
events,
is_peeking=(member_event_id is None),
)

time_now = self.clock.time_msec()

@@ -172,38 +143,45 @@ class MessageHandler(BaseHandler):
defer.returnValue(chunk)

@defer.inlineCallbacks
def create_and_send_event(self, event_dict, ratelimit=True,
token_id=None, txn_id=None, is_guest=False):
""" Given a dict from a client, create and handle a new event.
def create_event(self, event_dict, token_id=None, txn_id=None, prev_event_ids=None):
"""
Given a dict from a client, create a new event.

Creates an FrozenEvent object, filling out auth_events, prev_events,
etc.

Adds display names to Join membership events.

Persists and notifies local clients and federation.

Args:
event_dict (dict): An entire event
token_id (str)
txn_id (str)
prev_event_ids (list): The prev event ids to use when creating the event

Returns:
Tuple of created event (FrozenEvent), Context
"""
builder = self.event_builder_factory.new(event_dict)

self.validator.validate_new(builder)

if ratelimit:
self.ratelimit(builder.user_id)
# TODO(paul): Why does 'event' not have a 'user' object?
user = UserID.from_string(builder.user_id)
assert self.hs.is_mine(user), "User must be our own: %s" % (user,)

if builder.type == EventTypes.Member:
membership = builder.content.get("membership", None)
if membership == Membership.JOIN:
joinee = UserID.from_string(builder.state_key)
target = UserID.from_string(builder.state_key)

if membership in {Membership.JOIN, Membership.INVITE}:
# If event doesn't include a display name, add one.
yield collect_presencelike_data(
self.distributor, joinee, builder.content
)
profile = self.hs.get_handlers().profile_handler
content = builder.content

try:
content["displayname"] = yield profile.get_displayname(target)
content["avatar_url"] = yield profile.get_avatar_url(target)
except Exception as e:
logger.info(
"Failed to get profile information for %r: %s",
target, e
)

if token_id is not None:
builder.internal_metadata.token_id = token_id
@@ -213,32 +191,86 @@ class MessageHandler(BaseHandler):

event, context = yield self._create_new_client_event(
builder=builder,
prev_event_ids=prev_event_ids,
)
defer.returnValue((event, context))

@defer.inlineCallbacks
def send_nonmember_event(self, requester, event, context, ratelimit=True):
"""
Persists and notifies local clients and federation of an event.

Args:
event (FrozenEvent) the event to send.
context (Context) the context of the event.
ratelimit (bool): Whether to rate limit this send.
is_guest (bool): Whether the sender is a guest.
"""
if event.type == EventTypes.Member:
raise SynapseError(
500,
"Tried to send member event through non-member codepath"
)

user = UserID.from_string(event.sender)

assert self.hs.is_mine(user), "User must be our own: %s" % (user,)

if event.is_state():
prev_state = context.current_state.get((event.type, event.state_key))
if prev_state and event.user_id == prev_state.user_id:
prev_content = encode_canonical_json(prev_state.content)
next_content = encode_canonical_json(event.content)
if prev_content == next_content:
# Duplicate suppression for state updates with same sender
# and content.
defer.returnValue(prev_state)
prev_state = self.deduplicate_state_event(event, context)
if prev_state is not None:
defer.returnValue(prev_state)

if event.type == EventTypes.Member:
member_handler = self.hs.get_handlers().room_member_handler
yield member_handler.change_membership(event, context, is_guest=is_guest)
else:
yield self.handle_new_client_event(
event=event,
context=context,
)
yield self.handle_new_client_event(
requester=requester,
event=event,
context=context,
ratelimit=ratelimit,
)

if event.type == EventTypes.Message:
presence = self.hs.get_handlers().presence_handler
with PreserveLoggingContext():
presence.bump_presence_active_time(user)
yield presence.bump_presence_active_time(user)

def deduplicate_state_event(self, event, context):
"""
Checks whether event is in the latest resolved state in context.

If so, returns the version of the event in context.
Otherwise, returns None.
"""
prev_event = context.current_state.get((event.type, event.state_key))
if prev_event and event.user_id == prev_event.user_id:
prev_content = encode_canonical_json(prev_event.content)
next_content = encode_canonical_json(event.content)
if prev_content == next_content:
return prev_event
return None

@defer.inlineCallbacks
def create_and_send_nonmember_event(
self,
requester,
event_dict,
ratelimit=True,
txn_id=None
):
"""
Creates an event, then sends it.

See self.create_event and self.send_nonmember_event.
"""
event, context = yield self.create_event(
event_dict,
token_id=requester.access_token_id,
txn_id=txn_id
)
yield self.send_nonmember_event(
requester,
event,
context,
ratelimit=ratelimit,
)
defer.returnValue(event)

@defer.inlineCallbacks
@@ -254,7 +286,7 @@ class MessageHandler(BaseHandler):
SynapseError if something went wrong.
"""
membership, membership_event_id = yield self._check_in_room_or_world_readable(
room_id, user_id, is_guest
room_id, user_id
)

if membership == Membership.JOIN:
@@ -271,7 +303,7 @@ class MessageHandler(BaseHandler):
defer.returnValue(data)

@defer.inlineCallbacks
def _check_in_room_or_world_readable(self, room_id, user_id, is_guest):
def _check_in_room_or_world_readable(self, room_id, user_id):
try:
# check_user_was_in_room will return the most recent membership
# event for the user if:
@@ -281,7 +313,7 @@ class MessageHandler(BaseHandler):
member_event = yield self.auth.check_user_was_in_room(room_id, user_id)
defer.returnValue((member_event.membership, member_event.event_id))
return
except AuthError, auth_error:
except AuthError:
visibility = yield self.state_handler.get_current_state(
room_id, EventTypes.RoomHistoryVisibility, ""
)
@@ -291,8 +323,6 @@ class MessageHandler(BaseHandler):
):
defer.returnValue((Membership.JOIN, None))
return
if not is_guest:
raise auth_error
raise AuthError(
403, "Guest access not allowed", errcode=Codes.GUEST_ACCESS_FORBIDDEN
)
@@ -310,7 +340,7 @@ class MessageHandler(BaseHandler):
A list of dicts representing state events. [{}, {}, {}]
"""
membership, membership_event_id = yield self._check_in_room_or_world_readable(
room_id, user_id, is_guest
room_id, user_id
)

if membership == Membership.JOIN:
@@ -326,7 +356,6 @@ class MessageHandler(BaseHandler):
[serialize_event(c, now) for c in room_state.values()]
)

@defer.inlineCallbacks
def snapshot_all_rooms(self, user_id=None, pagin_config=None,
as_client_event=True, include_archived=False):
"""Retrieve a snapshot of all rooms the user is invited or has joined.
@@ -346,6 +375,28 @@ class MessageHandler(BaseHandler):
is joined on, may return a "messages" key with messages, depending
on the specified PaginationConfig.
"""
key = (
user_id,
pagin_config.from_token,
pagin_config.to_token,
pagin_config.direction,
pagin_config.limit,
as_client_event,
include_archived,
)
now_ms = self.clock.time_msec()
result = self.snapshot_cache.get(now_ms, key)
if result is not None:
return result

return self.snapshot_cache.set(now_ms, key, self._snapshot_all_rooms(
user_id, pagin_config, as_client_event, include_archived
))

@defer.inlineCallbacks
def _snapshot_all_rooms(self, user_id=None, pagin_config=None,
as_client_event=True, include_archived=False):

memberships = [Membership.INVITE, Membership.JOIN]
if include_archived:
memberships.append(Membership.LEAVE)
@@ -473,14 +524,7 @@ class MessageHandler(BaseHandler):
except:
logger.exception("Failed to get snapshot")

# Only do N rooms at once
n = 5
d_list = [handle_room(e) for e in room_list]
for i in range(0, len(d_list), n):
yield defer.gatherResults(
d_list[i:i + n],
consumeErrors=True
).addErrback(unwrapFirstError)
yield concurrently_execute(handle_room, room_list, 10)

account_data_events = []
for account_data_type, content in account_data.items():
@@ -500,13 +544,13 @@ class MessageHandler(BaseHandler):
defer.returnValue(ret)

@defer.inlineCallbacks
def room_initial_sync(self, user_id, room_id, pagin_config=None, is_guest=False):
def room_initial_sync(self, requester, room_id, pagin_config=None):
"""Capture the a snapshot of a room. If user is currently a member of
the room this will be what is currently in the room. If the user left
the room this will be what was in the room when they left.

Args:
user_id(str): The user to get a snapshot for.
requester(Requester): The user to get a snapshot for.
room_id(str): The room to get a snapshot of.
pagin_config(synapse.streams.config.PaginationConfig):
The pagination config used to determine how many messages to
@@ -517,19 +561,20 @@ class MessageHandler(BaseHandler):
A JSON serialisable dict with the snapshot of the room.
"""

user_id = requester.user.to_string()

membership, member_event_id = yield self._check_in_room_or_world_readable(
room_id,
user_id,
is_guest
room_id, user_id,
)
is_peeking = member_event_id is None

if membership == Membership.JOIN:
result = yield self._room_initial_sync_joined(
user_id, room_id, pagin_config, membership, is_guest
user_id, room_id, pagin_config, membership, is_peeking
)
elif membership == Membership.LEAVE:
result = yield self._room_initial_sync_parted(
user_id, room_id, pagin_config, membership, member_event_id, is_guest
user_id, room_id, pagin_config, membership, member_event_id, is_peeking
)

account_data_events = []
@@ -553,7 +598,7 @@ class MessageHandler(BaseHandler):

@defer.inlineCallbacks
def _room_initial_sync_parted(self, user_id, room_id, pagin_config,
membership, member_event_id, is_guest):
membership, member_event_id, is_peeking):
room_state = yield self.store.get_state_for_events(
[member_event_id], None
)
@@ -575,11 +620,11 @@ class MessageHandler(BaseHandler):
)

messages = yield self._filter_events_for_client(
user_id, messages, is_guest=is_guest
user_id, messages, is_peeking=is_peeking
)

start_token = StreamToken(token[0], 0, 0, 0, 0)
end_token = StreamToken(token[1], 0, 0, 0, 0)
start_token = StreamToken.START.copy_and_replace("room_key", token[0])
end_token = StreamToken.START.copy_and_replace("room_key", token[1])

time_now = self.clock.time_msec()

@@ -598,15 +643,11 @@ class MessageHandler(BaseHandler):

@defer.inlineCallbacks
def _room_initial_sync_joined(self, user_id, room_id, pagin_config,
membership, is_guest):
membership, is_peeking):
current_state = yield self.state.get_current_state(
room_id=room_id,
)

# TODO(paul): I wish I was called with user objects not user_id
# strings...
auth_user = UserID.from_string(user_id)

# TODO: These concurrently
time_now = self.clock.time_msec()
state = [
@@ -631,13 +672,11 @@ class MessageHandler(BaseHandler):
@defer.inlineCallbacks
def get_presence():
states = yield presence_handler.get_states(
target_users=[UserID.from_string(m.user_id) for m in room_members],
auth_user=auth_user,
[m.user_id for m in room_members],
as_event=True,
check_auth=False,
)

defer.returnValue(states.values())
defer.returnValue(states)

@defer.inlineCallbacks
def get_receipts():
@@ -662,7 +701,7 @@ class MessageHandler(BaseHandler):
).addErrback(unwrapFirstError)

messages = yield self._filter_events_for_client(
user_id, messages, is_guest=is_guest, require_all_visible_for_guests=False
user_id, messages, is_peeking=is_peeking,
)

start_token = now_token.copy_and_replace("room_key", token[0])
@@ -681,7 +720,7 @@ class MessageHandler(BaseHandler):
"presence": presence,
"receipts": receipts,
}
if not is_guest:
if not is_peeking:
ret["membership"] = membership

defer.returnValue(ret)

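The new `deduplicate_state_event` helper above suppresses state updates whose sender and canonical-JSON content match the event already in the resolved state. A hedged sketch of the same comparison using the `canonicaljson` library the handler relies on; `prev_event` and `new_event` are assumed to be duck-typed objects with `user_id` and `content` attributes:

```python
from canonicaljson import encode_canonical_json

def is_duplicate_state(prev_event, new_event):
    # A state update is a no-op if the previous event in the resolved state
    # has the same sender and byte-identical canonical-JSON content, which is
    # the check deduplicate_state_event() performs above.
    if prev_event is None or prev_event.user_id != new_event.user_id:
        return False
    return (encode_canonical_json(prev_event.content)
            == encode_canonical_json(new_event.content))
```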
+870
-1075
File diff suppressed because it is too large
+22
-65
@@ -1,5 +1,5 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2014, 2015 OpenMarket Ltd
|
||||
# Copyright 2014-2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -16,9 +16,7 @@
|
||||
from twisted.internet import defer
|
||||
|
||||
from synapse.api.errors import SynapseError, AuthError, CodeMessageException
|
||||
from synapse.api.constants import EventTypes, Membership
|
||||
from synapse.types import UserID
|
||||
from synapse.util import unwrapFirstError
|
||||
from synapse.types import UserID, Requester
|
||||
|
||||
from ._base import BaseHandler
|
||||
|
||||
@@ -28,14 +26,6 @@ import logging
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def changed_presencelike_data(distributor, user, state):
|
||||
return distributor.fire("changed_presencelike_data", user, state)
|
||||
|
||||
|
||||
def collect_presencelike_data(distributor, user, content):
|
||||
return distributor.fire("collect_presencelike_data", user, content)
|
||||
|
||||
|
||||
class ProfileHandler(BaseHandler):
|
||||
|
||||
def __init__(self, hs):
|
||||
@@ -47,14 +37,9 @@ class ProfileHandler(BaseHandler):
|
||||
)
|
||||
|
||||
distributor = hs.get_distributor()
|
||||
self.distributor = distributor
|
||||
|
||||
distributor.observe("registered_user", self.registered_user)
|
||||
|
||||
distributor.observe(
|
||||
"collect_presencelike_data", self.collect_presencelike_data
|
||||
)
|
||||
|
||||
def registered_user(self, user):
|
||||
return self.store.create_profile(user.localpart)
|
||||
|
||||
@@ -87,13 +72,13 @@ class ProfileHandler(BaseHandler):
         defer.returnValue(result["displayname"])

     @defer.inlineCallbacks
-    def set_displayname(self, target_user, auth_user, new_displayname):
+    def set_displayname(self, target_user, requester, new_displayname):
         """target_user is the user whose displayname is to be changed;
         auth_user is the user attempting to make this change."""
         if not self.hs.is_mine(target_user):
             raise SynapseError(400, "User is not hosted on this Home Server")

-        if target_user != auth_user:
+        if target_user != requester.user:
             raise AuthError(400, "Cannot set another user's displayname")

         if new_displayname == '':
@@ -103,11 +88,7 @@ class ProfileHandler(BaseHandler):
             target_user.localpart, new_displayname
         )

-        yield changed_presencelike_data(self.distributor, target_user, {
-            "displayname": new_displayname,
-        })
-
-        yield self._update_join_states(target_user)
+        yield self._update_join_states(requester)

     @defer.inlineCallbacks
     def get_avatar_url(self, target_user):
@@ -137,42 +118,20 @@ class ProfileHandler(BaseHandler):
         defer.returnValue(result["avatar_url"])

     @defer.inlineCallbacks
-    def set_avatar_url(self, target_user, auth_user, new_avatar_url):
+    def set_avatar_url(self, target_user, requester, new_avatar_url):
         """target_user is the user whose avatar_url is to be changed;
         auth_user is the user attempting to make this change."""
         if not self.hs.is_mine(target_user):
             raise SynapseError(400, "User is not hosted on this Home Server")

-        if target_user != auth_user:
+        if target_user != requester.user:
             raise AuthError(400, "Cannot set another user's avatar_url")

         yield self.store.set_profile_avatar_url(
             target_user.localpart, new_avatar_url
         )

-        yield changed_presencelike_data(self.distributor, target_user, {
-            "avatar_url": new_avatar_url,
-        })
-
-        yield self._update_join_states(target_user)
-
-    @defer.inlineCallbacks
-    def collect_presencelike_data(self, user, state):
-        if not self.hs.is_mine(user):
-            defer.returnValue(None)
-
-        (displayname, avatar_url) = yield defer.gatherResults(
-            [
-                self.store.get_profile_displayname(user.localpart),
-                self.store.get_profile_avatar_url(user.localpart),
-            ],
-            consumeErrors=True
-        ).addErrback(unwrapFirstError)
-
-        state["displayname"] = displayname
-        state["avatar_url"] = avatar_url
-
-        defer.returnValue(None)
+        yield self._update_join_states(requester)

     @defer.inlineCallbacks
     def on_profile_query(self, args):
@@ -197,32 +156,30 @@ class ProfileHandler(BaseHandler):
         defer.returnValue(response)

     @defer.inlineCallbacks
-    def _update_join_states(self, user):
+    def _update_join_states(self, requester):
+        user = requester.user
         if not self.hs.is_mine(user):
             return

-        self.ratelimit(user.to_string())
+        self.ratelimit(requester)

         joins = yield self.store.get_rooms_for_user(
             user.to_string(),
         )

         for j in joins:
-            content = {
-                "membership": Membership.JOIN,
-            }
-
-            yield collect_presencelike_data(self.distributor, user, content)
-
-            msg_handler = self.hs.get_handlers().message_handler
+            handler = self.hs.get_handlers().room_member_handler
             try:
-                yield msg_handler.create_and_send_event({
-                    "type": EventTypes.Member,
-                    "room_id": j.room_id,
-                    "state_key": user.to_string(),
-                    "content": content,
-                    "sender": user.to_string()
-                }, ratelimit=False)
+                # Assume the user isn't a guest because we don't let guests set
+                # profile or avatar data.
+                requester = Requester(user, "", False)
+                yield handler.update_membership(
+                    requester,
+                    user,
+                    j.room_id,
+                    "join",  # We treat a profile update like a join.
+                    ratelimit=False,  # Try to hide that these events aren't atomic.
+                )
             except Exception as e:
                 logger.warn(
                     "Failed to update join event for room %s - %s",
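In the ProfileHandler hunks above, a displayname or avatar_url change no longer fires changed_presencelike_data/collect_presencelike_data and hand-builds m.room.member events through the message handler; it now calls room_member_handler.update_membership with a Requester, letting the normal membership path re-send the member event (which carries the profile) in each joined room. A rough sketch of that flow with stand-in types follows; the Requester field names are an assumption inferred from the Requester(user, "", False) call in the diff, and update_membership/propagate_profile_change here are illustrative, not the real Synapse APIs.

from collections import namedtuple

# Stand-in for synapse.types.Requester; field names are assumptions.
Requester = namedtuple("Requester", ["user", "access_token_id", "is_guest"])


def update_membership(requester, target_user, room_id, action, ratelimit=True):
    # The real handler (re)sends the m.room.member state event for target_user,
    # which is what carries displayname/avatar_url into each joined room.
    print("%s: %s %s in %s (ratelimit=%s)" % (
        requester.user, action, target_user, room_id, ratelimit))


def propagate_profile_change(user, joined_room_ids):
    # Guests cannot change profile data, so is_guest is hard-coded to False,
    # mirroring the Requester(user, "", False) construction in the diff.
    requester = Requester(user, "", False)
    for room_id in joined_room_ids:
        # Treat the profile update like a re-join, without rate limiting.
        update_membership(requester, user, room_id, "join", ratelimit=False)


propagate_profile_change("@alice:example.org", ["!a:example.org", "!b:example.org"])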
Some files were not shown because too many files have changed in this diff.