Compare commits: v0.1...anoa/fix_e (48 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 9d567cc26c | |
| | dc361798e5 | |
| | f536e7c6a7 | |
| | ad2c415965 | |
| | 2347e3c362 | |
| | e906115472 | |
| | ccddd9b9a8 | |
| | 228c3c405f | |
| | 58a755cdc3 | |
| | 8fde611a8c | |
| | 8f15832950 | |
| | 9fe6ad5fef | |
| | fe2f2fc530 | |
| | 6be336c0d8 | |
| | 3b7a35a59a | |
| | a9bcae9f50 | |
| | d4f91e7e9f | |
| | 4037d3220a | |
| | 123c04daa7 | |
| | 62a2d60d72 | |
| | 958d69f300 | |
| | 15056ca208 | |
| | 7a48d0bab8 | |
| | e23ab7f41a | |
| | 1ec7d656dd | |
| | 458e51df7a | |
| | 63eb4a1b62 | |
| | 8c97f6414c | |
| | 865077f1d1 | |
| | 7c8c3b8437 | |
| | 3e013b7c8e | |
| | 2a12d76646 | |
| | 97a8b4caf7 | |
| | df3a5db629 | |
| | 85b0bd8fe0 | |
| | 105e7f6ed3 | |
| | 3b476f5767 | |
| | d94916852f | |
| | 84c6ea1af8 | |
| | 45df38e61b | |
| | 2e9cf7dda5 | |
| | 14c24c9037 | |
| | c159803067 | |
| | 0c4a99607e | |
| | 62921fb53e | |
| | 32768e96d4 | |
| | adcd5368b0 | |
| | 73bbaf2bc6 | |
@@ -49,14 +49,15 @@ steps:

- command:
- "python -m pip install tox"
- "apt-get update && apt-get install -y python3.5 python3.5-dev python3-pip libxml2-dev libxslt-dev zlib1g-dev"
- "python3.5 -m pip install tox"
- "tox -e py35-old,codecov"
label: ":python: 3.5 / SQLite / Old Deps"
env:
TRIAL_FLAGS: "-j 2"
plugins:
- docker#v3.0.1:
image: "python:3.5"
image: "ubuntu:xenial" # We use xenail to get an old sqlite and python
propagate-environment: true
retry:
automatic:

@@ -1,5 +1,4 @@
comment:
layout: "diff"
comment: off

coverage:
status:

New changelog files (one line each):

changelog.d/5693.bugfix: Fix UISIs during homeserver outage.
changelog.d/5746.misc: Reduce database IO usage by optimising queries for current membership.
changelog.d/5752.misc: Reduce database IO usage by optimising queries for current membership.
changelog.d/5770.misc: Reduce database IO usage by optimising queries for current membership.
changelog.d/5774.misc: Reduce database IO usage by optimising queries for current membership.
changelog.d/5775.bugfix: Fix debian packaging scripts to correctly build sid packages.
changelog.d/5782.removal: Remove non-functional 'expire_access_token' setting.
changelog.d/5783.feature: Synapse can now be configured to not join remote rooms of a given "complexity" (currently, state events) over federation. This option can be used to prevent adverse performance on resource-constrained homeservers.
changelog.d/5785.misc: Set the logs emitted when checking typing and presence timeouts to DEBUG level, not INFO.
changelog.d/5787.misc: Remove DelayedCall debugging from the test suite, as it is no longer required in the vast majority of Synapse's tests.
changelog.d/5789.bugfix: Fix UISIs during homeserver outage.
changelog.d/5792.misc: Reduce database IO usage by optimising queries for current membership.
changelog.d/5793.misc: Reduce database IO usage by optimising queries for current membership.
changelog.d/5794.misc: Improve performance when making `.well-known` requests by sharing the SSL options between requests.
changelog.d/5796.misc: Disable codecov GitHub comments on PRs.
changelog.d/5798.bugfix: Return 404 instead of 403 when accessing /rooms/{roomId}/event/{eventId} for an event without the appropriate permissions.
@@ -42,6 +42,11 @@ RUN cd dh-virtualenv-1.1 && dpkg-buildpackage -us -uc -b
###
FROM ${distro}

# Get the distro we want to pull from as a dynamic build variable
# (We need to define it in each build stage)
ARG distro=""
ENV distro ${distro}

# Install the build dependencies
#
# NB: keep this list in sync with the list of build-deps in debian/control

@@ -4,7 +4,8 @@

set -ex

DIST=`lsb_release -c -s`
# Get the codename from distro env
DIST=`cut -d ':' -f2 <<< $distro`

# we get a read-only copy of the source: make a writeable copy
cp -aT /synapse/source /synapse/build

@@ -278,6 +278,23 @@ listeners:
# Used by phonehome stats to group together related servers.
#server_context: context

# Resource-constrained Homeserver Settings
#
# If limit_remote_rooms.enabled is True, the room complexity will be
# checked before a user joins a new remote room. If it is above
# limit_remote_rooms.complexity, it will disallow joining or
# instantly leave.
#
# limit_remote_rooms.complexity_error can be set to customise the text
# displayed to the user when a room above the complexity threshold has
# its join cancelled.
#
# Uncomment the below lines to enable:
#limit_remote_rooms:
# enabled: True
# complexity: 1.0
# complexity_error: "This room is too complex."

# Whether to require a user to be in the room to add an alias to it.
# Defaults to 'true'.
#
@@ -925,10 +942,6 @@ uploads_path: "DATADIR/uploads"
#
# macaroon_secret_key: <PRIVATE STRING>

# Used to enable access token expiration.
#
#expire_access_token: False

# a secret which is used to calculate HMACs for form values, to stop
# falsification of values. Must be specified for the User Consent
# forms to work.

@@ -410,21 +410,16 @@ class Auth(object):
try:
user_id = self.get_user_id_from_macaroon(macaroon)

has_expiry = False
guest = False
for caveat in macaroon.caveats:
if caveat.caveat_id.startswith("time "):
has_expiry = True
elif caveat.caveat_id == "guest = true":
if caveat.caveat_id == "guest = true":
guest = True

self.validate_macaroon(
macaroon, rights, self.hs.config.expire_access_token, user_id=user_id
)
self.validate_macaroon(macaroon, rights, user_id=user_id)
except (pymacaroons.exceptions.MacaroonException, TypeError, ValueError):
raise InvalidClientTokenError("Invalid macaroon passed.")

if not has_expiry and rights == "access":
if rights == "access":
self.token_cache[token] = (user_id, guest)

return user_id, guest
@@ -450,7 +445,7 @@ class Auth(object):
return caveat.caveat_id[len(user_prefix) :]
raise InvalidClientTokenError("No user caveat in macaroon")

def validate_macaroon(self, macaroon, type_string, verify_expiry, user_id):
def validate_macaroon(self, macaroon, type_string, user_id):
"""
validate that a Macaroon is understood by and was signed by this server.

@@ -458,7 +453,6 @@ class Auth(object):
macaroon(pymacaroons.Macaroon): The macaroon to validate
type_string(str): The kind of token required (e.g. "access",
"delete_pusher")
verify_expiry(bool): Whether to verify whether the macaroon has expired.
user_id (str): The user_id required
"""
v = pymacaroons.Verifier()
@@ -471,19 +465,7 @@ class Auth(object):
v.satisfy_exact("type = " + type_string)
v.satisfy_exact("user_id = %s" % user_id)
v.satisfy_exact("guest = true")

# verify_expiry should really always be True, but there exist access
# tokens in the wild which expire when they should not, so we can't
# enforce expiry yet (so we have to allow any caveat starting with
# 'time < ' in access tokens).
#
# On the other hand, short-term login tokens (as used by CAS login, for
# example) have an expiry time which we do want to enforce.

if verify_expiry:
v.satisfy_general(self._verify_expiry)
else:
v.satisfy_general(lambda c: c.startswith("time < "))
v.satisfy_general(self._verify_expiry)

# access_tokens include a nonce for uniqueness: any value is acceptable
v.satisfy_general(lambda c: c.startswith("nonce = "))

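The macaroon changes above lean entirely on pymacaroons caveat satisfiers: exact satisfiers accept a specific caveat string, general satisfiers accept any caveat matching a predicate, and verification fails if a caveat is not covered by any satisfier. A minimal standalone sketch of that mechanism, with made-up location, key and caveat values:

```python
import pymacaroons

key = "a-secret-signing-key"  # illustrative only
m = pymacaroons.Macaroon(location="example.org", identifier="key1", key=key)
m.add_first_party_caveat("gen = 1")
m.add_first_party_caveat("type = access")
m.add_first_party_caveat("user_id = @alice:example.org")
m.add_first_party_caveat("nonce = abcdef")

v = pymacaroons.Verifier()
v.satisfy_exact("gen = 1")
v.satisfy_exact("type = access")
v.satisfy_exact("user_id = @alice:example.org")
v.satisfy_exact("guest = true")  # allowed to appear, but absence is fine too
# A general satisfier accepts any caveat matching the predicate, e.g. any nonce.
v.satisfy_general(lambda caveat: caveat.startswith("nonce = "))

# Returns True, or raises a pymacaroons exception if some caveat is unmet.
assert v.verify(m, key)
```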
@@ -116,8 +116,6 @@ class KeyConfig(Config):
seed = bytes(self.signing_key[0])
self.macaroon_secret_key = hashlib.sha256(seed).digest()

self.expire_access_token = config.get("expire_access_token", False)

# a secret which is used to calculate HMACs for form values, to stop
# falsification of values
self.form_secret = config.get("form_secret", None)
@@ -144,10 +142,6 @@ class KeyConfig(Config):
#
%(macaroon_secret_key)s

# Used to enable access token expiration.
#
#expire_access_token: False

# a secret which is used to calculate HMACs for form values, to stop
# falsification of values. Must be specified for the User Consent
# forms to work.

@@ -18,6 +18,7 @@
import logging
import os.path

import attr
from netaddr import IPSet

from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
@@ -38,6 +39,12 @@ DEFAULT_BIND_ADDRESSES = ["::", "0.0.0.0"]

DEFAULT_ROOM_VERSION = "4"

ROOM_COMPLEXITY_TOO_GREAT = (
"Your homeserver is unable to join rooms this large or complex. "
"Please speak to your server administrator, or upgrade your instance "
"to join this room."
)


class ServerConfig(Config):
def read_config(self, config, **kwargs):
@@ -247,6 +254,23 @@ class ServerConfig(Config):

self.gc_thresholds = read_gc_thresholds(config.get("gc_thresholds", None))

@attr.s
class LimitRemoteRoomsConfig(object):
enabled = attr.ib(
validator=attr.validators.instance_of(bool), default=False
)
complexity = attr.ib(
validator=attr.validators.instance_of((int, float)), default=1.0
)
complexity_error = attr.ib(
validator=attr.validators.instance_of(str),
default=ROOM_COMPLEXITY_TOO_GREAT,
)

self.limit_remote_rooms = LimitRemoteRoomsConfig(
**config.get("limit_remote_rooms", {})
)

bind_port = config.get("bind_port")
if bind_port:
if config.get("no_tls", False):
@@ -617,6 +641,23 @@ class ServerConfig(Config):
# Used by phonehome stats to group together related servers.
#server_context: context

# Resource-constrained Homeserver Settings
#
# If limit_remote_rooms.enabled is True, the room complexity will be
# checked before a user joins a new remote room. If it is above
# limit_remote_rooms.complexity, it will disallow joining or
# instantly leave.
#
# limit_remote_rooms.complexity_error can be set to customise the text
# displayed to the user when a room above the complexity threshold has
# its join cancelled.
#
# Uncomment the below lines to enable:
#limit_remote_rooms:
# enabled: True
# complexity: 1.0
# complexity_error: "This room is too complex."

# Whether to require a user to be in the room to add an alias to it.
# Defaults to 'true'.
#
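The new LimitRemoteRoomsConfig above is a plain attrs class, so the parsed limit_remote_rooms section of homeserver.yaml is validated and defaulted simply by splatting it into the constructor. A rough, self-contained sketch of that behaviour (field names copied from the diff, the usage below is invented):

```python
import attr

ROOM_COMPLEXITY_TOO_GREAT = "Your homeserver is unable to join rooms this large or complex."


@attr.s
class LimitRemoteRoomsConfig(object):
    # attrs runs each validator at construction time, so bad YAML values fail fast.
    enabled = attr.ib(validator=attr.validators.instance_of(bool), default=False)
    complexity = attr.ib(validator=attr.validators.instance_of((int, float)), default=1.0)
    complexity_error = attr.ib(
        validator=attr.validators.instance_of(str), default=ROOM_COMPLEXITY_TOO_GREAT
    )


# Roughly what read_config does: missing keys fall back to the defaults above.
yaml_section = {"enabled": True, "complexity": 2.5}
config = LimitRemoteRoomsConfig(**yaml_section)
print(config.enabled, config.complexity, config.complexity_error)

# A wrongly typed value raises TypeError thanks to the validators, e.g.:
# LimitRemoteRoomsConfig(enabled="yes")
```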
@@ -31,6 +31,7 @@ from twisted.internet.ssl import (
platformTrust,
)
from twisted.python.failure import Failure
from twisted.web.iweb import IPolicyForHTTPS

logger = logging.getLogger(__name__)

@@ -74,6 +75,7 @@ class ServerContextFactory(ContextFactory):
return self._context


@implementer(IPolicyForHTTPS)
class ClientTLSOptionsFactory(object):
"""Factory for Twisted SSLClientConnectionCreators that are used to make connections
to remote servers for federation.
@@ -146,6 +148,12 @@ class ClientTLSOptionsFactory(object):
f = Failure()
tls_protocol.failVerification(f)

def creatorForNetloc(self, hostname, port):
"""Implements the IPolicyForHTTPS interace so that this can be passed
directly to agents.
"""
return self.get_options(hostname)


@implementer(IOpenSSLClientConnectionCreator)
class SSLClientConnectionCreator(object):

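For context, IPolicyForHTTPS is the interface that twisted.web.client.Agent expects for its contextFactory argument, which is why implementing creatorForNetloc is enough to pass ClientTLSOptionsFactory straight to an Agent. A minimal sketch of such a policy, independent of Synapse's classes:

```python
from zope.interface import implementer

from twisted.internet import reactor
from twisted.internet.ssl import optionsForClientTLS
from twisted.web.client import Agent
from twisted.web.iweb import IPolicyForHTTPS


@implementer(IPolicyForHTTPS)
class SimpleTLSPolicy(object):
    """Hand the Agent a TLS connection creator for each host it connects to."""

    def creatorForNetloc(self, hostname, port):
        # The Agent passes the hostname as bytes; optionsForClientTLS wants text.
        return optionsForClientTLS(hostname.decode("ascii"))


# Because the policy implements IPolicyForHTTPS it can be passed directly:
agent = Agent(reactor, contextFactory=SimpleTLSPolicy())
```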
@@ -993,3 +993,39 @@ class FederationClient(FederationBase):
)

raise RuntimeError("Failed to send to any server.")

@defer.inlineCallbacks
def get_room_complexity(self, destination, room_id):
"""
Fetch the complexity of a remote room from another server.

Args:
destination (str): The remote server
room_id (str): The room ID to ask about.

Returns:
Deferred[dict] or Deferred[None]: Dict contains the complexity
metric versions, while None means we could not fetch the complexity.
"""
try:
complexity = yield self.transport_layer.get_room_complexity(
destination=destination, room_id=room_id
)
defer.returnValue(complexity)
except CodeMessageException as e:
# We didn't manage to get it -- probably a 404. We are okay if other
# servers don't give it to us.
logger.debug(
"Failed to fetch room complexity via %s for %s, got a %d",
destination,
room_id,
e.code,
)
except Exception:
logger.exception(
"Failed to fetch room complexity via %s for %s", destination, room_id
)

# If we don't manage to find it, return None. It's not an error if a
# server doesn't give it to us.
defer.returnValue(None)

@@ -21,7 +21,11 @@ from six.moves import urllib
from twisted.internet import defer

from synapse.api.constants import Membership
from synapse.api.urls import FEDERATION_V1_PREFIX, FEDERATION_V2_PREFIX
from synapse.api.urls import (
FEDERATION_UNSTABLE_PREFIX,
FEDERATION_V1_PREFIX,
FEDERATION_V2_PREFIX,
)
from synapse.logging.utils import log_function

logger = logging.getLogger(__name__)
@@ -935,6 +939,23 @@ class TransportLayerClient(object):
destination=destination, path=path, data=content, ignore_backoff=True
)

def get_room_complexity(self, destination, room_id):
"""
Args:
destination (str): The remote server
room_id (str): The room ID to ask about.
"""
path = _create_path(FEDERATION_UNSTABLE_PREFIX, "/rooms/%s/complexity", room_id)

return self.client.get_json(destination=destination, path=path)


def _create_path(federation_prefix, path, *args):
"""
Ensures that all args are url encoded.
"""
return federation_prefix + path % tuple(urllib.parse.quote(arg, "") for arg in args)


def _create_v1_path(path, *args):
"""Creates a path against V1 federation API from the path template and
@@ -951,9 +972,7 @@ def _create_v1_path(path, *args):
Returns:
str
"""
return FEDERATION_V1_PREFIX + path % tuple(
urllib.parse.quote(arg, "") for arg in args
)
return _create_path(FEDERATION_V1_PREFIX, path, *args)


def _create_v2_path(path, *args):
@@ -971,6 +990,4 @@ def _create_v2_path(path, *args):
Returns:
str
"""
return FEDERATION_V2_PREFIX + path % tuple(
urllib.parse.quote(arg, "") for arg in args
)
return _create_path(FEDERATION_V2_PREFIX, path, *args)

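The shared _create_path helper is just prefix plus percent-encoded template substitution. A quick standalone sketch (the unstable prefix string below is assumed for illustration, not taken from the diff):

```python
from urllib.parse import quote


def create_path(federation_prefix, path_template, *args):
    # URL-encode every argument (no characters treated as safe), then substitute
    # the encoded values into the template.
    return federation_prefix + path_template % tuple(quote(arg, safe="") for arg in args)


# e.g. building the unstable room-complexity path for a room ID:
print(create_path("/_matrix/federation/unstable", "/rooms/%s/complexity", "!abc:example.org"))
# -> /_matrix/federation/unstable/rooms/%21abc%3Aexample.org/complexity
```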
@@ -860,7 +860,7 @@ class AuthHandler(BaseHandler):
try:
macaroon = pymacaroons.Macaroon.deserialize(login_token)
user_id = auth_api.get_user_id_from_macaroon(macaroon)
auth_api.validate_macaroon(macaroon, "login", True, user_id)
auth_api.validate_macaroon(macaroon, "login", user_id)
except Exception:
raise AuthError(403, "Invalid token", errcode=Codes.FORBIDDEN)
self.ratelimit_login_per_account(user_id)

@@ -209,12 +209,12 @@ class DeviceHandler(DeviceWorkerHandler):
|
||||
|
||||
self.federation_sender = hs.get_federation_sender()
|
||||
|
||||
self._edu_updater = DeviceListEduUpdater(hs, self)
|
||||
self.device_list_updater = DeviceListUpdater(hs, self)
|
||||
|
||||
federation_registry = hs.get_federation_registry()
|
||||
|
||||
federation_registry.register_edu_handler(
|
||||
"m.device_list_update", self._edu_updater.incoming_device_list_update
|
||||
"m.device_list_update", self.device_list_updater.incoming_device_list_update
|
||||
)
|
||||
federation_registry.register_query_handler(
|
||||
"user_devices", self.on_federation_query_user_devices
|
||||
@@ -426,7 +426,7 @@ def _update_device_from_client_ips(device, client_ips):
|
||||
device.update({"last_seen_ts": ip.get("last_seen"), "last_seen_ip": ip.get("ip")})
|
||||
|
||||
|
||||
class DeviceListEduUpdater(object):
|
||||
class DeviceListUpdater(object):
|
||||
"Handles incoming device list updates from federation and updates the DB"
|
||||
|
||||
def __init__(self, hs, device_handler):
|
||||
@@ -519,75 +519,7 @@ class DeviceListEduUpdater(object):
|
||||
logger.debug("Need to re-sync devices for %r? %r", user_id, resync)
|
||||
|
||||
if resync:
|
||||
# Fetch all devices for the user.
|
||||
origin = get_domain_from_id(user_id)
|
||||
try:
|
||||
result = yield self.federation.query_user_devices(origin, user_id)
|
||||
except (
|
||||
NotRetryingDestination,
|
||||
RequestSendFailed,
|
||||
HttpResponseException,
|
||||
):
|
||||
# TODO: Remember that we are now out of sync and try again
|
||||
# later
|
||||
logger.warn("Failed to handle device list update for %s", user_id)
|
||||
# We abort on exceptions rather than accepting the update
|
||||
# as otherwise synapse will 'forget' that its device list
|
||||
# is out of date. If we bail then we will retry the resync
|
||||
# next time we get a device list update for this user_id.
|
||||
# This makes it more likely that the device lists will
|
||||
# eventually become consistent.
|
||||
return
|
||||
except FederationDeniedError as e:
|
||||
logger.info(e)
|
||||
return
|
||||
except Exception:
|
||||
# TODO: Remember that we are now out of sync and try again
|
||||
# later
|
||||
logger.exception(
|
||||
"Failed to handle device list update for %s", user_id
|
||||
)
|
||||
return
|
||||
|
||||
stream_id = result["stream_id"]
|
||||
devices = result["devices"]
|
||||
|
||||
# If the remote server has more than ~1000 devices for this user
|
||||
# we assume that something is going horribly wrong (e.g. a bot
|
||||
# that logs in and creates a new device every time it tries to
|
||||
# send a message). Maintaining lots of devices per user in the
|
||||
# cache can cause serious performance issues as if this request
|
||||
# takes more than 60s to complete, internal replication from the
|
||||
# inbound federation worker to the synapse master may time out
|
||||
# causing the inbound federation to fail and causing the remote
|
||||
# server to retry, causing a DoS. So in this scenario we give
|
||||
# up on storing the total list of devices and only handle the
|
||||
# delta instead.
|
||||
if len(devices) > 1000:
|
||||
logger.warn(
|
||||
"Ignoring device list snapshot for %s as it has >1K devs (%d)",
|
||||
user_id,
|
||||
len(devices),
|
||||
)
|
||||
devices = []
|
||||
|
||||
for device in devices:
|
||||
logger.debug(
|
||||
"Handling resync update %r/%r, ID: %r",
|
||||
user_id,
|
||||
device["device_id"],
|
||||
stream_id,
|
||||
)
|
||||
|
||||
yield self.store.update_remote_device_list_cache(
|
||||
user_id, devices, stream_id
|
||||
)
|
||||
device_ids = [device["device_id"] for device in devices]
|
||||
yield self.device_handler.notify_device_update(user_id, device_ids)
|
||||
|
||||
# We clobber the seen updates since we've re-synced from a given
|
||||
# point.
|
||||
self._seen_updates[user_id] = set([stream_id])
|
||||
yield self.user_device_resync(user_id)
|
||||
else:
|
||||
# Simply update the single device, since we know that is the only
|
||||
# change (because of the single prev_id matching the current cache)
|
||||
@@ -634,3 +566,77 @@ class DeviceListEduUpdater(object):
|
||||
stream_id_in_updates.add(stream_id)
|
||||
|
||||
return False
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def user_device_resync(self, user_id):
|
||||
"""Fetches all devices for a user and updates the device cache with them.
|
||||
|
||||
Args:
|
||||
user_id (str): The user's id whose device_list will be updated.
|
||||
Returns:
|
||||
Deferred[dict]: a dict with device info as under the "devices" in the result of this
|
||||
request:
|
||||
https://matrix.org/docs/spec/server_server/r0.1.2#get-matrix-federation-v1-user-devices-userid
|
||||
"""
|
||||
# Fetch all devices for the user.
|
||||
origin = get_domain_from_id(user_id)
|
||||
try:
|
||||
result = yield self.federation.query_user_devices(origin, user_id)
|
||||
except (NotRetryingDestination, RequestSendFailed, HttpResponseException):
|
||||
# TODO: Remember that we are now out of sync and try again
|
||||
# later
|
||||
logger.warn("Failed to handle device list update for %s", user_id)
|
||||
# We abort on exceptions rather than accepting the update
|
||||
# as otherwise synapse will 'forget' that its device list
|
||||
# is out of date. If we bail then we will retry the resync
|
||||
# next time we get a device list update for this user_id.
|
||||
# This makes it more likely that the device lists will
|
||||
# eventually become consistent.
|
||||
return
|
||||
except FederationDeniedError as e:
|
||||
logger.info(e)
|
||||
return
|
||||
except Exception:
|
||||
# TODO: Remember that we are now out of sync and try again
|
||||
# later
|
||||
logger.exception("Failed to handle device list update for %s", user_id)
|
||||
return
|
||||
stream_id = result["stream_id"]
|
||||
devices = result["devices"]
|
||||
|
||||
# If the remote server has more than ~1000 devices for this user
|
||||
# we assume that something is going horribly wrong (e.g. a bot
|
||||
# that logs in and creates a new device every time it tries to
|
||||
# send a message). Maintaining lots of devices per user in the
|
||||
# cache can cause serious performance issues as if this request
|
||||
# takes more than 60s to complete, internal replication from the
|
||||
# inbound federation worker to the synapse master may time out
|
||||
# causing the inbound federation to fail and causing the remote
|
||||
# server to retry, causing a DoS. So in this scenario we give
|
||||
# up on storing the total list of devices and only handle the
|
||||
# delta instead.
|
||||
if len(devices) > 1000:
|
||||
logger.warn(
|
||||
"Ignoring device list snapshot for %s as it has >1K devs (%d)",
|
||||
user_id,
|
||||
len(devices),
|
||||
)
|
||||
devices = []
|
||||
|
||||
for device in devices:
|
||||
logger.debug(
|
||||
"Handling resync update %r/%r, ID: %r",
|
||||
user_id,
|
||||
device["device_id"],
|
||||
stream_id,
|
||||
)
|
||||
|
||||
yield self.store.update_remote_device_list_cache(user_id, devices, stream_id)
|
||||
device_ids = [device["device_id"] for device in devices]
|
||||
yield self.device_handler.notify_device_update(user_id, device_ids)
|
||||
|
||||
# We clobber the seen updates since we've re-synced from a given
|
||||
# point.
|
||||
self._seen_updates[user_id] = set([stream_id])
|
||||
|
||||
defer.returnValue(result)
|
||||
|
||||
@@ -278,7 +278,6 @@ class DirectoryHandler(BaseHandler):
servers = list(servers)

return {"room_id": room_id, "servers": servers}
return

@defer.inlineCallbacks
def on_directory_query(self, args):

@@ -25,6 +25,7 @@ from twisted.internet import defer
|
||||
from synapse.api.errors import CodeMessageException, SynapseError
|
||||
from synapse.logging.context import make_deferred_yieldable, run_in_background
|
||||
from synapse.types import UserID, get_domain_from_id
|
||||
from synapse.util import unwrapFirstError
|
||||
from synapse.util.retryutils import NotRetryingDestination
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@@ -65,6 +66,7 @@ class E2eKeysHandler(object):
|
||||
}
|
||||
}
|
||||
"""
|
||||
|
||||
device_keys_query = query_body.get("device_keys", {})
|
||||
|
||||
# separate users by domain.
|
||||
@@ -121,7 +123,56 @@ class E2eKeysHandler(object):
|
||||
# Now fetch any devices that we don't have in our cache
|
||||
@defer.inlineCallbacks
|
||||
def do_remote_query(destination):
|
||||
"""This is called when we are querying the device list of a user on
|
||||
a remote homeserver and their device list is not in the device list
|
||||
cache. If we share a room with this user and we're not querying for
|
||||
specific user we will update the cache
|
||||
with their device list."""
|
||||
|
||||
destination_query = remote_queries_not_in_cache[destination]
|
||||
|
||||
# We first consider whether we wish to update the device list cache with
|
||||
# the users device list. We want to track a user's devices when the
|
||||
# authenticated user shares a room with the queried user and the query
|
||||
# has not specified a particular device.
|
||||
# If we update the cache for the queried user we remove them from further
|
||||
# queries. We use the more efficient batched query_client_keys for all
|
||||
# remaining users
|
||||
user_ids_updated = []
|
||||
for (user_id, device_list) in destination_query.items():
|
||||
if user_id in user_ids_updated:
|
||||
continue
|
||||
|
||||
if device_list:
|
||||
continue
|
||||
|
||||
room_ids = yield self.store.get_rooms_for_user(user_id)
|
||||
if not room_ids:
|
||||
continue
|
||||
|
||||
# We've decided we're sharing a room with this user and should
|
||||
# probably be tracking their device lists. However, we haven't
|
||||
# done an initial sync on the device list so we do it now.
|
||||
try:
|
||||
user_devices = yield self.device_handler.device_list_updater.user_device_resync(
|
||||
user_id
|
||||
)
|
||||
user_devices = user_devices["devices"]
|
||||
for device in user_devices:
|
||||
results[user_id] = {device["device_id"]: device["keys"]}
|
||||
user_ids_updated.append(user_id)
|
||||
except Exception as e:
|
||||
failures[destination] = _exception_to_failure(e)
|
||||
|
||||
if len(destination_query) == len(user_ids_updated):
|
||||
# We've updated all the users in the query and we do not need to
|
||||
# make any further remote calls.
|
||||
return
|
||||
|
||||
# Remove all the users from the query which we have updated
|
||||
for user_id in user_ids_updated:
|
||||
destination_query.pop(user_id)
|
||||
|
||||
try:
|
||||
remote_result = yield self.federation.query_client_keys(
|
||||
destination, {"device_keys": destination_query}, timeout=timeout
|
||||
@@ -132,7 +183,8 @@ class E2eKeysHandler(object):
|
||||
results[user_id] = keys
|
||||
|
||||
except Exception as e:
|
||||
failures[destination] = _exception_to_failure(e)
|
||||
failure = _exception_to_failure(e)
|
||||
failures[destination] = failure
|
||||
|
||||
yield make_deferred_yieldable(
|
||||
defer.gatherResults(
|
||||
@@ -141,7 +193,7 @@ class E2eKeysHandler(object):
|
||||
for destination in remote_queries_not_in_cache
|
||||
],
|
||||
consumeErrors=True,
|
||||
)
|
||||
).addErrback(unwrapFirstError)
|
||||
)
|
||||
|
||||
return {"device_keys": results, "failures": failures}
|
||||
@@ -234,8 +286,10 @@ class E2eKeysHandler(object):
|
||||
for user_id, keys in remote_result["one_time_keys"].items():
|
||||
if user_id in device_keys:
|
||||
json_result[user_id] = keys
|
||||
|
||||
except Exception as e:
|
||||
failures[destination] = _exception_to_failure(e)
|
||||
failure = _exception_to_failure(e)
|
||||
failures[destination] = failure
|
||||
|
||||
yield make_deferred_yieldable(
|
||||
defer.gatherResults(
|
||||
@@ -263,6 +317,7 @@ class E2eKeysHandler(object):
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def upload_keys_for_user(self, user_id, device_id, keys):
|
||||
|
||||
time_now = self.clock.time_msec()
|
||||
|
||||
# TODO: Validate the JSON to make sure it has the right keys.
|
||||
|
||||
@@ -2796,3 +2796,28 @@ class FederationHandler(BaseHandler):
)
else:
return user_joined_room(self.distributor, user, room_id)

@defer.inlineCallbacks
def get_room_complexity(self, remote_room_hosts, room_id):
"""
Fetch the complexity of a remote room over federation.

Args:
remote_room_hosts (list[str]): The remote servers to ask.
room_id (str): The room ID to ask about.

Returns:
Deferred[dict] or Deferred[None]: Dict contains the complexity
metric versions, while None means we could not fetch the complexity.
"""

for host in remote_room_hosts:
res = yield self.federation_client.get_room_complexity(host, room_id)

# We got a result, return it.
if res:
defer.returnValue(res)

# We fell off the bottom, couldn't get the complexity from anyone. Oh
# well.
defer.returnValue(None)

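The returned dict is keyed by complexity metric version; later in this comparison the room-member handler compares its "v1" value against limit_remote_rooms.complexity. A hedged sketch of that check (threshold and sample values are invented):

```python
MAX_COMPLEXITY = 1.0  # corresponds to limit_remote_rooms.complexity


def is_too_complex(complexity):
    """complexity is the dict returned above, or None if no server answered."""
    if complexity is None:
        return None  # unknown; the caller may re-check with local state after joining
    return complexity["v1"] > MAX_COMPLEXITY


print(is_too_complex({"v1": 9.2}))   # True -> refuse the remote join
print(is_too_complex({"v1": 0.25}))  # False -> join proceeds
print(is_too_complex(None))          # None -> checked again later
```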
@@ -333,7 +333,7 @@ class PresenceHandler(object):
"""Checks the presence of users that have timed out and updates as
appropriate.
"""
logger.info("Handling presence timeouts")
logger.debug("Handling presence timeouts")
now = self.clock.time_msec()

# Fetch the list of users that *may* have timed out. Things may have

@@ -26,8 +26,7 @@ from unpaddedbase64 import decode_base64
|
||||
|
||||
from twisted.internet import defer
|
||||
|
||||
import synapse.server
|
||||
import synapse.types
|
||||
from synapse import types
|
||||
from synapse.api.constants import EventTypes, Membership
|
||||
from synapse.api.errors import AuthError, Codes, HttpResponseException, SynapseError
|
||||
from synapse.types import RoomID, UserID
|
||||
@@ -543,7 +542,7 @@ class RoomMemberHandler(object):
|
||||
), "Sender (%s) must be same as requester (%s)" % (sender, requester.user)
|
||||
assert self.hs.is_mine(sender), "Sender must be our own: %s" % (sender,)
|
||||
else:
|
||||
requester = synapse.types.create_requester(target_user)
|
||||
requester = types.create_requester(target_user)
|
||||
|
||||
prev_event = yield self.event_creation_handler.deduplicate_state_event(
|
||||
event, context
|
||||
@@ -945,6 +944,47 @@ class RoomMemberMasterHandler(RoomMemberHandler):
|
||||
self.distributor.declare("user_joined_room")
|
||||
self.distributor.declare("user_left_room")
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def _is_remote_room_too_complex(self, room_id, remote_room_hosts):
|
||||
"""
|
||||
Check if complexity of a remote room is too great.
|
||||
|
||||
Args:
|
||||
room_id (str)
|
||||
remote_room_hosts (list[str])
|
||||
|
||||
Returns: bool of whether the complexity is too great, or None
|
||||
if unable to be fetched
|
||||
"""
|
||||
max_complexity = self.hs.config.limit_remote_rooms.complexity
|
||||
complexity = yield self.federation_handler.get_room_complexity(
|
||||
remote_room_hosts, room_id
|
||||
)
|
||||
|
||||
if complexity:
|
||||
if complexity["v1"] > max_complexity:
|
||||
return True
|
||||
return False
|
||||
return None
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def _is_local_room_too_complex(self, room_id):
|
||||
"""
|
||||
Check if the complexity of a local room is too great.
|
||||
|
||||
Args:
|
||||
room_id (str)
|
||||
|
||||
Returns: bool
|
||||
"""
|
||||
max_complexity = self.hs.config.limit_remote_rooms.complexity
|
||||
complexity = yield self.store.get_room_complexity(room_id)
|
||||
|
||||
if complexity["v1"] > max_complexity:
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def _remote_join(self, requester, remote_room_hosts, room_id, user, content):
|
||||
"""Implements RoomMemberHandler._remote_join
|
||||
@@ -952,7 +992,6 @@ class RoomMemberMasterHandler(RoomMemberHandler):
|
||||
# filter ourselves out of remote_room_hosts: do_invite_join ignores it
|
||||
# and if it is the only entry we'd like to return a 404 rather than a
|
||||
# 500.
|
||||
|
||||
remote_room_hosts = [
|
||||
host for host in remote_room_hosts if host != self.hs.hostname
|
||||
]
|
||||
@@ -960,6 +999,18 @@ class RoomMemberMasterHandler(RoomMemberHandler):
|
||||
if len(remote_room_hosts) == 0:
|
||||
raise SynapseError(404, "No known servers")
|
||||
|
||||
if self.hs.config.limit_remote_rooms.enabled:
|
||||
# Fetch the room complexity
|
||||
too_complex = yield self._is_remote_room_too_complex(
|
||||
room_id, remote_room_hosts
|
||||
)
|
||||
if too_complex is True:
|
||||
raise SynapseError(
|
||||
code=400,
|
||||
msg=self.hs.config.limit_remote_rooms.complexity_error,
|
||||
errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
|
||||
)
|
||||
|
||||
# We don't do an auth check if we are doing an invite
|
||||
# join dance for now, since we're kinda implicitly checking
|
||||
# that we are allowed to join when we decide whether or not we
|
||||
@@ -969,6 +1020,31 @@ class RoomMemberMasterHandler(RoomMemberHandler):
|
||||
)
|
||||
yield self._user_joined_room(user, room_id)
|
||||
|
||||
# Check the room we just joined wasn't too large, if we didn't fetch the
|
||||
# complexity of it before.
|
||||
if self.hs.config.limit_remote_rooms.enabled:
|
||||
if too_complex is False:
|
||||
# We checked, and we're under the limit.
|
||||
return
|
||||
|
||||
# Check again, but with the local state events
|
||||
too_complex = yield self._is_local_room_too_complex(room_id)
|
||||
|
||||
if too_complex is False:
|
||||
# We're under the limit.
|
||||
return
|
||||
|
||||
# The room is too large. Leave.
|
||||
requester = types.create_requester(user, None, False, None)
|
||||
yield self.update_membership(
|
||||
requester=requester, target=user, room_id=room_id, action="leave"
|
||||
)
|
||||
raise SynapseError(
|
||||
code=400,
|
||||
msg=self.hs.config.limit_remote_rooms.complexity_error,
|
||||
errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def _remote_reject_invite(self, requester, remote_room_hosts, room_id, target):
|
||||
"""Implements RoomMemberHandler._remote_reject_invite
|
||||
|
||||
@@ -83,7 +83,7 @@ class TypingHandler(object):
self._room_typing = {}

def _handle_timeouts(self):
logger.info("Checking for typing timeouts")
logger.debug("Checking for typing timeouts")

now = self.clock.time_msec()

@@ -64,10 +64,6 @@ class MatrixFederationAgent(object):
tls_client_options_factory (ClientTLSOptionsFactory|None):
factory to use for fetching client tls options, or none to disable TLS.

_well_known_tls_policy (IPolicyForHTTPS|None):
TLS policy to use for fetching .well-known files. None to use a default
(browser-like) implementation.

_srv_resolver (SrvResolver|None):
SRVResolver impl to use for looking up SRV records. None to use a default
implementation.
@@ -81,7 +77,6 @@ class MatrixFederationAgent(object):
self,
reactor,
tls_client_options_factory,
_well_known_tls_policy=None,
_srv_resolver=None,
_well_known_cache=well_known_cache,
):
@@ -98,13 +93,12 @@ class MatrixFederationAgent(object):
self._pool.maxPersistentPerHost = 5
self._pool.cachedConnectionTimeout = 2 * 60

agent_args = {}
if _well_known_tls_policy is not None:
# the param is called 'contextFactory', but actually passing a
# contextfactory is deprecated, and it expects an IPolicyForHTTPS.
agent_args["contextFactory"] = _well_known_tls_policy
_well_known_agent = RedirectAgent(
Agent(self._reactor, pool=self._pool, **agent_args)
Agent(
self._reactor,
pool=self._pool,
contextFactory=tls_client_options_factory,
)
)
self._well_known_agent = _well_known_agent

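The hunk above drops the separate .well-known TLS policy and reuses the federation TLS options for a single shared agent. A generic sketch of the underlying Twisted pattern (one pooled Agent reused for every .well-known lookup; this is not Synapse's exact code):

```python
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from twisted.web.client import Agent, HTTPConnectionPool, RedirectAgent, readBody

# One shared connection pool and one Agent, reused for every .well-known lookup,
# so TLS options and connections are shared between requests rather than rebuilt.
pool = HTTPConnectionPool(reactor)
pool.maxPersistentPerHost = 5
well_known_agent = RedirectAgent(Agent(reactor, pool=pool))


@inlineCallbacks
def fetch_well_known(host):
    # host is expected as bytes, e.g. b"matrix.org"
    response = yield well_known_agent.request(
        b"GET", b"https://" + host + b"/.well-known/matrix/server"
    )
    body = yield readBody(response)
    return body
```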
@@ -568,14 +568,22 @@ class RoomEventServlet(RestServlet):
@defer.inlineCallbacks
def on_GET(self, request, room_id, event_id):
requester = yield self.auth.get_user_by_req(request, allow_guest=True)
event = yield self.event_handler.get_event(requester.user, room_id, event_id)
try:
event = yield self.event_handler.get_event(
requester.user, room_id, event_id
)
except AuthError:
# This endpoint is supposed to return a 404 when the requester does
# not have permission to access the event
# https://matrix.org/docs/spec/client_server/r0.5.0#get-matrix-client-r0-rooms-roomid-event-eventid
raise SynapseError(404, "Event not found.", errcode=Codes.NOT_FOUND)

time_now = self.clock.time_msec()
if event:
event = yield self._event_serializer.serialize_event(event, time_now)
return (200, event)
else:
return (404, "Event not found.")

return SynapseError(404, "Event not found.", errcode=Codes.NOT_FOUND)


class RoomEventContextServlet(RestServlet):

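The servlet change above maps a permission failure onto the same 404 that a missing event produces, so clients cannot probe for events they are not allowed to see. A generic sketch of that pattern (the exception classes here are stand-ins, not Synapse's SynapseError and Codes):

```python
class AuthError(Exception):
    """Raised when the requester may not see the event."""


class NotFoundError(Exception):
    """Stand-in for an error that maps to an HTTP 404 response."""


def get_event_or_404(get_event, requester, room_id, event_id):
    # Hide the existence of events the requester cannot access: a forbidden
    # event and a missing event surface as the same 404 to the client.
    try:
        event = get_event(requester, room_id, event_id)
    except AuthError:
        raise NotFoundError("Event not found.")
    if event is None:
        raise NotFoundError("Event not found.")
    return event
```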
@@ -51,7 +51,7 @@ class DeviceInboxWorkerStore(SQLBaseStore):
|
||||
|
||||
def get_new_messages_for_device_txn(txn):
|
||||
sql = (
|
||||
"SELECT stream_id, message_json, context FROM device_inbox"
|
||||
"SELECT stream_id, message_json FROM device_inbox"
|
||||
" WHERE user_id = ? AND device_id = ?"
|
||||
" AND ? < stream_id AND stream_id <= ?"
|
||||
" ORDER BY stream_id ASC"
|
||||
@@ -61,22 +61,11 @@ class DeviceInboxWorkerStore(SQLBaseStore):
|
||||
sql, (user_id, device_id, last_stream_id, current_stream_id, limit)
|
||||
)
|
||||
messages = []
|
||||
references = []
|
||||
for row in txn:
|
||||
stream_pos = row[0]
|
||||
messages.append(json.loads(row[1]))
|
||||
references.append(
|
||||
opentracing.extract_text_map(
|
||||
json.loads(json.loads(row[2])["opentracing"])
|
||||
)
|
||||
)
|
||||
if len(messages) < limit:
|
||||
stream_pos = current_stream_id
|
||||
with opentracing.start_active_span(
|
||||
"do we have send??" # , child_of=references[0]
|
||||
):
|
||||
opentracing.set_tag("ref", references)
|
||||
pass
|
||||
return (messages, stream_pos)
|
||||
|
||||
return self.runInteraction(
|
||||
@@ -292,7 +281,6 @@ class DeviceInboxStore(DeviceInboxWorkerStore, BackgroundUpdateStore):
|
||||
allow_none=True,
|
||||
)
|
||||
if already_inserted is not None:
|
||||
opentracing.log_kv({"message": "message already received"})
|
||||
return
|
||||
|
||||
# Add an entry for this message_id so that we know we've processed
|
||||
@@ -306,9 +294,6 @@ class DeviceInboxStore(DeviceInboxWorkerStore, BackgroundUpdateStore):
|
||||
"received_ts": now_ms,
|
||||
},
|
||||
)
|
||||
opentracing.log_kv(
|
||||
{"message": "device message added to device_federation_inbox"}
|
||||
)
|
||||
|
||||
# Add the messages to the approriate local device inboxes so that
|
||||
# they'll be sent to the devices when they next sync.
|
||||
@@ -351,13 +336,6 @@ class DeviceInboxStore(DeviceInboxWorkerStore, BackgroundUpdateStore):
|
||||
messages_json_for_user[device] = message_json
|
||||
else:
|
||||
if not devices:
|
||||
opentracing.log_kv(
|
||||
{
|
||||
"message": "No devices for user.",
|
||||
"user_id": user_id,
|
||||
"messages": messages_by_device,
|
||||
}
|
||||
)
|
||||
continue
|
||||
sql = (
|
||||
"SELECT device_id FROM devices"
|
||||
@@ -383,17 +361,13 @@ class DeviceInboxStore(DeviceInboxWorkerStore, BackgroundUpdateStore):
|
||||
|
||||
sql = (
|
||||
"INSERT INTO device_inbox"
|
||||
" (user_id, device_id, stream_id, message_json, context)"
|
||||
" VALUES (?,?,?,?,?)"
|
||||
" (user_id, device_id, stream_id, message_json)"
|
||||
" VALUES (?,?,?,?)"
|
||||
)
|
||||
rows = []
|
||||
# TODO: User whitelisting?
|
||||
context = json.dumps(
|
||||
{"opentracing": opentracing.active_span_context_as_string()}
|
||||
)
|
||||
for user_id, messages_by_device in local_by_user_then_device.items():
|
||||
for device_id, message_json in messages_by_device.items():
|
||||
rows.append((user_id, device_id, stream_id, message_json, context))
|
||||
rows.append((user_id, device_id, stream_id, message_json))
|
||||
|
||||
txn.executemany(sql, rows)
|
||||
|
||||
|
||||
@@ -156,9 +156,12 @@ class RoomMemberWorkerStore(EventsWorkerStore):
|
||||
# then we can avoid a join, which is a Very Good Thing given how
|
||||
# frequently this function gets called.
|
||||
if self._current_state_events_membership_up_to_date:
|
||||
# Note, rejected events will have a null membership field, so
|
||||
# we we manually filter them out.
|
||||
sql = """
|
||||
SELECT count(*), membership FROM current_state_events
|
||||
WHERE type = 'm.room.member' AND room_id = ?
|
||||
AND membership IS NOT NULL
|
||||
GROUP BY membership
|
||||
"""
|
||||
else:
|
||||
@@ -179,19 +182,30 @@ class RoomMemberWorkerStore(EventsWorkerStore):
|
||||
|
||||
# we order by membership and then fairly arbitrarily by event_id so
|
||||
# heroes are consistent
|
||||
sql = """
|
||||
SELECT m.user_id, m.membership, m.event_id
|
||||
FROM room_memberships as m
|
||||
INNER JOIN current_state_events as c
|
||||
ON m.event_id = c.event_id
|
||||
AND m.room_id = c.room_id
|
||||
AND m.user_id = c.state_key
|
||||
WHERE c.type = 'm.room.member' AND c.room_id = ?
|
||||
ORDER BY
|
||||
CASE m.membership WHEN ? THEN 1 WHEN ? THEN 2 ELSE 3 END ASC,
|
||||
m.event_id ASC
|
||||
LIMIT ?
|
||||
"""
|
||||
if self._current_state_events_membership_up_to_date:
|
||||
# Note, rejected events will have a null membership field, so
|
||||
# we we manually filter them out.
|
||||
sql = """
|
||||
SELECT state_key, membership, event_id
|
||||
FROM current_state_events
|
||||
WHERE type = 'm.room.member' AND room_id = ?
|
||||
AND membership IS NOT NULL
|
||||
ORDER BY
|
||||
CASE membership WHEN ? THEN 1 WHEN ? THEN 2 ELSE 3 END ASC,
|
||||
event_id ASC
|
||||
LIMIT ?
|
||||
"""
|
||||
else:
|
||||
sql = """
|
||||
SELECT c.state_key, m.membership, c.event_id
|
||||
FROM room_memberships as m
|
||||
INNER JOIN current_state_events as c USING (room_id, event_id)
|
||||
WHERE c.type = 'm.room.member' AND c.room_id = ?
|
||||
ORDER BY
|
||||
CASE m.membership WHEN ? THEN 1 WHEN ? THEN 2 ELSE 3 END ASC,
|
||||
c.event_id ASC
|
||||
LIMIT ?
|
||||
"""
|
||||
|
||||
# 6 is 5 (number of heroes) plus 1, in case one of them is the calling user.
|
||||
txn.execute(sql, (room_id, Membership.JOIN, Membership.INVITE, 6))
|
||||
@@ -256,28 +270,35 @@ class RoomMemberWorkerStore(EventsWorkerStore):
|
||||
return invite
|
||||
return None
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def get_rooms_for_user_where_membership_is(self, user_id, membership_list):
|
||||
""" Get all the rooms for this user where the membership for this user
|
||||
matches one in the membership list.
|
||||
|
||||
Filters out forgotten rooms.
|
||||
|
||||
Args:
|
||||
user_id (str): The user ID.
|
||||
membership_list (list): A list of synapse.api.constants.Membership
|
||||
values which the user must be in.
|
||||
|
||||
Returns:
|
||||
A list of dictionary objects, with room_id, membership and sender
|
||||
defined.
|
||||
Deferred[list[RoomsForUser]]
|
||||
"""
|
||||
if not membership_list:
|
||||
return defer.succeed(None)
|
||||
|
||||
return self.runInteraction(
|
||||
rooms = yield self.runInteraction(
|
||||
"get_rooms_for_user_where_membership_is",
|
||||
self._get_rooms_for_user_where_membership_is_txn,
|
||||
user_id,
|
||||
membership_list,
|
||||
)
|
||||
|
||||
# Now we filter out forgotten rooms
|
||||
forgotten_rooms = yield self.get_forgotten_rooms_for_user(user_id)
|
||||
return [room for room in rooms if room.room_id not in forgotten_rooms]
|
||||
|
||||
def _get_rooms_for_user_where_membership_is_txn(
|
||||
self, txn, user_id, membership_list
|
||||
):
|
||||
@@ -287,26 +308,33 @@ class RoomMemberWorkerStore(EventsWorkerStore):
|
||||
|
||||
results = []
|
||||
if membership_list:
|
||||
where_clause = "user_id = ? AND (%s) AND forgotten = 0" % (
|
||||
" OR ".join(["m.membership = ?" for _ in membership_list]),
|
||||
)
|
||||
if self._current_state_events_membership_up_to_date:
|
||||
sql = """
|
||||
SELECT room_id, e.sender, c.membership, event_id, e.stream_ordering
|
||||
FROM current_state_events AS c
|
||||
INNER JOIN events AS e USING (room_id, event_id)
|
||||
WHERE
|
||||
c.type = 'm.room.member'
|
||||
AND state_key = ?
|
||||
AND c.membership IN (%s)
|
||||
""" % (
|
||||
",".join("?" * len(membership_list))
|
||||
)
|
||||
else:
|
||||
sql = """
|
||||
SELECT room_id, e.sender, m.membership, event_id, e.stream_ordering
|
||||
FROM current_state_events AS c
|
||||
INNER JOIN room_memberships AS m USING (room_id, event_id)
|
||||
INNER JOIN events AS e USING (room_id, event_id)
|
||||
WHERE
|
||||
c.type = 'm.room.member'
|
||||
AND state_key = ?
|
||||
AND m.membership IN (%s)
|
||||
""" % (
|
||||
",".join("?" * len(membership_list))
|
||||
)
|
||||
|
||||
args = [user_id]
|
||||
args.extend(membership_list)
|
||||
|
||||
sql = (
|
||||
"SELECT m.room_id, m.sender, m.membership, m.event_id, e.stream_ordering"
|
||||
" FROM current_state_events as c"
|
||||
" INNER JOIN room_memberships as m"
|
||||
" ON m.event_id = c.event_id"
|
||||
" INNER JOIN events as e"
|
||||
" ON e.event_id = c.event_id"
|
||||
" AND m.room_id = c.room_id"
|
||||
" AND m.user_id = c.state_key"
|
||||
" WHERE c.type = 'm.room.member' AND %s"
|
||||
) % (where_clause,)
|
||||
|
||||
txn.execute(sql, args)
|
||||
txn.execute(sql, (user_id, *membership_list))
|
||||
results = [RoomsForUser(**r) for r in self.cursor_to_dict(txn)]
|
||||
|
||||
if do_invite:
|
||||
@@ -637,6 +665,44 @@ class RoomMemberWorkerStore(EventsWorkerStore):
|
||||
count = yield self.runInteraction("did_forget_membership", f)
|
||||
return count == 0
|
||||
|
||||
@cached()
|
||||
def get_forgotten_rooms_for_user(self, user_id):
|
||||
"""Gets all rooms the user has forgotten.
|
||||
|
||||
Args:
|
||||
user_id (str)
|
||||
|
||||
Returns:
|
||||
Deferred[set[str]]
|
||||
"""
|
||||
|
||||
def _get_forgotten_rooms_for_user_txn(txn):
|
||||
# This is a slightly convoluted query that first looks up all rooms
|
||||
# that the user has forgotten in the past, then rechecks that list
|
||||
# to see if any have subsequently been updated. This is done so that
|
||||
# we can use a partial index on `forgotten = 1` on the assumption
|
||||
# that few users will actually forget many rooms.
|
||||
#
|
||||
# Note that a room is considered "forgotten" if *all* membership
|
||||
# events for that user and room have the forgotten field set (as
|
||||
# when a user forgets a room we update all rows for that user and
|
||||
# room, not just the current one).
|
||||
sql = """
|
||||
SELECT room_id, (
|
||||
SELECT count(*) FROM room_memberships
|
||||
WHERE room_id = m.room_id AND user_id = m.user_id AND forgotten = 0
|
||||
) AS count
|
||||
FROM room_memberships AS m
|
||||
WHERE user_id = ? AND forgotten = 1
|
||||
GROUP BY room_id, user_id;
|
||||
"""
|
||||
txn.execute(sql, (user_id,))
|
||||
return set(row[0] for row in txn if row[1] == 0)
|
||||
|
||||
return self.runInteraction(
|
||||
"get_forgotten_rooms_for_user", _get_forgotten_rooms_for_user_txn
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def get_rooms_user_has_been_in(self, user_id):
|
||||
"""Get all rooms that the user has ever been in.
|
||||
@@ -668,6 +734,13 @@ class RoomMemberStore(RoomMemberWorkerStore):
|
||||
_CURRENT_STATE_MEMBERSHIP_UPDATE_NAME,
|
||||
self._background_current_state_membership,
|
||||
)
|
||||
self.register_background_index_update(
|
||||
"room_membership_forgotten_idx",
|
||||
index_name="room_memberships_user_room_forgotten",
|
||||
table="room_memberships",
|
||||
columns=["user_id", "room_id"],
|
||||
where_clause="forgotten = 1",
|
||||
)
|
||||
|
||||
def _store_room_members_txn(self, txn, events, backfilled):
|
||||
"""Store a room member in the database.
|
||||
@@ -769,6 +842,9 @@ class RoomMemberStore(RoomMemberWorkerStore):
|
||||
txn.execute(sql, (user_id, room_id))
|
||||
|
||||
self._invalidate_cache_and_stream(txn, self.did_forget, (user_id, room_id))
|
||||
self._invalidate_cache_and_stream(
|
||||
txn, self.get_forgotten_rooms_for_user, (user_id,)
|
||||
)
|
||||
|
||||
return self.runInteraction("forget_membership", f)
|
||||
|
||||
@@ -859,7 +935,7 @@ class RoomMemberStore(RoomMemberWorkerStore):
|
||||
while processed < batch_size:
|
||||
txn.execute(
|
||||
"""
|
||||
SELECT MIN(room_id) FROM rooms WHERE room_id > ?
|
||||
SELECT MIN(room_id) FROM current_state_events WHERE room_id > ?
|
||||
""",
|
||||
(last_processed_room,),
|
||||
)
|
||||
@@ -870,10 +946,10 @@ class RoomMemberStore(RoomMemberWorkerStore):
|
||||
next_room, = row
|
||||
|
||||
sql = """
|
||||
UPDATE current_state_events AS c
|
||||
UPDATE current_state_events
|
||||
SET membership = (
|
||||
SELECT membership FROM room_memberships
|
||||
WHERE event_id = c.event_id
|
||||
WHERE event_id = current_state_events.event_id
|
||||
)
|
||||
WHERE room_id = ?
|
||||
"""
|
||||
|
||||
@@ -20,6 +20,3 @@
-- for membership events. (Will also be null for membership events until the
-- background update job has finished).
ALTER TABLE current_state_events ADD membership TEXT;

INSERT INTO background_updates (update_name, progress_json) VALUES
('current_state_events_membership', '{}');

@@ -0,0 +1,24 @@
/* Copyright 2019 The Matrix.org Foundation C.I.C.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

-- We add membership to current state so that we don't need to join against
-- room_memberships, which can be surprisingly costly (we do such queries
-- very frequently).
-- This will be null for non-membership events and the content.membership key
-- for membership events. (Will also be null for membership events until the
-- background update job has finished).

INSERT INTO background_updates (update_name, progress_json) VALUES
('current_state_events_membership', '{}');
@@ -1,4 +1,4 @@
/* Copyright 2019 The Matrix.org Foundation C.I.C
/* Copyright 2019 The Matrix.org Foundation C.I.C.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -13,4 +13,6 @@
* limitations under the License.
*/

ALTER TABLE device_inbox ADD context TEXT;
-- Adds an index on room_memberships for fetching all forgotten rooms for a user
INSERT INTO background_updates (update_name, progress_json) VALUES
('room_membership_forgotten_idx', '{}');
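The room_membership_forgotten_idx background update registered earlier in this comparison creates a partial index (only rows with forgotten = 1 are indexed), which stays small on the assumption that few users forget many rooms. The same idea can be tried out in plain sqlite3, which supports the same WHERE clause on CREATE INDEX; the table layout and data below are invented for illustration:

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript(
    """
    CREATE TABLE room_memberships (user_id TEXT, room_id TEXT, forgotten INTEGER DEFAULT 0);
    -- Partial index: only rows with forgotten = 1 are indexed, so it stays tiny.
    CREATE INDEX room_memberships_user_room_forgotten
        ON room_memberships (user_id, room_id) WHERE forgotten = 1;
    INSERT INTO room_memberships VALUES ('@alice:hs', '!a:hs', 1), ('@alice:hs', '!b:hs', 0);
    """
)
rows = conn.execute(
    "SELECT room_id FROM room_memberships WHERE user_id = ? AND forgotten = 1",
    ("@alice:hs",),
).fetchall()
print(rows)  # [('!a:hs',)]
```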
@@ -1,45 +0,0 @@
|
||||
#! python
|
||||
import argparse
|
||||
import os.path as path
|
||||
import sys
|
||||
|
||||
import synapse_topology.controller.server as server
|
||||
import synapse_topology.model as model
|
||||
|
||||
from twisted.internet import endpoints, reactor
|
||||
from twisted.web.server import Site
|
||||
|
||||
from twisted.logger import (
|
||||
eventsFromJSONLogFile,
|
||||
textFileLogObserver,
|
||||
globalLogPublisher,
|
||||
)
|
||||
|
||||
globalLogPublisher.addObserver(textFileLogObserver(sys.stdout))
|
||||
|
||||
parser = argparse.ArgumentParser(description="Synapse configuration util")
|
||||
parser.add_argument(
|
||||
"config_dir",
|
||||
metavar="CONFIG_DIR",
|
||||
type=str,
|
||||
help="Path the directory containing synapse's configuration files.",
|
||||
)
|
||||
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
if not path.isdir(args.config_dir):
|
||||
print("'{}' is not a directory.".format(args.config_dir))
|
||||
exit(1)
|
||||
|
||||
|
||||
model.set_config_dir(args.config_dir)
|
||||
|
||||
|
||||
backend_endpoint = endpoints.serverFromString(
|
||||
reactor, "tcp6:port=8888:interface=localhost"
|
||||
)
|
||||
backend_endpoint.listen(Site(server.app.resource()))
|
||||
|
||||
|
||||
reactor.run()
|
||||
@@ -1 +0,0 @@
|
||||
ed25519 a_altJ iZSrNbHiO1acwiNW3j6kYheALALGXe5uQMzs5NEKP2A
|
||||
@@ -1,7 +0,0 @@
|
||||
from klein import Klein
|
||||
|
||||
app = Klein()
|
||||
|
||||
from . import server
|
||||
from . import error_handlers
|
||||
|
||||
@@ -1,46 +0,0 @@
|
||||
from jsonschema import ValidationError
|
||||
from simplejson.errors import JSONDecodeError
|
||||
from synapse_topology.model.errors import (
|
||||
BasConfigInUseError,
|
||||
BaseConfigNotFoundError,
|
||||
ConfigNotFoundError,
|
||||
)
|
||||
|
||||
from . import app
|
||||
|
||||
|
||||
@app.handle_errors(ValidationError)
|
||||
def validation_error(request, failure):
|
||||
request.setResponseCode(400)
|
||||
print("Invalid post schema {}".format(failure.getErrorMessage()))
|
||||
return "Invalid post schema {}".format(failure.getErrorMessage())
|
||||
|
||||
|
||||
@app.handle_errors(JSONDecodeError)
|
||||
def json_decode_error(request, failure):
|
||||
request.setResponseCode(400)
|
||||
return "Invalid post json"
|
||||
|
||||
|
||||
@app.handle_errors(BaseConfigNotFoundError)
|
||||
def base_config_not_found(request, failure):
|
||||
request.setResponseCode(500)
|
||||
return "Config file not setup, please initialise it using the /servername endpoint"
|
||||
|
||||
|
||||
@app.handle_errors(ConfigNotFoundError)
|
||||
def config_not_found(request, failure):
|
||||
request.setResponseCode(404)
|
||||
return "The config does not exist"
|
||||
|
||||
|
||||
@app.handle_errors(BasConfigInUseError)
|
||||
def base_config_in_use(request, failure):
|
||||
request.setResponseCode(409)
|
||||
return "Sever name and keys already configured"
|
||||
|
||||
|
||||
@app.handle_errors(Exception)
|
||||
def handle_generic_error(request, failure):
|
||||
request.setResponseCode(500)
|
||||
return "Internal server error\n{}".format(failure)
|
||||
@@ -1,51 +0,0 @@
|
||||
SERVERNAME_SCHEMA = {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"server_name": {"type": "string", "minlength": 1},
|
||||
"report_stats": {"type": "boolean"},
|
||||
},
|
||||
"required": ["server_name", "report_stats"],
|
||||
}
|
||||
|
||||
BASE_CONFIG_SCHEMA = {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"server_name": {"type": "string", "minlength": 1},
|
||||
"report_stats": {"type": "boolean"},
|
||||
"log_config": {"type": "string", "minlength": 1},
|
||||
"media_store_path": {"type": "string", "minlength": 1},
|
||||
"uploads_path": {"type": "string", "minlength": 1},
|
||||
"pid_file": {"type": "string", "minlength": 1},
|
||||
"listeners": {"type": "array"},
|
||||
"acme": {"type": "object"},
|
||||
"database": {"type": "object"},
|
||||
"tls_certificate_path": {"type": "string", "minlength": 1},
|
||||
"tls_private_key_path": {"type": "string", "minlength": 1},
|
||||
"server_config_in_use": {"type": "boolean"},
|
||||
},
|
||||
"required": ["server_name", "report_stats", "database"],
|
||||
}
|
||||
|
||||
CERT_PATHS_SCHEMA = {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"cert_path": {"type": "string", "minlength": 1},
|
||||
"cert_key_path": {"type": "string", "minlength": 1},
|
||||
},
|
||||
"required": ["cert_path", "cert_key_path"],
|
||||
}
|
||||
|
||||
CERTS_SCHEMA = {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"cert": {"type": "string", "minlength": 1},
|
||||
"cert_key": {"type": "string", "minlength": 1},
|
||||
},
|
||||
"required": ["cert", "cert_key"],
|
||||
}
|
||||
|
||||
PORTS_SCHEMA = {
|
||||
"type": "object",
|
||||
"properties": {"ports": {"type": "array"}},
|
||||
"required": ["ports"],
|
||||
}
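
A quick sketch of what these schemas accept and reject under jsonschema, the
library that validate_schema() in .utils applies (the example bodies are made up):

from jsonschema import ValidationError, validate

validate(
    instance={"server_name": "example.com", "report_stats": True},
    schema=SERVERNAME_SCHEMA,
)  # passes

try:
    validate(instance={"server_name": "example.com"}, schema=SERVERNAME_SCHEMA)
except ValidationError:
    pass  # rejected: "report_stats" is required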
|
||||
@@ -1,122 +0,0 @@
|
||||
from os.path import abspath, dirname, join
|
||||
|
||||
from canonicaljson import json
|
||||
from synapse_topology import model
|
||||
|
||||
from twisted.web.static import File
|
||||
|
||||
from .utils import port_checker
|
||||
|
||||
from . import error_handlers
|
||||
from .schemas import (
|
||||
BASE_CONFIG_SCHEMA,
|
||||
SERVERNAME_SCHEMA,
|
||||
CERT_PATHS_SCHEMA,
|
||||
CERTS_SCHEMA,
|
||||
PORTS_SCHEMA,
|
||||
)
|
||||
from .utils import validate_schema, log_body_if_fail
|
||||
|
||||
from . import app
|
||||
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
|
||||
@app.route("/topology_webui/", branch=True)
|
||||
def server_webui(request):
|
||||
client_path = abspath(join(dirname(abspath(__file__)), "../../view/webui"))
|
||||
print(client_path)
|
||||
return File(client_path)
|
||||
|
||||
|
||||
@app.route("/setup", methods=["GET"])
|
||||
def get_config_setup(request):
|
||||
return json.dumps(
|
||||
{
|
||||
model.constants.CONFIG_LOCK: model.config_in_use(),
|
||||
"config_dir": model.get_config_dir(),
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
@app.route("/servername", methods=["GET"])
|
||||
def get_server_name(request):
|
||||
return model.get_server_name()
|
||||
|
||||
|
||||
@app.route("/servername", methods=["POST"])
|
||||
@validate_schema(SERVERNAME_SCHEMA)
|
||||
def set_server_name(request, body):
|
||||
model.generate_base_config(**body)
|
||||
|
||||
|
||||
@app.route("/secretkey", methods=["GET"])
|
||||
def get_secret_key(request):
|
||||
return json.dumps({"secret_key": model.get_secret_key()})
|
||||
|
||||
|
||||
@app.route("/config", methods=["GET"])
|
||||
def get_config(request):
|
||||
return str(model.get_config())
|
||||
|
||||
|
||||
@app.route("/config", methods=["POST"])
|
||||
@validate_schema(BASE_CONFIG_SCHEMA)
|
||||
def set_config(request, body):
|
||||
model.set_config(body)
|
||||
|
||||
|
||||
with app.subroute("/config") as app:
|
||||
for config in model.constants.CONFIGS:
|
||||
|
||||
@app.route("/config/{}".format(config), methods=["GET"])
|
||||
def get_sub_config(request, sub_config):
|
||||
return model.get_config(sub_config=config)
|
||||
|
||||
@app.route("/config/{}".format(config), methods=["POST"])
|
||||
def set_sub_config(request, sub_config):
|
||||
model.set_config(json.loads(request.content.read()), sub_config=config)
|
||||
|
||||
|
||||
@app.route("/testcertpaths", methods=["POST"])
|
||||
@log_body_if_fail
|
||||
@validate_schema(CERT_PATHS_SCHEMA)
|
||||
def test_cert_paths(request, body):
|
||||
result = {}
|
||||
config_path = model.get_config_dir()
|
||||
for name, path in body.items():
|
||||
path = abspath(join(config_path, path))
|
||||
try:
|
||||
with open(path, "r"):
|
||||
result[name] = {"invalid": False, "absolute_path": path}
|
||||
except OSError:
|
||||
result[name] = {"invalid": True}
|
||||
return json.dumps(result)
|
||||
|
||||
|
||||
@app.route("/certs", methods=["POST"])
|
||||
@validate_schema(CERTS_SCHEMA)
|
||||
def upload_certs(request, body):
|
||||
model.add_certs(**body)
|
||||
|
||||
|
||||
@app.route("/ports", methods=["POST"])
|
||||
@validate_schema(PORTS_SCHEMA)
|
||||
def check_ports(request, body):
|
||||
results = []
|
||||
for port in body["ports"]:
|
||||
results.append(port_checker(port))
|
||||
return json.dumps({"ports": results})
|
||||
|
||||
|
||||
@app.route("/iw", methods=["POST"])
|
||||
def start_synapse(request):
|
||||
print("Start")
|
||||
subprocess.Popen(["synctl", "start", model.get_config_dir() + "/homeserver.yaml"])
|
||||
sys.exit()
|
||||
|
||||
|
||||
@app.route("/favicon.ico")
|
||||
def noop(request):
|
||||
return
|
||||
@@ -1,48 +0,0 @@
|
||||
from functools import wraps
|
||||
|
||||
from canonicaljson import json
|
||||
from jsonschema import validate
|
||||
|
||||
from contextlib import closing
|
||||
import socket
|
||||
|
||||
|
||||
def validate_schema(schema):
|
||||
def _wrap_validate(func):
|
||||
@wraps(func)
|
||||
def _do_validate(request):
|
||||
body = json.loads(request.content.read())
|
||||
validate(instance=body, schema=schema)
|
||||
return func(request, body)
|
||||
|
||||
return _do_validate
|
||||
|
||||
return _wrap_validate
|
||||
|
||||
|
||||
def port_checker(port):
|
||||
if port < 0 or 65535 < port:
|
||||
return False
|
||||
|
||||
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
|
||||
try:
|
||||
sock.bind((socket.gethostname(), port))
|
||||
sock.listen()
|
||||
sock.close()
|
||||
return True
|
||||
except OSError:
|
||||
return False
|
||||
|
||||
|
||||
def log_body_if_fail(func):
|
||||
@wraps(func)
|
||||
def _log_wrapper(request):
|
||||
try:
|
||||
return func(request)
|
||||
except Exception:
|
||||
body = json.loads(request.content.read())
|
||||
print(body)
|
||||
raise
|
||||
|
||||
return _log_wrapper
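
Taken together, these helpers compose the same way the real routes in server.py
do; a minimal sketch (the route name here is illustrative only):

from canonicaljson import json

from . import app
from .schemas import PORTS_SCHEMA
from .utils import port_checker, validate_schema


@app.route("/example_ports", methods=["POST"])  # hypothetical route
@validate_schema(PORTS_SCHEMA)
def example_ports(request, body):
    # validate_schema has already parsed and checked the body, so "ports" is
    # guaranteed to be present; port_checker returns True only when the port
    # is in range and could be bound locally.
    return json.dumps({"ports": [port_checker(p) for p in body["ports"]]})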
|
||||
|
||||
@@ -1,16 +0,0 @@
|
||||
Backend
|
||||
=======
|
||||
::
|
||||
|
||||
Make sure you have synapse and klein installed in your pip environment.
Windows: right click __init__.py and select "Run with Python".
*nix: run ./__init__.py
|
||||
|
||||
Frontend
|
||||
========
|
||||
Start the backend, then run:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
cd view/webui
|
||||
yarn watch
|
||||
@@ -1,104 +0,0 @@
|
||||
import os.path as path
|
||||
|
||||
import yaml
|
||||
|
||||
from synapse.config.homeserver import HomeServerConfig
|
||||
|
||||
from .constants import (
|
||||
BASE_CONFIG,
|
||||
CONFIG_LOCK,
|
||||
CONFIG_LOCK_DATA,
|
||||
DATA_SUBDIR,
|
||||
SERVER_NAME,
|
||||
)
|
||||
from .errors import BasConfigInUseError, BaseConfigNotFoundError, ConfigNotFoundError
|
||||
|
||||
import subprocess
|
||||
|
||||
|
||||
def set_config_dir(conf_dir):
|
||||
global config_dir
|
||||
config_dir = path.abspath(conf_dir)
|
||||
|
||||
|
||||
def get_config(sub_config=BASE_CONFIG):
|
||||
if sub_config:
|
||||
conf_path = path.join(config_dir, sub_config)
|
||||
try:
|
||||
with open(conf_path, "r") as f:
|
||||
return yaml.safe_load(f)
|
||||
except FileNotFoundError:
|
||||
raise BaseConfigNotFoundError() if sub_config == BASE_CONFIG else ConfigNotFoundError(
|
||||
sub_config
|
||||
)
|
||||
|
||||
|
||||
def get_config_dir():
|
||||
return config_dir
|
||||
|
||||
|
||||
def set_config(config, sub_config=BASE_CONFIG):
|
||||
if sub_config == BASE_CONFIG and config_in_use():
|
||||
raise BasConfigInUseError()
|
||||
with open(path.join(config_dir, sub_config), "w") as f:
|
||||
f.write(yaml.dump(config))
|
||||
|
||||
|
||||
def config_in_use():
|
||||
"""
|
||||
Checks whether the config has been marked as in use. If the config was
generated by this tool but Synapse has not been launched yet, the flag will
have been set to False. If the flag is absent we assume Synapse was set up
by some other means, and therefore that the config is in use.
"""
|
||||
try:
|
||||
return get_config().get(CONFIG_LOCK, True)
|
||||
except FileNotFoundError:
|
||||
return False
|
||||
|
||||
|
||||
def generate_base_config(server_name, report_stats):
|
||||
if config_in_use():
|
||||
raise BasConfigInUseError()
|
||||
|
||||
print(config_dir)
|
||||
conf = HomeServerConfig().generate_config(
|
||||
config_dir,
|
||||
path.join(config_dir, DATA_SUBDIR),
|
||||
server_name,
|
||||
generate_secrets=True,
|
||||
report_stats=report_stats,
|
||||
)
|
||||
|
||||
with open(path.join(config_dir, BASE_CONFIG), "w") as f:
|
||||
f.write(conf)
|
||||
f.write(CONFIG_LOCK_DATA)
|
||||
|
||||
|
||||
def get_server_name():
|
||||
config = get_config()
|
||||
if config:
|
||||
return config.get(SERVER_NAME)
|
||||
|
||||
|
||||
def get_secret_key():
|
||||
config = get_config()
|
||||
server_name = config.get(SERVER_NAME)
|
||||
signing_key_path = path.join(config_dir, server_name + ".signing.key")
|
||||
subprocess.run(["generate_signing_key.py", "-o", signing_key_path])
|
||||
with open(signing_key_path, "r") as f:
|
||||
return f.read()
|
||||
|
||||
|
||||
def verify_yaml():
|
||||
pass
|
||||
|
||||
|
||||
def add_certs(cert, cert_key):
|
||||
with open(
|
||||
path.join(config_dir, get_server_name() + ".tls.crt"), "w"
|
||||
) as cert_file, open(
|
||||
path.join(config_dir, get_server_name() + ".tls.key"), "w"
|
||||
) as key_file:
|
||||
cert_file.write(cert)
|
||||
key_file.write(cert_key)
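
To make the config-lock behaviour above concrete, a short sketch of the intended
lifecycle (the directory path is an example; generate_base_config needs a working
synapse install so that HomeServerConfig can be imported):

from synapse_topology import model

model.set_config_dir("/tmp/synapse-config")  # example directory
model.config_in_use()  # False: homeserver.yaml does not exist yet

model.generate_base_config("example.com", report_stats=False)
# The generated homeserver.yaml ends with CONFIG_LOCK_DATA, i.e. the line
# "server_config_in_use: False", so config_in_use() stays False and the
# setup UI keeps treating the base config as editable.
model.config_in_use()  # still False until synapse is started with this config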
|
||||
@@ -1,25 +0,0 @@
|
||||
# Paths
|
||||
BASE_CONFIG = "homeserver.yaml"
|
||||
# TODO: fill in further configs
|
||||
CONFIGS = [BASE_CONFIG, "user.yaml", "optimizations.yaml", "something.yaml"]
|
||||
DATA_SUBDIR = "data"
|
||||
|
||||
# Config options
|
||||
SERVER_NAME = "server_name"
|
||||
CONFIG_LOCK = "server_config_in_use"
|
||||
SECRET_KEY = "macaroon_secret_key"
|
||||
|
||||
CONFIG_LOCK_DATA = """
|
||||
|
||||
## CONFIG LOCK ##
|
||||
|
||||
|
||||
# Specifies whether synapse has been started with this config.
|
||||
# If set to True the setup util will not go through the initialization
|
||||
# phase which sets the server name and server keys.
|
||||
{}: False
|
||||
|
||||
|
||||
""".format(
|
||||
CONFIG_LOCK
|
||||
)
|
||||
@@ -1,18 +0,0 @@
|
||||
from .constants import BASE_CONFIG
|
||||
|
||||
|
||||
class ConfigNotFoundError(FileNotFoundError):
|
||||
def __init__(self, config_name):
|
||||
self.config_name = config_name
|
||||
|
||||
def get_config_name(self):
|
||||
return self.config_name
|
||||
|
||||
|
||||
class BaseConfigNotFoundError(ConfigNotFoundError):
|
||||
def __init__(self):
|
||||
super().__init__(BASE_CONFIG)
|
||||
|
||||
|
||||
class BasConfigInUseError(Exception):
|
||||
pass
|
||||
@@ -1,9 +0,0 @@
|
||||
{
|
||||
"presets": [
|
||||
"@babel/preset-env",
|
||||
"@babel/preset-react",
|
||||
],
|
||||
"plugins": [
|
||||
"@babel/plugin-proposal-object-rest-spread"
|
||||
],
|
||||
}
|
||||
synapse_topology/view/webui/.gitignore (vendored)
@@ -1,2 +0,0 @@
|
||||
node_modules
|
||||
dist
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -1,46 +0,0 @@
|
||||
Digitized data copyright (c) 2010 Google Corporation
|
||||
with Reserved Font Arimo, Tinos and Cousine.
|
||||
Copyright (c) 2012 Red Hat, Inc.
|
||||
with Reserved Font Name Liberation.
|
||||
|
||||
This Font Software is licensed under the SIL Open Font License, Version 1.1.
|
||||
This license is copied below, and is also available with a FAQ at: http://scripts.sil.org/OFL
|
||||
|
||||
-----------------------------------------------------------
|
||||
SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007
|
||||
-----------------------------------------------------------
|
||||
|
||||
PREAMBLE
|
||||
The goals of the Open Font License (OFL) are to stimulate worldwide development of collaborative font projects, to support the font creation efforts of academic and linguistic communities, and to provide a free and open framework in which fonts may be shared and improved in partnership with others.
|
||||
|
||||
The OFL allows the licensed fonts to be used, studied, modified and redistributed freely as long as they are not sold by themselves. The fonts, including any derivative works, can be bundled, embedded, redistributed and/or sold with any software provided that any reserved names are not used by derivative works. The fonts and derivatives, however, cannot be released under any other type of license. The requirement for fonts to remain under this license does not apply to any document created using the fonts or their derivatives.
|
||||
|
||||
DEFINITIONS
|
||||
"Font Software" refers to the set of files released by the Copyright Holder(s) under this license and clearly marked as such. This may include source files, build scripts and documentation.
|
||||
|
||||
"Reserved Font Name" refers to any names specified as such after the copyright statement(s).
|
||||
|
||||
"Original Version" refers to the collection of Font Software components as distributed by the Copyright Holder(s).
|
||||
|
||||
"Modified Version" refers to any derivative made by adding to, deleting, or substituting -- in part or in whole -- any of the components of the Original Version, by changing formats or by porting the Font Software to a new environment.
|
||||
|
||||
"Author" refers to any designer, engineer, programmer, technical writer or other person who contributed to the Font Software.
|
||||
|
||||
PERMISSION & CONDITIONS
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of the Font Software, to use, study, copy, merge, embed, modify, redistribute, and sell modified and unmodified copies of the Font Software, subject to the following conditions:
|
||||
|
||||
1) Neither the Font Software nor any of its individual components, in Original or Modified Versions, may be sold by itself.
|
||||
|
||||
2) Original or Modified Versions of the Font Software may be bundled, redistributed and/or sold with any software, provided that each copy contains the above copyright notice and this license. These can be included either as stand-alone text files, human-readable headers or in the appropriate machine-readable metadata fields within text or binary files as long as those fields can be easily viewed by the user.
|
||||
|
||||
3) No Modified Version of the Font Software may use the Reserved Font Name(s) unless explicit written permission is granted by the corresponding Copyright Holder. This restriction only applies to the primary font name as presented to the users.
|
||||
|
||||
4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font Software shall not be used to promote, endorse or advertise any Modified Version, except to acknowledge the contribution(s) of the Copyright Holder(s) and the Author(s) or with their explicit written permission.
|
||||
|
||||
5) The Font Software, modified or unmodified, in part or in whole, must be distributed entirely under this license, and must not be distributed under any other license. The requirement for fonts to remain under this license does not apply to any document created using the Font Software.
|
||||
|
||||
TERMINATION
|
||||
This license becomes null and void if any of the above conditions are not met.
|
||||
|
||||
DISCLAIMER
|
||||
THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM OTHER DEALINGS IN THE FONT SOFTWARE.
|
||||
@@ -1,5 +0,0 @@
|
||||
<svg width="75" height="32" xmlns="http://www.w3.org/2000/svg">
|
||||
<g fill="#2D2D2D" fill-rule="nonzero">
|
||||
<path d="M.936.732V31.25H3.13v.732H.095V0h3.034v.732zM9.386 10.407v1.544h.044a4.461 4.461 0 0 1 1.487-1.368c.58-.323 1.245-.485 1.993-.485.72 0 1.377.14 1.972.42.595.279 1.047.771 1.355 1.477.338-.5.796-.941 1.377-1.323.58-.383 1.266-.574 2.06-.574.602 0 1.16.074 1.674.22.514.148.954.383 1.322.707.366.323.653.746.859 1.268.205.522.308 1.15.308 1.887v7.633H20.71v-6.464c0-.383-.015-.743-.044-1.082a2.305 2.305 0 0 0-.242-.882 1.473 1.473 0 0 0-.584-.596c-.257-.146-.606-.22-1.047-.22-.44 0-.796.085-1.068.253-.272.17-.485.39-.639.662a2.654 2.654 0 0 0-.308.927 7.074 7.074 0 0 0-.078 1.048v6.354h-3.128v-6.398c0-.338-.007-.673-.021-1.004a2.825 2.825 0 0 0-.188-.916 1.411 1.411 0 0 0-.55-.673c-.258-.168-.636-.253-1.135-.253a2.33 2.33 0 0 0-.584.1 1.94 1.94 0 0 0-.705.374c-.228.184-.422.449-.584.794-.161.346-.242.798-.242 1.357v6.619H6.434V10.407h2.952zM25.842 12.084a3.751 3.751 0 0 1 1.233-1.17 5.37 5.37 0 0 1 1.685-.629 9.579 9.579 0 0 1 1.884-.187c.573 0 1.153.04 1.74.121.588.081 1.124.24 1.609.475.484.235.88.562 1.19.981.308.42.462.975.462 1.666v5.934c0 .516.03 1.008.088 1.478.058.471.161.824.308 1.06H32.87a4.435 4.435 0 0 1-.22-1.104c-.5.515-1.087.876-1.762 1.081a7.084 7.084 0 0 1-2.071.31c-.544 0-1.05-.067-1.52-.2a3.472 3.472 0 0 1-1.234-.617 2.87 2.87 0 0 1-.826-1.059c-.199-.426-.298-.934-.298-1.522 0-.647.114-1.18.342-1.6.227-.419.52-.753.881-1.004.36-.25.771-.437 1.234-.562.462-.125.929-.224 1.399-.298.47-.073.932-.132 1.387-.176.456-.044.86-.11 1.212-.199.353-.088.631-.217.837-.386.206-.169.301-.415.287-.74 0-.337-.055-.606-.166-.804a1.217 1.217 0 0 0-.44-.464 1.737 1.737 0 0 0-.639-.22 5.292 5.292 0 0 0-.782-.055c-.617 0-1.101.132-1.454.397-.352.264-.558.706-.617 1.323h-3.128c.044-.735.227-1.345.55-1.83zm6.179 4.423a5.095 5.095 0 0 1-.639.165 9.68 9.68 0 0 1-.716.11c-.25.03-.5.067-.749.11a5.616 5.616 0 0 0-.694.177 2.057 2.057 0 0 0-.594.298c-.17.125-.305.284-.408.474-.103.192-.154.434-.154.728 0 .28.051.515.154.706.103.192.242.342.419.453.176.11.381.187.617.231.234.044.477.066.726.066.617 0 1.094-.102 1.432-.309.338-.205.587-.452.75-.739.16-.286.26-.576.297-.87.036-.295.055-.53.055-.707v-1.17a1.4 1.4 0 0 1-.496.277zM43.884 10.407v2.096h-2.291v5.647c0 .53.088.883.264 1.059.176.177.529.265 1.057.265.177 0 .345-.007.507-.022.161-.015.316-.037.463-.066v2.426a7.49 7.49 0 0 1-.882.089 21.67 21.67 0 0 1-.947.022c-.484 0-.944-.034-1.377-.1a3.233 3.233 0 0 1-1.145-.386 2.04 2.04 0 0 1-.782-.816c-.191-.353-.287-.816-.287-1.39v-6.728H36.57v-2.096h1.894v-3.42h3.129v3.42h2.29zM48.355 10.407v2.118h.044a3.907 3.907 0 0 1 1.454-1.754 4.213 4.213 0 0 1 1.036-.497 3.734 3.734 0 0 1 1.145-.176c.206 0 .433.037.683.11v2.912a5.862 5.862 0 0 0-.528-.077 5.566 5.566 0 0 0-.595-.033c-.573 0-1.058.096-1.454.287a2.52 2.52 0 0 0-.958.783 3.143 3.143 0 0 0-.518 1.158 6.32 6.32 0 0 0-.154 1.434v5.14h-3.128V10.407h2.973zM54.039 8.642V6.06h3.128v2.582H54.04zm3.128 1.765v11.405H54.04V10.407h3.128zM58.797 10.407h3.569l2.005 2.978 1.982-2.978h3.459l-3.745 5.339 4.208 6.067h-3.57l-2.378-3.596-2.38 3.596h-3.502l4.097-6.001zM74.094 31.25V.732H71.9V0h3.035v31.982H71.9v-.732z"/>
|
||||
</g>
|
||||
</svg>
|
||||
|
@@ -1,13 +0,0 @@
|
||||
<html>
|
||||
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<title>Topology - The synapse configuration tool</title>
|
||||
</head>
|
||||
|
||||
<body>
|
||||
<div id="content" />
|
||||
<script src="dist/bundle.js" type="text/javascript"></script>
|
||||
</body>
|
||||
|
||||
</html>
|
||||
@@ -1,24 +0,0 @@
|
||||
export const DELEGATION_TYPES = {
|
||||
LOCAL: "local",
|
||||
WELL_KNOWN: "well_known",
|
||||
DNS: "dns",
|
||||
}
|
||||
|
||||
export const REVERSE_PROXY_TYPES = {
|
||||
CADDY: "CADDY",
|
||||
APACHE: "APACHE",
|
||||
HAPROXY: "HAPROXY",
|
||||
NGINX: "NGINX",
|
||||
OTHER: "OTHER",
|
||||
}
|
||||
|
||||
export const TLS_TYPES = {
|
||||
ACME: "ACME",
|
||||
TLS: "TLS",
|
||||
REVERSE_PROXY: "REVERSE_PROXY",
|
||||
}
|
||||
|
||||
export const DATABASE_TYPES = {
|
||||
SQLITE3: "sqlite3",
|
||||
POSTGRES: "psycopg2",
|
||||
}
|
||||
@@ -1,255 +0,0 @@
|
||||
import {
|
||||
ADVANCE_UI,
|
||||
BACK_UI,
|
||||
SET_SERVERNAME,
|
||||
SET_STATS,
|
||||
BASE_CONFIG_CHECKED,
|
||||
FAIL,
|
||||
SET_SECRET_KEY,
|
||||
GETTING_SECRET_KEY,
|
||||
SET_DELEGATION,
|
||||
SET_DELEGATION_SERVERNAME,
|
||||
SET_DELEGATION_PORTS,
|
||||
SET_REVERSE_PROXY,
|
||||
SET_TLS,
|
||||
TESTING_TLS_CERT_PATHS,
|
||||
SET_TLS_CERT_PATHS,
|
||||
SET_TLS_CERT_PATHS_VALIDITY,
|
||||
SET_TLS_CERT_FILES,
|
||||
UPLOADING_TLS_CERT_PATHS,
|
||||
TESTING_SYNAPSE_PORTS,
|
||||
SET_SYNAPSE_PORTS,
|
||||
SET_SYNAPSE_PORTS_FREE,
|
||||
SET_DATABASE,
|
||||
SET_CONFIG_DIR,
|
||||
WRITE_CONFIG,
|
||||
} from './types';
|
||||
|
||||
import {
|
||||
get_server_setup,
|
||||
post_server_name,
|
||||
get_secretkey,
|
||||
post_cert_paths,
|
||||
post_certs,
|
||||
test_ports,
|
||||
post_config,
|
||||
start_synapse,
|
||||
} from '../api';
|
||||
import { CONFIG_LOCK, CONFIG_DIR } from '../api/constants';
|
||||
import { base_config_to_synapse_config } from '../utils/yaml';
|
||||
|
||||
export const startup = () => {
|
||||
return dispatch => {
|
||||
get_server_setup().then(
|
||||
result => {
|
||||
dispatch(start(result[CONFIG_LOCK]));
|
||||
dispatch(set_config_dir(result[CONFIG_DIR]));
|
||||
},
|
||||
error => dispatch(fail(error)),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
const set_config_dir = dir => ({
|
||||
type: SET_CONFIG_DIR,
|
||||
config_dir: dir,
|
||||
});
|
||||
|
||||
export const generate_secret_keys = consent => {
|
||||
return (dispatch, getState) => {
|
||||
dispatch(getting_secret_keys());
|
||||
post_server_name(getState().base_config.servername, consent)
|
||||
.then(
|
||||
result => dispatch(get_secret_key()),
|
||||
error => dispatch(fail(error))
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
export const set_tls_cert_paths = (cert_path, cert_key_path) => {
|
||||
return dispatch => {
|
||||
dispatch(testing_tls_cert_paths(true));
|
||||
post_cert_paths(cert_path, cert_key_path)
|
||||
.then(
|
||||
result => dispatch(check_tls_cert_path_validity(result)),
|
||||
error => dispatch(fail(error))
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
const set_tls_certs = (cert_path, cert_key_path) => ({
|
||||
type: SET_TLS_CERT_PATHS,
|
||||
cert_path,
|
||||
cert_key_path,
|
||||
})
|
||||
|
||||
const testing_tls_cert_paths = testing => ({
|
||||
type: TESTING_TLS_CERT_PATHS,
|
||||
testing,
|
||||
});
|
||||
|
||||
const check_tls_cert_path_validity = (args) => {
|
||||
const { cert_path, cert_key_path } = args
|
||||
console.log("!!!!!!!!!!")
|
||||
console.log(args)
|
||||
console.log("!!!!!!!!!!")
|
||||
return dispatch => {
|
||||
dispatch(testing_tls_cert_paths(false));
|
||||
dispatch(set_tls_certs(cert_path.absolute_path, cert_key_path.absolute_path))
|
||||
dispatch(set_cert_path_validity({ cert_path, cert_key_path }));
|
||||
if (!cert_path.invalid && !cert_key_path.invalid) {
|
||||
dispatch(advance_ui());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export const upload_tls_cert_files = (tls_cert_file, tls_cert_key_file) =>
|
||||
dispatch => {
|
||||
dispatch(set_tls_cert_files(tls_cert_file, tls_cert_key_file));
|
||||
dispatch(uploading_tls_cert_files(true));
|
||||
post_certs(tls_cert_file, tls_cert_key_file)
|
||||
.then(
|
||||
result => {
|
||||
dispatch(uploading_tls_cert_files(false));
|
||||
dispatch(advance_ui())
|
||||
},
|
||||
error => dispatch(fail(error)),
|
||||
)
|
||||
}
|
||||
|
||||
const uploading_tls_cert_files = uploading => ({
|
||||
type: UPLOADING_TLS_CERT_PATHS,
|
||||
uploading
|
||||
})
|
||||
|
||||
export const set_tls_cert_files = (tls_cert_file, tls_cert_key_file) => ({
|
||||
type: SET_TLS_CERT_FILES,
|
||||
tls_cert_file,
|
||||
tls_cert_key_file,
|
||||
})
|
||||
const set_cert_path_validity = ({ cert_path, cert_key_path }) => ({
|
||||
type: SET_TLS_CERT_PATHS_VALIDITY,
|
||||
cert_path_invalid: cert_path.invalid,
|
||||
cert_key_path_invalid: cert_key_path.invalid,
|
||||
});
|
||||
|
||||
export const getting_secret_keys = () => ({
|
||||
type: GETTING_SECRET_KEY,
|
||||
});
|
||||
|
||||
export const get_secret_key = () => {
|
||||
return dispatch => {
|
||||
get_secretkey().then(
|
||||
result => dispatch(set_secret_key(result)),
|
||||
error => dispatch(fail(error)),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
export const set_secret_key = key => ({
|
||||
type: SET_SECRET_KEY,
|
||||
key,
|
||||
});
|
||||
|
||||
export const start = server_setup => ({
|
||||
type: BASE_CONFIG_CHECKED,
|
||||
base_config_done: server_setup,
|
||||
});
|
||||
|
||||
export const fail = reason => ({
|
||||
type: FAIL,
|
||||
reason,
|
||||
});
|
||||
|
||||
export const advance_ui = option => ({
|
||||
type: ADVANCE_UI,
|
||||
option,
|
||||
});
|
||||
|
||||
export const set_servername = servername => ({
|
||||
type: SET_SERVERNAME,
|
||||
servername,
|
||||
});
|
||||
|
||||
export const set_stats = consent => ({
|
||||
type: SET_STATS,
|
||||
consent,
|
||||
});
|
||||
|
||||
export const set_delegation = delegation_type => ({
|
||||
type: SET_DELEGATION,
|
||||
delegation_type,
|
||||
});
|
||||
|
||||
export const set_delegation_servername = servername => ({
|
||||
type: SET_DELEGATION_SERVERNAME,
|
||||
servername,
|
||||
});
|
||||
|
||||
export const set_delegation_ports = (federation_port, client_port) => ({
|
||||
type: SET_DELEGATION_PORTS,
|
||||
federation_port,
|
||||
client_port,
|
||||
});
|
||||
|
||||
export const set_reverse_proxy = proxy_type => ({
|
||||
type: SET_REVERSE_PROXY,
|
||||
proxy_type,
|
||||
});
|
||||
|
||||
export const set_tls = tls_type => ({
|
||||
type: SET_TLS,
|
||||
tls_type,
|
||||
});
|
||||
|
||||
export const set_synapse_ports = (federation_port, client_port) => {
|
||||
const fed_port_priv = federation_port < 1024;
|
||||
const client_port_priv = client_port < 1024;
|
||||
return dispatch => {
|
||||
dispatch(testing_synapse_ports(true));
|
||||
dispatch({
|
||||
type: SET_SYNAPSE_PORTS,
|
||||
federation_port,
|
||||
client_port,
|
||||
})
|
||||
test_ports([federation_port, client_port])
|
||||
.then(
|
||||
results => dispatch(update_ports_free(
|
||||
fed_port_priv ? true : results.ports[0],
|
||||
client_port_priv ? true : results.ports[1]
|
||||
)),
|
||||
error => dispatch(fail(error)),
|
||||
)
|
||||
}
|
||||
};
|
||||
|
||||
export const update_ports_free = (synapse_federation_port_free, synapse_client_port_free) => {
|
||||
return dispatch => {
|
||||
dispatch(testing_synapse_ports(false));
|
||||
dispatch({
|
||||
type: SET_SYNAPSE_PORTS_FREE,
|
||||
synapse_federation_port_free,
|
||||
synapse_client_port_free,
|
||||
});
|
||||
if (synapse_federation_port_free && synapse_client_port_free) {
|
||||
dispatch(advance_ui())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export const testing_synapse_ports = verifying => ({
|
||||
type: TESTING_SYNAPSE_PORTS,
|
||||
verifying,
|
||||
})
|
||||
|
||||
export const set_database = database => ({
|
||||
type: SET_DATABASE,
|
||||
database,
|
||||
})
|
||||
|
||||
export const write_config = (config, sub_config_name) => {
|
||||
return (dispatch, getState) => {
|
||||
post_config(base_config_to_synapse_config(getState().base_config), sub_config_name)
|
||||
.then(res => start_synapse(), error => dispatch(fail(error)))
|
||||
}
|
||||
}
|
||||
@@ -1,24 +0,0 @@
|
||||
export const ADVANCE_UI = 'ADVANCE_UI';
|
||||
export const BACK_UI = 'BACK_UI';
|
||||
export const SET_SERVERNAME = 'SET_SERVERNAME';
|
||||
export const SET_STATS = 'SET_STATS';
|
||||
export const BASE_CONFIG_CHECKED = 'BASE_CONFIG_CHECKED';
|
||||
export const FAIL = 'NETWORK_FAIL';
|
||||
export const SET_SECRET_KEY = 'SET_SECRET_KEY';
|
||||
export const GETTING_SECRET_KEY = 'GETTING_SECRET_KEY';
|
||||
export const SET_DELEGATION = 'SET_DELEGATION';
|
||||
export const SET_DELEGATION_SERVERNAME = 'SET_DELEGATION_SERVERNAME';
|
||||
export const SET_DELEGATION_PORTS = 'SET_DELEGATION_PORTS';
|
||||
export const SET_REVERSE_PROXY = 'SET_REVERSE_PROXY';
|
||||
export const TESTING_TLS_CERT_PATHS = 'TESTING_TLS_CERT_PATHS';
|
||||
export const UPLOADING_TLS_CERT_PATHS = 'UPLOADING_TLS_CERT_PATHS';
|
||||
export const SET_TLS = 'SET_TLS';
|
||||
export const SET_TLS_CERT_PATHS = 'SET_TLS_CERT_PATHS';
|
||||
export const SET_TLS_CERT_PATHS_VALIDITY = 'SET_TLS_CERT_PATHS_VALIDITY';
|
||||
export const SET_TLS_CERT_FILES = 'SET_TLS_CERT_FILES';
|
||||
export const TESTING_SYNAPSE_PORTS = 'TESTING_SYNAPSE_PORTS';
|
||||
export const SET_SYNAPSE_PORTS = 'SET_SYNAPSE_PORTS';
|
||||
export const SET_SYNAPSE_PORTS_FREE = 'SET_SYNAPSE_PORTS_IN_USE';
|
||||
export const SET_DATABASE = 'SET_DATABASE';
|
||||
export const SET_CONFIG_DIR = 'SET_CONFIG_DIR';
|
||||
export const WRITE_CONFIG = 'WRITE_CONFIG';
|
||||
@@ -1,11 +0,0 @@
|
||||
export const API_URL = "http://localhost:8888/";
|
||||
export const SERVER_NAME = "/servername";
|
||||
export const SECRET_KEY = "/secretkey";
|
||||
export const CONFIG = "/config";
|
||||
export const CONFIG_SOMETHING = "/config_something";
|
||||
export const SETUP_CHECK = "/setup";
|
||||
export const CERT_PATHS = "/testcertpaths";
|
||||
export const TEST_PORTS = "/ports";
|
||||
export const CONFIG_LOCK = "server_config_in_use";
|
||||
export const CONFIG_DIR = "config_dir";
|
||||
export const START = "/start";
|
||||
@@ -1,89 +0,0 @@
|
||||
import fetchAbsolute from 'fetch-absolute';
|
||||
import {
|
||||
API_URL,
|
||||
CONFIG,
|
||||
SECRET_KEY,
|
||||
SERVER_NAME,
|
||||
SETUP_CHECK,
|
||||
CERT_PATHS,
|
||||
TEST_PORTS,
|
||||
START,
|
||||
} from './constants';
|
||||
|
||||
const fetchAbs = fetchAbsolute(fetch)(API_URL)
|
||||
|
||||
export const get_server_name = () =>
|
||||
fetchAbs(SERVER_NAME)
|
||||
.then(res => res.json())
|
||||
|
||||
export const post_server_name = (servername, consent) =>
|
||||
fetchAbs(
|
||||
SERVER_NAME,
|
||||
{
|
||||
method: 'POST',
|
||||
body: JSON.stringify({
|
||||
"server_name": servername,
|
||||
"report_stats": consent
|
||||
})
|
||||
}
|
||||
)
|
||||
|
||||
export const post_cert_paths = (cert_path, cert_key_path) =>
|
||||
fetchAbs(
|
||||
CERT_PATHS,
|
||||
{
|
||||
method: 'POST',
|
||||
body: JSON.stringify({
|
||||
cert_path,
|
||||
cert_key_path,
|
||||
})
|
||||
}
|
||||
).then(res => res.json())
|
||||
|
||||
export const post_certs = (cert, cert_key) =>
|
||||
fetchAbs(
|
||||
CERT_PATHS,
|
||||
{
|
||||
method: 'POST',
|
||||
body: JSON.stringify({
|
||||
cert,
|
||||
cert_key,
|
||||
})
|
||||
}
|
||||
)
|
||||
|
||||
export const test_ports = (ports) =>
|
||||
fetchAbs(
|
||||
TEST_PORTS,
|
||||
{
|
||||
method: 'POST',
|
||||
body: JSON.stringify({
|
||||
ports
|
||||
})
|
||||
}
|
||||
).then(res => res.json())
|
||||
|
||||
export const get_secretkey = () =>
|
||||
fetchAbs(SECRET_KEY)
|
||||
.then(res => res.json())
|
||||
.then(json => json.secret_key)
|
||||
|
||||
export const get_config = () => {
|
||||
|
||||
};
|
||||
|
||||
export const post_config = (config, sub_config_name) =>
|
||||
fetchAbs(
|
||||
sub_config_name ? CONFIG + "/" + sub_config_name : CONFIG,
|
||||
{
|
||||
method: 'POST',
|
||||
body: JSON.stringify(config),
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
// Checks if the server's base config has been setup.
|
||||
export const get_server_setup = () => fetchAbs(SETUP_CHECK)
|
||||
.then(res => res.json())
|
||||
|
||||
export const start_synapse = () => fetchAbs(START)
|
||||
@@ -1,12 +0,0 @@
|
||||
import React from 'react';
|
||||
|
||||
import style from '../../less/main.less';
|
||||
import ContentWrapper from '../containers/ContentWrapper';
|
||||
import ButtonDisplay from './ButtonDisplay';
|
||||
|
||||
export default ({ onClick }) =>
|
||||
<ContentWrapper>
|
||||
<h1>Synapse Topology</h1>
|
||||
<p>Let's get started.</p>
|
||||
<ButtonDisplay><button onClick={onClick}>SETUP</button></ButtonDisplay>
|
||||
</ContentWrapper>
|
||||
@@ -1,5 +0,0 @@
|
||||
import React from 'react';
|
||||
|
||||
import style from '../../less/main.less';
|
||||
|
||||
export default ({ children }) => <div className={style.buttonDisplay}>{children}</div>
|
||||
@@ -1,14 +0,0 @@
|
||||
import React, { useState } from 'react';
|
||||
|
||||
import style from '../../less/main.less';
|
||||
|
||||
export default () => {
|
||||
return <ContentWrapper>
|
||||
<h1>Config selection</h1>
|
||||
<p>The base config has already been setup. Please select a config to edit:</p>
|
||||
<p>TODO: .. well .. this.</p>
|
||||
<div>
|
||||
<button />
|
||||
</div>
|
||||
</ContentWrapper>;
|
||||
}
|
||||
@@ -1,16 +0,0 @@
|
||||
import React from 'react';
|
||||
|
||||
import style from '../../less/main.less';
|
||||
|
||||
export default ({ servername, children }) => {
|
||||
if (servername) {
|
||||
return <div>
|
||||
<p className={style.servername}>{servername}</p>
|
||||
<div className={style.contentWrapper}>
|
||||
{children}
|
||||
</div>
|
||||
</div>
|
||||
} else {
|
||||
return <div className={style.contentWrapper}>{children}</div>
|
||||
}
|
||||
}
|
||||
@@ -1,28 +0,0 @@
|
||||
import React, { useState } from 'react';
|
||||
|
||||
import ContentWrapper from '../containers/ContentWrapper';
|
||||
|
||||
import {
|
||||
DATABASE_TYPES
|
||||
} from '../actions/constants'
|
||||
import ButtonDisplay from './ButtonDisplay';
|
||||
|
||||
export default ({
|
||||
onClick,
|
||||
}) => {
|
||||
const defaultDatabase = DATABASE_TYPES.POSTGRES;
|
||||
const [database, setDatabase] = useState(defaultDatabase)
|
||||
return <ContentWrapper>
|
||||
<h1>Database</h1>
|
||||
<p>Synapse can use either SQLite3 or Postgres as its database.</p>
|
||||
<p>If you don't already have one of the two installed, Postgres is the recommended database to use.</p>
|
||||
|
||||
<select defaultValue={defaultDatabase} onChange={event => setDatabase(event.target.value)}>
|
||||
<option value={DATABASE_TYPES.POSTGRES}>PostgreSQL</option>
|
||||
<option value={DATABASE_TYPES.SQLITE3}>SQLite3</option>
|
||||
</select>
|
||||
<ButtonDisplay>
|
||||
<button onClick={() => onClick(database)}>Continue</button>
|
||||
</ButtonDisplay>
|
||||
</ContentWrapper>
|
||||
}
|
||||
@@ -1,48 +0,0 @@
|
||||
import React from 'react';
|
||||
|
||||
import style from '../../less/main.less';
|
||||
import ContentWrapper from '../containers/ContentWrapper';
|
||||
import ButtonDisplay from './ButtonDisplay';
|
||||
|
||||
export default ({ servername, clickLocal, clickWellKnown, clickDNS }) => {
|
||||
const local_button_text = `This server is ${servername}`;
|
||||
return <ContentWrapper>
|
||||
<h1>Delegation</h1>
|
||||
<p>Other federation servers will connect to {servername}:8448 over the network.</p>
|
||||
<p>
|
||||
If you'd like the synapse install to be hosted on a different server
|
||||
to the one known on the network by '{servername}' you can use delegation.
|
||||
</p>
|
||||
<p>
|
||||
Otherwise click '{local_button_text}'.
|
||||
</p>
|
||||
<p>There are two forms of delegation: </p>
|
||||
<h3>.well-known delegation</h3>
|
||||
<p>
|
||||
{servername} provides the url https://{servername}/.well-known/matrix/server
|
||||
which gives federating servers information about how to contact the actual server
|
||||
hosting the synapse install. (Don't worry! We'll print out the .well-known file for you later.)
|
||||
</p>
|
||||
<h3>DNS SRV delegation</h3>
|
||||
<p>
|
||||
You will need access to {servername}'s domain zone DNS records. This method
|
||||
also requires the synapse install's server to provide a valid TLS cert for {servername}
|
||||
</p>
|
||||
<p>
|
||||
You will need to add an SRV record to {servername}'s DNS zone. (Once again, we'll print
|
||||
the SRV record out for you later.)
|
||||
</p>
|
||||
|
||||
<h3>More info</h3>
|
||||
<p>
|
||||
Confused? I am too. Maybe <a href="https://github.com/matrix-org/synapse/blob/master/docs/federate.md" target="_blank">
|
||||
this can answer some of your questions.
|
||||
</a>
|
||||
</p>
|
||||
<ButtonDisplay>
|
||||
<button onClick={clickLocal}>{local_button_text}</button>
|
||||
<button onClick={clickWellKnown}>Use 'well known'</button>
|
||||
<button onClick={clickDNS}>Use DNS</button>
|
||||
</ButtonDisplay>
|
||||
</ContentWrapper>;
|
||||
}
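
For reference, a sketch of the two delegation mechanisms the component above
describes, with example host names (these are not the exact templates the
installer prints later):

import json

# Served at https://example.com/.well-known/matrix/server
server_well_known = json.dumps({"m.server": "synapse.example.com:443"})

# Served at https://example.com/.well-known/matrix/client
client_well_known = json.dumps(
    {"m.homeserver": {"base_url": "https://synapse.example.com"}}
)

# DNS SRV alternative, in zone-file syntax:
srv_record = "_matrix._tcp.example.com. 3600 IN SRV 10 5 8448 synapse.example.com."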
|
||||
@@ -1,76 +0,0 @@
|
||||
import React, { useState } from 'react';
|
||||
|
||||
import ContentWrapper from '../containers/ContentWrapper';
|
||||
|
||||
import style from '../../less/main.less';
|
||||
|
||||
export default ({ onClick }) => {
|
||||
const [fedPort, setFedPort] = useState("");
|
||||
const [clientPort, setClientPort] = useState("");
|
||||
const [clientPortValid, setClientPortValid] = useState(true)
|
||||
const [fedPortValid, setFedPortValid] = useState(true)
|
||||
|
||||
const updateValidity = (port, setValid) => setValid(
|
||||
!port ||
|
||||
(!isNaN(port) && 0 < port && port <= 65535)
|
||||
)
|
||||
|
||||
const onFederationChange = event => {
|
||||
const val = event.target.value;
|
||||
setFedPort(val);
|
||||
updateValidity(val, setFedPortValid);
|
||||
}
|
||||
|
||||
const onClientChange = event => {
|
||||
const val = event.target.value;
|
||||
setClientPort(val);
|
||||
updateValidity(val, setClientPortValid);
|
||||
}
|
||||
|
||||
return <ContentWrapper>
|
||||
<h1>Outward facing ports</h1>
|
||||
<p>
|
||||
Normally other matrix servers will try to contact the Synapse install's server on
|
||||
port 8448 and clients, such as riot, riotX, neo etc., will try to contact
|
||||
the install server on port 443.
|
||||
</p>
|
||||
<p>
|
||||
Delegation lets us tell those servers and clients to try a different port!
|
||||
(Flexible!)
|
||||
</p>
|
||||
<p>
|
||||
It's perfectly fine to leave the defaults. Only change them if you have a
|
||||
real need to.
|
||||
</p>
|
||||
<p>
|
||||
I would recommend using unprivileged ports but I would recommend the
|
||||
default ports more strongly.
|
||||
</p>
|
||||
<p>
|
||||
Please choose the port for other matrix servers to contact:
|
||||
</p>
|
||||
<input
|
||||
type="text"
|
||||
onChange={onFederationChange}
|
||||
className={fedPortValid ? undefined : style.invalidInput}
|
||||
autoFocus
|
||||
placeholder="Use Default 8448"
|
||||
></input>
|
||||
<p>
|
||||
Please choose the port for clients to contact:
|
||||
</p>
|
||||
<input
|
||||
type="text"
|
||||
onChange={onClientChange}
|
||||
className={clientPortValid ? undefined : style.invalidInput}
|
||||
autoFocus
|
||||
placeholder="Use Default 443"
|
||||
></input>
|
||||
<div>
|
||||
<button
|
||||
disabled={clientPortValid && fedPortValid ? undefined : true}
|
||||
onClick={() => onClick(fedPort, clientPort)}
|
||||
>Use These Ports</button>
|
||||
</div>
|
||||
</ContentWrapper>
|
||||
}
|
||||
@@ -1,66 +0,0 @@
|
||||
import React from 'react';
|
||||
|
||||
import ContentWrapper from '../containers/ContentWrapper';
|
||||
import ButtonDisplay from './ButtonDisplay';
|
||||
import DownloadOrCopy from './DownloadOrCopy';
|
||||
import { DELEGATION_TYPES } from '../actions/constants';
|
||||
|
||||
export default ({
|
||||
delegationType,
|
||||
serverConfig,
|
||||
clientConfig,
|
||||
serverConfigFileName,
|
||||
clientConfigFileName,
|
||||
serverName,
|
||||
onClick
|
||||
}) => {
|
||||
if (delegationType == DELEGATION_TYPES.DNS) {
|
||||
|
||||
return <ContentWrapper>
|
||||
<h1>Configure delegation</h1>
|
||||
<p>
|
||||
You will need to add the following SRV record to your DNS zone.
|
||||
</p>
|
||||
<pre>
|
||||
<code>
|
||||
{clientConfig}
|
||||
</code>
|
||||
</pre>
|
||||
<DownloadOrCopy content={clientConfig} fileName={clientConfigFileName} />
|
||||
<ButtonDisplay>
|
||||
<button onClick={onClick}>Continue</button>
|
||||
</ButtonDisplay>
|
||||
</ContentWrapper>
|
||||
|
||||
} else {
|
||||
|
||||
return <ContentWrapper>
|
||||
<h1>Configure delegation</h1>
|
||||
<p>
|
||||
The delegation configuration needs to take place outside the installer.
|
||||
</p>
|
||||
<p>
|
||||
You'll need to host the following at https://{serverName}/.well-known/matrix/server
|
||||
</p>
|
||||
<pre>
|
||||
<code>
|
||||
{serverConfig}
|
||||
</code>
|
||||
</pre>
|
||||
<DownloadOrCopy content={serverConfig} fileName={serverConfigFileName} />
|
||||
<p>
|
||||
You'll also need to host the following at https://{serverName}/.well-known/matrix/client
|
||||
</p>
|
||||
<pre>
|
||||
<code>
|
||||
{clientConfig}
|
||||
</code>
|
||||
</pre>
|
||||
<DownloadOrCopy content={clientConfig} fileName={clientConfigFileName} />
|
||||
<ButtonDisplay>
|
||||
<button onClick={onClick}>Continue</button>
|
||||
</ButtonDisplay>
|
||||
</ContentWrapper>;
|
||||
|
||||
}
|
||||
}
|
||||
@@ -1,20 +0,0 @@
|
||||
import React, { useState } from 'react';
|
||||
|
||||
import ContentWrapper from '../containers/ContentWrapper';
|
||||
|
||||
export default ({ onClick }) => {
|
||||
const [servername, setServerName] = useState("");
|
||||
|
||||
const onChange = event => {
|
||||
setServerName(event.target.value);
|
||||
}
|
||||
|
||||
return <ContentWrapper>
|
||||
<h1>Synapse install's servername</h1>
|
||||
<p>What is the Synapse Install's server called on the network?</p>
|
||||
<input type="text" onChange={onChange} autoFocus placeholder="host.server"></input>
|
||||
<div>
|
||||
<button disabled={servername ? undefined : true} onClick={() => onClick(servername)}>Continue</button>
|
||||
</div>
|
||||
</ContentWrapper>
|
||||
}
|
||||
@@ -1,21 +0,0 @@
|
||||
import React from 'react';
|
||||
import ButtonDisplay from './ButtonDisplay';
|
||||
|
||||
const download = (filename, text) => {
|
||||
const e = document.createElement('a');
|
||||
e.setAttribute('href', 'data:text/plain;charset=utf-8,' + encodeURIComponent(text));
|
||||
e.setAttribute('download', filename);
|
||||
|
||||
e.style.display = 'none';
|
||||
document.body.appendChild(e);
|
||||
|
||||
e.click();
|
||||
|
||||
document.body.removeChild(e);
|
||||
}
|
||||
|
||||
export default ({ content, fileName }) =>
|
||||
<ButtonDisplay>
|
||||
<button onClick={() => download(fileName, content)}>Download</button>
|
||||
<button onClick={() => navigator.clipboard.writeText(content)}>Copy</button>
|
||||
</ButtonDisplay>
|
||||
@@ -1,11 +0,0 @@
|
||||
import React from 'react';
|
||||
|
||||
import style from '../../less/main.less';
|
||||
import ContentWrapper from '../containers/ContentWrapper';
|
||||
|
||||
export default () => {
|
||||
return <ContentWrapper>
|
||||
<h1>Damn!</h1>
|
||||
<p>Has the config server been started?</p>
|
||||
</ContentWrapper>;
|
||||
}
|
||||
@@ -1,32 +0,0 @@
|
||||
import React from 'react';
|
||||
|
||||
import ButtonDisplay from './ButtonDisplay';
|
||||
import ContentWrapper from '../containers/ContentWrapper';
|
||||
import DownloadOrCopy from './DownloadOrCopy';
|
||||
|
||||
import style from '../../less/main.less';
|
||||
|
||||
|
||||
export default ({ secret_key_loaded, secret_key, onClick }) => {
|
||||
if (!secret_key_loaded) {
|
||||
return <ContentWrapper>
|
||||
<h1>Generating secret key</h1>
|
||||
</ContentWrapper>;
|
||||
} else {
|
||||
return <ContentWrapper>
|
||||
<h1>Export keys</h1>
|
||||
<p>
|
||||
This is your server's secret key:
|
||||
</p>
|
||||
<p className={style.keyDisplay}>{secret_key}</p>
|
||||
<DownloadOrCopy content={secret_key} fileName="secret_key.txt" />
|
||||
<p>
|
||||
The server uses this to identify
|
||||
itself to other servers. You can use it to retain ownership of the server's
|
||||
name in the event that the server itself becomes irrevocably inaccessible.
|
||||
</p>
|
||||
<p>Keep it safe</p>
|
||||
<ButtonDisplay><button onClick={onClick}>Continue</button></ButtonDisplay>
|
||||
</ContentWrapper>;
|
||||
}
|
||||
}
|
||||
@@ -1,10 +0,0 @@
|
||||
import React from 'react';
|
||||
|
||||
import style from '../../less/main.less';
|
||||
import ContentWrapper from '../containers/ContentWrapper';
|
||||
|
||||
export default () => {
|
||||
return <ContentWrapper>
|
||||
<h1>loading..</h1>
|
||||
</ContentWrapper>;
|
||||
}
|
||||
@@ -1,107 +0,0 @@
|
||||
import React, { useState } from 'react';
|
||||
|
||||
import ContentWrapper from '../containers/ContentWrapper';
|
||||
|
||||
import style from '../../less/main.less';
|
||||
|
||||
export default ({
|
||||
servername,
|
||||
verifyingPorts,
|
||||
fedPortInUse,
|
||||
clientPortInUse,
|
||||
canChangePorts,
|
||||
defaultFedPort,
|
||||
defaultClientPort,
|
||||
onClick,
|
||||
}) => {
|
||||
if (verifyingPorts) {
|
||||
return <ContentWrapper><h1>Verifying ports.</h1></ContentWrapper>
|
||||
}
|
||||
|
||||
const [fedPort, setFedPort] = useState(defaultFedPort);
|
||||
const [clientPort, setClientPort] = useState(defaultClientPort);
|
||||
const [clientPortValid, setClientPortValid] = useState(true)
|
||||
const [fedPortValid, setFedPortValid] = useState(true)
|
||||
const [clientPortPriv, setClientPortPriv] = useState(defaultClientPort < 1024)
|
||||
const [fedPortPriv, setFedPortPriv] = useState(defaultFedPort < 1024)
|
||||
|
||||
const updateValidity = (port, setValid) => setValid(
|
||||
!isNaN(port) && 0 < port && port <= 65535
|
||||
)
|
||||
|
||||
const updatePriv = (port, setPriv) => setPriv(
|
||||
port < 1024
|
||||
)
|
||||
|
||||
const onFederationChange = event => {
|
||||
const val = event.target.value ? event.target.value : defaultFedPort;
|
||||
setFedPort(val);
|
||||
updatePriv(val, setFedPortPriv);
|
||||
updateValidity(val, setFedPortValid);
|
||||
}
|
||||
|
||||
const onClientChange = event => {
|
||||
const val = event.target.value ? event.target.value : defaultClientPort;
|
||||
setClientPort(val);
|
||||
updatePriv(val, setClientPortPriv);
|
||||
updateValidity(val, setClientPortValid);
|
||||
}
|
||||
|
||||
return <ContentWrapper>
|
||||
<h1>{servername}'s ports</h1>
|
||||
<p>
|
||||
The synapse install itself will be listening on the following ports.
|
||||
</p>
|
||||
{
|
||||
canChangePorts ?
|
||||
<p>
|
||||
Since you're using a reverse proxy you can change these to anything you
|
||||
like as long as synapse can bind to them. We recommend not using privileged
|
||||
ports (those below 1024).
|
||||
</p>
|
||||
:
|
||||
<p>
|
||||
Since you're not using a reverse proxy synapse will have to listen on
|
||||
these ports. If any of these ports are already in use (we'll test them when
|
||||
you click the button) go back and change the values you set for the ports
|
||||
there. Otherwise you're going to have to rethink your setup.
|
||||
</p>
|
||||
}
|
||||
|
||||
<p>
|
||||
We will check that the ports are not in use. If they are you can either
|
||||
reconfigure the server that synapse is installed on outside of this installer
|
||||
or you can change the ports as explained above.
|
||||
</p>
|
||||
<p>
|
||||
Note: we can't check whether privileged ports are in use. If you've
|
||||
set a privileged port <b>we will skip the check for that port</b>.
|
||||
</p>
|
||||
|
||||
<h3>Federation Port</h3>
|
||||
<input
|
||||
type="text"
|
||||
onChange={onFederationChange}
|
||||
disabled={canChangePorts ? undefined : true}
|
||||
autoFocus
|
||||
placeholder={defaultFedPort}
|
||||
></input>
|
||||
{fedPortPriv ? <p>This is a privileged port.</p> : undefined}
|
||||
|
||||
<h3>Client Port</h3>
|
||||
<input
|
||||
type="text"
|
||||
onChange={onClientChange}
|
||||
disabled={canChangePorts ? undefined : true}
|
||||
autoFocus
|
||||
placeholder={defaultClientPort}
|
||||
></input>
|
||||
{clientPortPriv ? <p>This is a privileged port.</p> : undefined}
|
||||
<div>
|
||||
<button
|
||||
disabled={clientPortValid && fedPortValid ? undefined : true}
|
||||
onClick={() => onClick(parseInt(fedPort), parseInt(clientPort))}
|
||||
>Verify These Ports</button>
|
||||
</div>
|
||||
</ContentWrapper>
|
||||
}
|
||||
@@ -1,38 +0,0 @@
|
||||
import React, { useState } from 'react';
|
||||
|
||||
import ContentWrapper from '../containers/ContentWrapper';
|
||||
|
||||
import {
|
||||
REVERSE_PROXY_TYPES
|
||||
} from '../actions/constants'
|
||||
|
||||
|
||||
export default ({ onClick }) => {
|
||||
const defaultValue = REVERSE_PROXY_TYPES.NGINX;
|
||||
const [reverseProxy, setReverseProxy] = useState(defaultValue);
|
||||
|
||||
const onChange = event => {
|
||||
console.log("trigered")
|
||||
console.log(event.target)
|
||||
setReverseProxy(event.target.value);
|
||||
}
|
||||
|
||||
return <ContentWrapper>
|
||||
<h1>Reverse Proxy</h1>
|
||||
<p>
|
||||
Please choose the reverse proxy you're using. This is just so we can provide
|
||||
you with a template later, if you already know how you're going to set yours
|
||||
up don't worry too much about this.
|
||||
</p>
|
||||
<select defaultValue={defaultValue} onChange={onChange} >
|
||||
<option value={REVERSE_PROXY_TYPES.APACHE}>Apache</option>
|
||||
<option value={REVERSE_PROXY_TYPES.CADDY}>Caddy</option>
|
||||
<option value={REVERSE_PROXY_TYPES.HAPROXY}>HAProxy</option>
|
||||
<option value={REVERSE_PROXY_TYPES.NGINX}>NGiNX</option>
|
||||
<option value={REVERSE_PROXY_TYPES.OTHER}>Some other Reverse Proxy</option>
|
||||
</select>
|
||||
<div>
|
||||
<button onClick={() => onClick(reverseProxy)}>Safety First</button>
|
||||
</div>
|
||||
</ContentWrapper>
|
||||
}
|
||||
@@ -1,39 +0,0 @@
|
||||
import React from 'react';
|
||||
|
||||
import ContentWrapper from '../containers/ContentWrapper';
|
||||
import ButtonDisplay from './ButtonDisplay';
|
||||
import DownloadOrCopy from './DownloadOrCopy';
|
||||
import { REVERSE_PROXY_TYPES } from '../actions/constants';
|
||||
|
||||
export default ({ proxyType, sampleConfig, fileName, onClick }) => {
|
||||
console.log("SFSFD")
|
||||
console.log(sampleConfig)
|
||||
console.log("SFSFD")
|
||||
return <ContentWrapper>
|
||||
<h1>Configure the reverse proxy</h1>
|
||||
<p>
|
||||
It's time for you to setup the reverse proxy outside of this installer.
|
||||
</p>
|
||||
{
|
||||
proxyType == REVERSE_PROXY_TYPES.OTHER ?
|
||||
<p>
|
||||
Since you chose 'other' for your reverse proxy, here's a sample config for Apache.
You'll have to adapt it yourself. We believe in you.
|
||||
</p>
|
||||
:
|
||||
<p>
|
||||
We can't do it for you
|
||||
but here's the sample configuration for your {proxyType} proxy.
|
||||
</p>
|
||||
}
|
||||
<pre>
|
||||
<code>
|
||||
{sampleConfig}
|
||||
</code>
|
||||
</pre>
|
||||
<DownloadOrCopy content={sampleConfig} fileName={fileName} />
|
||||
<ButtonDisplay>
|
||||
<button onClick={onClick}>Continue</button>
|
||||
</ButtonDisplay>
|
||||
</ContentWrapper>;
|
||||
}
|
||||
@@ -1,28 +0,0 @@
|
||||
import React, { useState } from 'react';
|
||||
|
||||
import ContentWrapper from '../containers/ContentWrapper';
|
||||
|
||||
export default ({ onClick }) => {
|
||||
const [servername, setServerName] = useState("");
|
||||
|
||||
const onChange = event => {
|
||||
setServerName(event.target.value);
|
||||
}
|
||||
|
||||
return <ContentWrapper>
|
||||
<h1>Select a server name</h1>
|
||||
<p>It's important to choose a good name for your server because it cannot be changed later.</p>
|
||||
<p>
|
||||
The name forms part of the user IDs on the server, which look like `@you:server.name`.
|
||||
The name will also be what other servers look up when they're trying to reach this one.
|
||||
</p>
|
||||
<p>
|
||||
The server name is usually just your domain. For example <a target="_blank" href="https://matrix.org">matrix.org</a>'s server is
|
||||
known as `matrix.org`.
|
||||
</p>
|
||||
<input type="text" onChange={onChange} autoFocus placeholder="synapse.dev"></input>
|
||||
<div>
|
||||
<button disabled={servername ? undefined : true} onClick={() => onClick(servername)}>I like it</button>
|
||||
</div>
|
||||
</ContentWrapper>;
|
||||
}
|
||||
@@ -1,17 +0,0 @@
|
||||
import React from 'react';
|
||||
|
||||
import style from '../../less/main.less';
|
||||
|
||||
import ButtonDisplay from './ButtonDisplay';
|
||||
import ContentWrapper from '../containers/ContentWrapper';
|
||||
|
||||
|
||||
export default ({ onClick }) =>
|
||||
<ContentWrapper>
|
||||
<h1>Anonymous Statistics</h1>
|
||||
<p>Would you like to report anonymous statistics to matrix.org?</p>
|
||||
<ButtonDisplay>
|
||||
<button onClick={() => onClick(true)}>YES</button>
|
||||
<button onClick={() => onClick(false)} className={style.redButton}>NO</button>
|
||||
</ButtonDisplay>
|
||||
</ContentWrapper >
|
||||
@@ -1,77 +0,0 @@
|
||||
import React from 'react';
|
||||
|
||||
import style from '../../less/main.less';
|
||||
|
||||
import ButtonDisplay from './ButtonDisplay';
|
||||
import ContentWrapper from '../containers/ContentWrapper';
|
||||
|
||||
const tlsLink = "https://en.wikipedia.org/wiki/Transport_Layer_Security";
|
||||
const apacheLink = "http://httpd.apache.org/";
|
||||
const caddyLink = "https://caddyserver.com/";
|
||||
const haproxyLink = "http://www.haproxy.org/";
|
||||
const nginxLink = "https://www.nginx.com/";
|
||||
const proxyInfoLink = "https://github.com/matrix-org/synapse/blob/master/docs/reverse_proxy.rst";
|
||||
|
||||
export default ({ onClickACME, onClickTLS, onClickReverseProxy }) =>
|
||||
<ContentWrapper>
|
||||
<h1>TLS</h1>
|
||||
<p>
|
||||
I was going to make a <a target="_blank" href={tlsLink}>TLS</a> joke but it
|
||||
was making me insecure..
|
||||
</p>
|
||||
<p>
|
||||
TLS keeps the communication between homeservers secure. To enable TLS you'll
|
||||
need a TLS cert. You can use ACME, provide your own certs, or let the reverse
|
||||
proxy handle the TLS certs instead.
|
||||
</p>
|
||||
<h3>
|
||||
ReverseProxy
|
||||
</h3>
|
||||
<p>
|
||||
It is a good idea to use Synapse behind a reverse proxy such as <a target="_blank" href={apacheLink}>Apache</a>, <a target="_blank" href={caddyLink}>Caddy</a>, <a target="_blank" href={haproxyLink}>HAProxy</a>, or <a target="_blank" href={nginxLink}>NGiNX</a>.
|
||||
</p>
|
||||
<p>
|
||||
The main benefit to this is that the reverse proxy can listen on the privileged port
|
||||
443 (which clients like riot expect to connect to) on behalf of synapse. The incoming traffic
|
||||
is then forwarded to Synapse on a non-privileged port.
|
||||
<br />
|
||||
You need root to listen on ports below 1024, and
|
||||
running synapse with root privileges is <b>strongly discouraged</b>.
|
||||
Reverse proxies are more secure, run with root and pass things on like nobody's business.
|
||||
<br />
|
||||
(Note: you can also have synapse use a non-privileged port
|
||||
by using one of the delegation methods mentioned earlier.)
|
||||
</p>
|
||||
<p>
|
||||
If you choose to use a Reverse Proxy (good for you) we'll provide you with
|
||||
configuration templates later. Easy breasy.
|
||||
</p>
|
||||
<p>
|
||||
More information about Reverse Proxies <a target="_blank" href={proxyInfoLink}> in the docs.</a>
|
||||
</p>
|
||||
<h3>
|
||||
ACME
|
||||
</h3>
|
||||
<p>
|
||||
ACME is <strike>a super cool initiative</strike> a protocol that allows TLS
|
||||
certificates to be requested automagically. Synapse supports ACME by requesting
|
||||
certs from Let's Encrypt. This is the easiest way to manage your certs because
|
||||
once you set it up you don't need to manage it.
|
||||
</p>
|
||||
<p>
|
||||
If you wish to use ACME you will need access to port 80 which usually requires
|
||||
root privileges. Do not run Synapse as root; use a reverse proxy or authbind.
|
||||
</p>
|
||||
<h3>
|
||||
Provide your own TLS certs
|
||||
</h3>
|
||||
<p>
|
||||
If you have your own TLS certs for the domain we'll ask you for the path
|
||||
to them or you can upload them for synapse to use.
|
||||
</p>
|
||||
<ButtonDisplay>
|
||||
<button onClick={() => onClickACME()}>Use ACME</button>
|
||||
<button onClick={() => onClickReverseProxy()}>I already/will use a Reverse Proxy with TLS</button>
|
||||
<button onClick={() => onClickTLS()}>I have a TLS cert</button>
|
||||
</ButtonDisplay>
|
||||
</ContentWrapper>
|
||||
@@ -1,61 +0,0 @@
import React, { useState } from 'react';

import style from '../../less/main.less';

import ButtonDisplay from './ButtonDisplay';
import ContentWrapper from '../containers/ContentWrapper';


export default ({ testingCertPaths, uploadingCerts, certPathInvalid, certKeyPathInvalid, onClickCertPath, onClickCertUpload }) => {
    const [certPath, setCertPath] = useState("");
    const [certKeyPath, setCertKeyPath] = useState("");
    const [certFile, setCertFile] = useState();
    const [certKeyFile, setCertKeyFile] = useState();

    if (testingCertPaths) {
        return <ContentWrapper><h1>Testing the cert paths.</h1></ContentWrapper>
    } else if (uploadingCerts) {
        return <ContentWrapper><h1>Uploading Certs</h1></ContentWrapper>
    } else {
        return <ContentWrapper>
            <h1>TLS Path</h1>
            <p>
                If you have a TLS cert on your server you can provide a path to it here.
                The cert needs to be a `.pem` file that includes the
                full certificate chain, including any intermediate certificates.
            </p>

            <p>Please enter {certPathInvalid ? "a valid" : "the"} path to the cert</p>
            <input
                className={certPathInvalid ? style.invalidInput : undefined}
                type="text"
                placeholder="/path/to/your/cert.pem"
                value={certPath ? certPath : undefined}
                onChange={e => setCertPath(e.target.value)}
            />

            <p>Please enter {certKeyPathInvalid ? "a valid" : "the"} path to the cert's key</p>
            <input
                className={certKeyPathInvalid ? style.invalidInput : undefined}
                type="text"
                placeholder="/path/to/your/cert/key.tls.key"
                value={certKeyPath ? certKeyPath : undefined}
                onChange={e => setCertKeyPath(e.target.value)}
            />

            <button
                disabled={certPath && certKeyPath ? undefined : true}
                onClick={() => onClickCertPath(certPath, certKeyPath)}
            >Use TLS Path</button>

            <h3>OR...</h3>
            <h1>Upload a TLS cert</h1>
            <p>Upload a cert file.</p>
            <input type="file" name="cert" onChange={e => setCertFile(e.target.files[0])} />
            <p>Upload the cert's private key file.</p>
            <input type="file" name="certkey" onChange={e => setCertKeyFile(e.target.files[0])} />
            <button disabled={certFile && certKeyFile ? undefined : true} onClick={() => onClickCertUpload(certFile, certKeyFile)}>Upload cert</button>

        </ContentWrapper>
    }
}
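The "Testing the cert paths" state above implies a backend check of the supplied pair. One hypothetical way such a check could be done in Node (not necessarily how this installer's backend does it) is to lean on the TLS layer, which rejects unreadable files, unparsable PEM, and a key that does not match the certificate:

// Hypothetical sketch of a cert/key path check; helper name and behaviour
// are assumptions, not code from this repository.
const fs = require('fs');
const tls = require('tls');

function certPathsLookValid(certPath, keyPath) {
    try {
        const cert = fs.readFileSync(certPath);
        const key = fs.readFileSync(keyPath);
        // Throws if either file is unreadable or invalid PEM, or if the
        // private key does not match the certificate.
        tls.createSecureContext({ cert, key });
        return true;
    } catch (err) {
        return false;
    }
}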
@@ -1,83 +0,0 @@
import React from 'react';

import style from '../../less/main.less';

import {
    BASE_INTRO_UI,
    SERVER_NAME_UI,
    STATS_REPORT_UI,
    KEY_EXPORT_UI,
    DELEGATION_OPTIONS_UI,
    WELL_KNOWN_UI,
    DNS_UI,
    WORKER_UI,
    TLS_UI,
    REVERSE_PROXY_UI,
    PORT_SELECTION_UI,
    REVERSE_PROXY_TEMPLATE_UI,
    LOADING_UI,
    ERROR_UI,
    DELEGATION_SERVER_NAME_UI,
    TLS_CERTPATH_UI,
    DELEGATION_PORT_SELECTION_UI,
    DELEGATION_TEMPLATE_UI,
    DATABASE_UI,
} from '../reducers/ui_constants';

import Error from '../components/Error';
import Loading from '../components/Loading';

import IntroUi from '../containers/BaseIntro';
import ServerName from '../containers/ServerName';
import StatsReporter from '../containers/StatsReporter';
import ExportKeys from '../containers/ExportKeys';
import DelegationOptions from '../containers/DelegationOptions';
import DelegationServerName from '../containers/DelegationServerName';
import ReverseProxy from '../containers/ReverseProxy';
import TLS from '../containers/TLS';
import TLSCertPath from '../containers/TLSCertPath';
import DelegationPortSelection from '../containers/DelegationPortSelection';
import PortSelection from '../containers/PortSelection';
import ReverseProxySampleConfig from '../containers/ReverseProxySampleConfig';
import DelegationSampleConfig from '../containers/DelegationSampleConfig';
import Database from '../containers/Database';

export default ({ active_ui, dispatch }) => {
    console.log(`switching to ui ${active_ui}`);
    switch (active_ui) {
        case LOADING_UI:
            return <Loading />
        case ERROR_UI:
            return <Error />
        case BASE_INTRO_UI:
            return <IntroUi />
        case SERVER_NAME_UI:
            return <ServerName />
        case STATS_REPORT_UI:
            return <StatsReporter />
        case KEY_EXPORT_UI:
            return <ExportKeys />
        case DELEGATION_OPTIONS_UI:
            return <DelegationOptions />
        case DELEGATION_SERVER_NAME_UI:
            return <DelegationServerName />
        case DELEGATION_PORT_SELECTION_UI:
            return <DelegationPortSelection />
        case REVERSE_PROXY_UI:
            return <ReverseProxy />
        case TLS_UI:
            return <TLS />
        case TLS_CERTPATH_UI:
            return <TLSCertPath />
        case PORT_SELECTION_UI:
            return <PortSelection />
        case REVERSE_PROXY_TEMPLATE_UI:
            return <ReverseProxySampleConfig />
        case DELEGATION_TEMPLATE_UI:
            return <DelegationSampleConfig />
        case DATABASE_UI:
            return <Database />
        default:
            return <h1>how did i get here?</h1>
    }
}
@@ -1,18 +0,0 @@
import { connect } from 'react-redux';

import BaseIntro from '../components/BaseIntro';

import { advance_ui } from '../actions';

// Intentionally empty: connect() below is called with `null` instead.
const mapStateToProps = (state, ownProps) => ({

});

const mapDispatchToProps = (dispatch) => ({
    onClick: () => dispatch(advance_ui())
});

export default connect(
    null,
    mapDispatchToProps
)(BaseIntro);
@@ -1,16 +0,0 @@
import { connect } from 'react-redux';

import ContentWrapper from '../components/ContentWrapper';

const mapStateToProps = (state, { children }) => ({
    servername: state.base_config.servername,
    children,
});

const mapDispatchToProps = (dispatch) => ({
});

export default connect(
    mapStateToProps
)(ContentWrapper);
@@ -1,21 +0,0 @@
import { connect } from 'react-redux';

import Database from '../components/Database';
import { set_database, advance_ui, write_config } from '../actions';

const mapStateToProps = (state) => {
}

const mapDispatchToProps = (dispatch) => ({
    onClick: database => {
        dispatch(set_database(database));
        dispatch(advance_ui());
        dispatch(write_config());
    }
});

export default connect(
    null,
    mapDispatchToProps,
)(Database);
@@ -1,32 +0,0 @@
import { connect } from 'react-redux';

import DelegationOptions from '../components/DelegationOptions';
import { set_delegation, advance_ui } from '../actions';
import { DELEGATION_TYPES } from '../actions/constants';

const mapStateToProps = (state, { children }) => {
    return {
        servername: state.base_config.servername,
    }
}

const mapDispatchToProps = (dispatch) => ({
    clickLocal: () => {
        dispatch(advance_ui(DELEGATION_TYPES.LOCAL));
        dispatch(set_delegation(DELEGATION_TYPES.LOCAL));
    },
    clickWellKnown: () => {
        dispatch(advance_ui(DELEGATION_TYPES.WELL_KNOWN));
        dispatch(set_delegation(DELEGATION_TYPES.WELL_KNOWN));
    },
    clickDNS: () => {
        dispatch(advance_ui(DELEGATION_TYPES.DNS));
        dispatch(set_delegation(DELEGATION_TYPES.DNS));
    }
});

export default connect(
    mapStateToProps,
    mapDispatchToProps,
)(DelegationOptions);
@@ -1,21 +0,0 @@
import { connect } from 'react-redux';

import DelegationPortSelection from '../components/DelegationPortSelection';

import { advance_ui, set_delegation_ports } from '../actions';

const mapStateToProps = (state, ownProps) => ({

});

const mapDispatchToProps = (dispatch) => ({
    onClick: (fedPort, clientPort) => {
        dispatch(advance_ui());
        dispatch(set_delegation_ports(fedPort, clientPort));
    }
});

export default connect(
    null,
    mapDispatchToProps
)(DelegationPortSelection);
@@ -1,56 +0,0 @@
import { connect } from 'react-redux';

import DelegationSampleConfig from '../components/DelegationSampleConfig';

import { advance_ui } from '../actions';

import DNSConfig from '../templates/dns-srv';
import FedWellKnownConfig from '../templates/federation-well-known'
import ClientWellKnownConfig from '../templates/client-well-known'
import { DELEGATION_TYPES } from '../actions/constants';

// synapseServerName: state.base_config.delegation_server_name ? state.base_config.delegation_server_name : state.base_config.servername,

// DNS (SRV) delegation needs no server-side .well-known file, so no server
// config is generated in that case.
const serverConfig = state => {
    if (state.delegation_type == DELEGATION_TYPES.DNS) {
        return undefined;
    } else {
        return FedWellKnownConfig({
            synapseServerName: state.delegation_servername,
            delegationSynapsePort: state.delegation_federation_port ? state.delegation_federation_port : 8448,
        });
    }
}

const clientConfig = state => {
    if (state.delegation_type == DELEGATION_TYPES.WELL_KNOWN) {
        return ClientWellKnownConfig({
            synapseServerName: state.delegation_servername,
            delegationClientPort: state.delegation_client_port ? state.delegation_client_port : 443,
        });
    } else {
        return DNSConfig({
            serverName: state.servername,
            synapseServerName: state.delegation_servername,
            delegationClientPort: state.delegation_client_port ? state.delegation_client_port : 443,
        })
    }
}

const mapStateToProps = state => ({
    delegationType: state.base_config.delegation_type,
    serverConfig: serverConfig(state.base_config),
    clientConfig: clientConfig(state.base_config),
    serverConfigFileName: `${state.base_config.servername}_delegation.conf`,
    clientConfigFileName: `${state.base_config.servername}_client_delegation.conf`,
    serverName: state.base_config.servername,
});

const mapDispatchToProps = dispatch => ({
    onClick: () => dispatch(advance_ui()),
});

export default connect(
    mapStateToProps,
    mapDispatchToProps
)(DelegationSampleConfig);
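The template modules imported above are not shown in this diff. As an illustration of the sort of output they produce, here is a hypothetical sketch of a '../templates/client-well-known'-style module based on the Matrix .well-known format; the parameter names mirror the call site above, but the body is an assumption rather than code from this repository.

// Hypothetical sketch: render the JSON served from
// https://<server_name>/.well-known/matrix/client
export default ({ synapseServerName, delegationClientPort }) =>
    JSON.stringify(
        {
            "m.homeserver": {
                "base_url": `https://${synapseServerName}:${delegationClientPort}`,
            },
        },
        null,
        2,
    );

The federation equivalent, served from /.well-known/matrix/server, is a JSON object of the form {"m.server": "<host>:<port>"}, which is what the federation-well-known template would be expected to emit.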
@@ -1,21 +0,0 @@
import { connect } from 'react-redux';

import DelegationServerName from '../components/DelegationServerName';

import { advance_ui, set_delegation_servername } from '../actions';

const mapStateToProps = (state, ownProps) => ({

});

const mapDispatchToProps = (dispatch) => ({
    onClick: servername => {
        dispatch(advance_ui());
        dispatch(set_delegation_servername(servername));
    }
});

export default connect(
    null,
    mapDispatchToProps
)(DelegationServerName);
@@ -1,23 +0,0 @@
import { connect } from 'react-redux';

import ExportKeys from '../components/ExportKeys';

import { advance_ui } from '../actions';

const mapStateToProps = (state, ownProps) => {
    const secret_key_loaded = state.base_config.secret_key_loaded;
    const secret_key = state.base_config.secret_key;
    return {
        secret_key_loaded,
        secret_key,
    }
};

const mapDispatchToProps = (dispatch) => ({
    onClick: () => dispatch(advance_ui())
});

export default connect(
    mapStateToProps,
    mapDispatchToProps
)(ExportKeys);
@@ -1,44 +0,0 @@
import { connect } from 'react-redux';

import PortSelection from '../components/PortSelection';

import { set_synapse_ports } from '../actions';
import { TLS_TYPES } from '../actions/constants';

// Behind a reverse proxy Synapse only needs a single non-privileged HTTP
// listener, so both defaults collapse to 8008 in that case.
const defaultFedPort = state => {
    console.log(state)
    if (state.tls == TLS_TYPES.REVERSE_PROXY) {
        return 8008;
    }

    return state.delegation_federation_port ? state.delegation_federation_port : 8448;
}

const defaultClientPort = state => {
    if (state.tls == TLS_TYPES.REVERSE_PROXY) {
        return 8008;
    }

    return state.delegation_client_port ? state.delegation_client_port : 443;
}

const mapStateToProps = (state, ownProps) => ({
    servername: state.base_config.servername,
    verifyingPorts: state.base_config.verifying_ports,
    fedPortInUse: !state.base_config.synapse_federation_port_free,
    clientPortInUse: !state.base_config.synapse_client_port_free,
    canChangePorts: state.base_config.tls == TLS_TYPES.REVERSE_PROXY,
    defaultFedPort: defaultFedPort(state.base_config),
    defaultClientPort: defaultClientPort(state.base_config),
});

const mapDispatchToProps = (dispatch) => ({
    onClick: (fedPort, clientPort) => {
        dispatch(set_synapse_ports(fedPort, clientPort));
    }
});

export default connect(
    mapStateToProps,
    mapDispatchToProps
)(PortSelection);
@@ -1,21 +0,0 @@
import { connect } from 'react-redux';

import ReverseProxy from '../components/ReverseProxy';

import { advance_ui, set_reverse_proxy } from '../actions';

const mapStateToProps = (state, ownProps) => {

};

const mapDispatchToProps = (dispatch) => ({
    onClick: proxy_type => {
        dispatch(set_reverse_proxy(proxy_type));
        dispatch(advance_ui());
    }
});

export default connect(
    null,
    mapDispatchToProps
)(ReverseProxy);
Some files were not shown because too many files have changed in this diff.