From 4f66312df8788afc68803cdbcb9c98449f14edd9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Johannes=20L=C3=B6thberg?=
Date: Sat, 17 Jun 2017 17:36:03 +0200
Subject: [PATCH 0001/1637] python_dependencies: Use bcrypt module instead of
 py-bcrypt
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

py-bcrypt has been unmaintained for a long while, whereas bcrypt is
actively maintained. And since ff8b87118dcfb153d972e29c2b77b195244d5ddc
we're compatible with the bcrypt module anyway.

Signed-off-by: Johannes Löthberg
---
 synapse/python_dependencies.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py
index ed7f1c89ad..a34cfec8f2 100644
--- a/synapse/python_dependencies.py
+++ b/synapse/python_dependencies.py
@@ -31,7 +31,7 @@ REQUIREMENTS = {
     "pyyaml": ["yaml"],
     "pyasn1": ["pyasn1"],
     "daemonize": ["daemonize"],
-    "py-bcrypt": ["bcrypt"],
+    "bcrypt": ["bcrypt"],
     "pillow": ["PIL"],
     "pydenticon": ["pydenticon"],
     "ujson": ["ujson"],

From 8c23221666f1a09fdc97c2b526cb100cdbd32f60 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 27 Jun 2017 15:53:45 +0100
Subject: [PATCH 0002/1637] Fix up

---
 synapse/api/auth.py | 2 +-
 synapse/replication/slave/storage/client_ips.py | 3 +--
 synapse/storage/client_ips.py | 8 +++++---
 tests/handlers/test_device.py | 3 +--
 tests/storage/test_client_ips.py | 5 +----
 5 files changed, 9 insertions(+), 12 deletions(-)

diff --git a/synapse/api/auth.py b/synapse/api/auth.py
index 10f4972369..d23bcecbad 100644
--- a/synapse/api/auth.py
+++ b/synapse/api/auth.py
@@ -200,7 +200,7 @@ class Auth(object):
             )[0]
             if user and access_token and ip_addr:
                 self.store.insert_client_ip(
-                    user=user,
+                    user_id=user.to_string(),
                     access_token=access_token,
                     ip=ip_addr,
                     user_agent=user_agent,
diff --git a/synapse/replication/slave/storage/client_ips.py b/synapse/replication/slave/storage/client_ips.py
index 65250285e8..352c9a2aa8 100644
--- a/synapse/replication/slave/storage/client_ips.py
+++ b/synapse/replication/slave/storage/client_ips.py
@@ -29,9 +29,8 @@ class SlavedClientIpStore(BaseSlavedStore):
             max_entries=50000 * CACHE_SIZE_FACTOR,
         )
 
-    def insert_client_ip(self, user, access_token, ip, user_agent, device_id):
+    def insert_client_ip(self, user_id, access_token, ip, user_agent, device_id):
         now = int(self._clock.time_msec())
-        user_id = user.to_string()
         key = (user_id, access_token, ip)
 
         try:
diff --git a/synapse/storage/client_ips.py b/synapse/storage/client_ips.py
index 88a5eb232f..5a88e242e5 100644
--- a/synapse/storage/client_ips.py
+++ b/synapse/storage/client_ips.py
@@ -58,9 +58,11 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
         )
         reactor.addSystemEventTrigger("before", "shutdown", self._update_client_ips_batch)
 
-    def insert_client_ip(self, user, access_token, ip, user_agent, device_id):
-        now = int(self._clock.time_msec())
-        key = (user.to_string(), access_token, ip)
+    def insert_client_ip(self, user_id, access_token, ip, user_agent, device_id,
+                         now=None):
+        if not now:
+            now = int(self._clock.time_msec())
+        key = (user_id, access_token, ip)
 
         try:
             last_seen = self.client_ip_last_seen.get(key)
diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py
index 2eaaa8253c..778ff2f6e9 100644
--- a/tests/handlers/test_device.py
+++ b/tests/handlers/test_device.py
@@ -19,7 +19,6 @@ import synapse.api.errors
 import synapse.handlers.device
 import synapse.storage
 
-from synapse import types
 from tests import unittest, utils
 
 user1 = "@boris:aaa"
@@ -179,6
+178,6 @@ class DeviceTestCase(unittest.TestCase): if ip is not None: yield self.store.insert_client_ip( - types.UserID.from_string(user_id), + user_id, access_token, ip, "user_agent", device_id) self.clock.advance_time(1000) diff --git a/tests/storage/test_client_ips.py b/tests/storage/test_client_ips.py index 03df697575..bd6fda6cb1 100644 --- a/tests/storage/test_client_ips.py +++ b/tests/storage/test_client_ips.py @@ -15,9 +15,6 @@ from twisted.internet import defer -import synapse.server -import synapse.storage -import synapse.types import tests.unittest import tests.utils @@ -39,7 +36,7 @@ class ClientIpStoreTestCase(tests.unittest.TestCase): self.clock.now = 12345678 user_id = "@user:id" yield self.store.insert_client_ip( - synapse.types.UserID.from_string(user_id), + user_id, "access_token", "ip", "user_agent", "device_id", ) From 27f26e48b7740248dd4d45b7bb2487b38477b7f4 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 27 Jun 2017 16:25:38 +0100 Subject: [PATCH 0003/1637] Serialize user ip command as json --- synapse/replication/tcp/commands.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/synapse/replication/tcp/commands.py b/synapse/replication/tcp/commands.py index a009214e43..171227cce2 100644 --- a/synapse/replication/tcp/commands.py +++ b/synapse/replication/tcp/commands.py @@ -323,14 +323,18 @@ class UserIpCommand(Command): @classmethod def from_line(cls, line): - user_id, access_token, ip, device_id, last_seen, user_agent = line.split(" ", 5) + user_id, jsn = line.split(" ", 1) - return cls(user_id, access_token, ip, user_agent, device_id, int(last_seen)) + access_token, ip, user_agent, device_id, last_seen = json.loads(jsn) + + return cls( + user_id, access_token, ip, user_agent, device_id, last_seen + ) def to_line(self): - return " ".join(( - self.user_id, self.access_token, self.ip, self.device_id, - str(self.last_seen), self.user_agent, + return self.user_id + " " + json.dumps(( + self.access_token, self.ip, self.user_agent, self.device_id, + self.last_seen, )) From 05538587efd55bc0df362a1fccf80d5ccce9e5c5 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 4 Jul 2017 14:02:21 +0100 Subject: [PATCH 0004/1637] Bump version and changelog --- CHANGES.rst | 16 ++++++++++++++++ synapse/__init__.py | 2 +- 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index ce356a11bd..713e60d43e 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,19 @@ +Changes in synapse v0.22.0-rc2 (2017-07-04) +=========================================== + +Changes: + +* Improve performance of storing user IPs (PR #2307, #2308) +* Slightly improve performance of verifying access tokens (PR #2320) +* Slightly improve performance of event persistence (PR #2321) +* Increase default cache factor size from 0.1 to 0.5 (PR #2330) + +Bug fixes: + +* Fix bug with storing registration sessions that caused frequent CPU churn + (PR #2319) + + Changes in synapse v0.22.0-rc1 (2017-06-26) =========================================== diff --git a/synapse/__init__.py b/synapse/__init__.py index 6b0a766391..c26e8d4d93 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -16,4 +16,4 @@ """ This is a reference implementation of a Matrix home server. 
""" -__version__ = "0.22.0-rc1" +__version__ = "0.22.0-rc2" From 6264cf96665ab2737ffd19f9ef84240d1331a6bc Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 6 Jul 2017 10:35:56 +0100 Subject: [PATCH 0005/1637] Bump version and changelog --- CHANGES.rst | 6 ++++++ synapse/__init__.py | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index 713e60d43e..e7c12dd919 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,9 @@ +Changes in synapse v0.22.0 (2017-07-06) +======================================= + +No changes since v0.22.0-rc2 + + Changes in synapse v0.22.0-rc2 (2017-07-04) =========================================== diff --git a/synapse/__init__.py b/synapse/__init__.py index c26e8d4d93..60af1cbecd 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -16,4 +16,4 @@ """ This is a reference implementation of a Matrix home server. """ -__version__ = "0.22.0-rc2" +__version__ = "0.22.0" From 5a7f561a9bff5163ce7fce719eea083cdd0eabd9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 6 Jul 2017 17:55:51 +0100 Subject: [PATCH 0006/1637] Fix bug where pusherpool didn't start and broke some rooms Since we didn't instansiate the PusherPool at start time it could fail at run time, which it did for some users. This may or may not fix things for those users, but it should happen at start time and stop the server from starting. --- synapse/handlers/federation.py | 3 ++- synapse/handlers/message.py | 4 +++- synapse/rest/client/v1/pusher.py | 14 ++++++-------- 3 files changed, 11 insertions(+), 10 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 483cb8eac6..694b820d85 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -76,6 +76,7 @@ class FederationHandler(BaseHandler): self.keyring = hs.get_keyring() self.action_generator = hs.get_action_generator() self.is_mine_id = hs.is_mine_id + self.pusher_pool = hs.get_pusherpool() self.replication_layer.set_handler(self) @@ -1426,7 +1427,7 @@ class FederationHandler(BaseHandler): if not backfilled: # this intentionally does not yield: we don't care about the result # and don't need to wait for it. - preserve_fn(self.hs.get_pusherpool().on_new_notifications)( + preserve_fn(self.pusher_pool.on_new_notifications)( event_stream_id, max_stream_id ) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 24c9ffdb20..be4f123c54 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -50,6 +50,8 @@ class MessageHandler(BaseHandler): self.pagination_lock = ReadWriteLock() + self.pusher_pool = hs.get_pusherpool() + # We arbitrarily limit concurrent event creation for a room to 5. # This is to stop us from diverging history *too* much. self.limiter = Limiter(max_count=5) @@ -610,7 +612,7 @@ class MessageHandler(BaseHandler): # this intentionally does not yield: we don't care about the result # and don't need to wait for it. 
-            preserve_fn(self.hs.get_pusherpool().on_new_notifications)(
+            preserve_fn(self.pusher_pool.on_new_notifications)(
                 event_stream_id, max_stream_id
             )
 
diff --git a/synapse/rest/client/v1/pusher.py b/synapse/rest/client/v1/pusher.py
index 9a2ed6ed88..1819a560cb 100644
--- a/synapse/rest/client/v1/pusher.py
+++ b/synapse/rest/client/v1/pusher.py
@@ -73,6 +73,7 @@ class PushersSetRestServlet(ClientV1RestServlet):
     def __init__(self, hs):
         super(PushersSetRestServlet, self).__init__(hs)
         self.notifier = hs.get_notifier()
+        self.pusher_pool = self.hs.get_pusherpool()
 
     @defer.inlineCallbacks
     def on_POST(self, request):
@@ -81,12 +82,10 @@ class PushersSetRestServlet(ClientV1RestServlet):
 
         content = parse_json_object_from_request(request)
 
-        pusher_pool = self.hs.get_pusherpool()
-
         if ('pushkey' in content and 'app_id' in content
                 and 'kind' in content and
                 content['kind'] is None):
-            yield pusher_pool.remove_pusher(
+            yield self.pusher_pool.remove_pusher(
                 content['app_id'], content['pushkey'], user_id=user.to_string()
             )
             defer.returnValue((200, {}))
@@ -109,14 +108,14 @@ class PushersSetRestServlet(ClientV1RestServlet):
             append = content['append']
 
         if not append:
-            yield pusher_pool.remove_pushers_by_app_id_and_pushkey_not_user(
+            yield self.pusher_pool.remove_pushers_by_app_id_and_pushkey_not_user(
                 app_id=content['app_id'],
                 pushkey=content['pushkey'],
                 not_user_id=user.to_string()
             )
 
         try:
-            yield pusher_pool.add_pusher(
+            yield self.pusher_pool.add_pusher(
                 user_id=user.to_string(),
                 access_token=requester.access_token_id,
                 kind=content['kind'],
@@ -152,6 +151,7 @@ class PushersRemoveRestServlet(RestServlet):
         self.hs = hs
         self.notifier = hs.get_notifier()
         self.auth = hs.get_v1auth()
+        self.pusher_pool = self.hs.get_pusherpool()
 
     @defer.inlineCallbacks
     def on_GET(self, request):
@@ -161,10 +161,8 @@ class PushersRemoveRestServlet(RestServlet):
         app_id = parse_string(request, "app_id", required=True)
         pushkey = parse_string(request, "pushkey", required=True)
 
-        pusher_pool = self.hs.get_pusherpool()
-
         try:
-            yield pusher_pool.remove_pusher(
+            yield self.pusher_pool.remove_pusher(
                 app_id=app_id,
                 pushkey=pushkey,
                 user_id=user.to_string(),

From 76ed3476d3f59bc17c345aa8dffbc2635999e5fa Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 6 Jul 2017 18:11:22 +0100
Subject: [PATCH 0007/1637] Bump version and changelog

---
 CHANGES.rst | 9 +++++++++
 synapse/__init__.py | 2 +-
 2 files changed, 10 insertions(+), 1 deletion(-)

diff --git a/CHANGES.rst b/CHANGES.rst
index e7c12dd919..a415944756 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -1,3 +1,12 @@
+Changes in synapse v0.22.1 (2017-07-06)
+=======================================
+
+Bug fixes:
+
+* Fix bug where pusher pool didn't start and caused issues when
+  interacting with some rooms (PR #2342)
+
+
 Changes in synapse v0.22.0 (2017-07-06)
 =======================================
 
diff --git a/synapse/__init__.py b/synapse/__init__.py
index 60af1cbecd..dbf22eca00 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -16,4 +16,4 @@
 """ This is a reference implementation of a Matrix home server.
 """
-__version__ = "0.22.0"
+__version__ = "0.22.1"

From f502b0dea14ea07bad1e1e0f5a6d00f19df1c6c0 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 7 Jul 2017 14:04:40 +0100
Subject: [PATCH 0008/1637] Perf: Don't filter events for push

We know the users are joined and we can explicitly check whether they
are ignoring the sender, so let's do that.
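For reference, the per-recipient check added here consults each user's
`m.ignored_user_list` global account data, which the new `is_ignored_by`
storage method inspects. A minimal illustrative example of that account
data (the user ID below is made up; values are typically empty objects):

    {
        "ignored_users": {
            "@spammer:example.com": {}
        }
    }

is_ignored_by(sender, recipient) then simply tests whether the sender's
user ID appears among the keys of "ignored_users".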
--- synapse/push/bulk_push_rule_evaluator.py | 25 ++++++++---------------- synapse/storage/account_data.py | 13 ++++++++++++ synapse/visibility.py | 19 ------------------ 3 files changed, 21 insertions(+), 36 deletions(-) diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index 9a96e6fe8f..803ac3e75b 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -19,7 +19,6 @@ from twisted.internet import defer from .push_rule_evaluator import PushRuleEvaluatorForEvent -from synapse.visibility import filter_events_for_clients_context from synapse.api.constants import EventTypes, Membership from synapse.util.caches.descriptors import cached from synapse.util.async import Linearizer @@ -92,15 +91,6 @@ class BulkPushRuleEvaluator(object): rules_by_user = yield self._get_rules_for_event(event, context) actions_by_user = {} - # None of these users can be peeking since this list of users comes - # from the set of users in the room, so we know for sure they're all - # actually in the room. - user_tuples = [(u, False) for u in rules_by_user] - - filtered_by_user = yield filter_events_for_clients_context( - self.store, user_tuples, [event], {event.event_id: context} - ) - room_members = yield self.store.get_joined_users_from_context( event, context ) @@ -110,6 +100,14 @@ class BulkPushRuleEvaluator(object): condition_cache = {} for uid, rules in rules_by_user.iteritems(): + if event.sender == uid: + continue + + if not event.is_state(): + is_ignored = yield self.store.is_ignored_by(event.sender, uid) + if is_ignored: + continue + display_name = None profile_info = room_members.get(uid) if profile_info: @@ -121,13 +119,6 @@ class BulkPushRuleEvaluator(object): if event.type == EventTypes.Member and event.state_key == uid: display_name = event.content.get("displayname", None) - filtered = filtered_by_user[uid] - if len(filtered) == 0: - continue - - if filtered[0].sender == uid: - continue - for rule in rules: if 'enabled' in rule and not rule['enabled']: continue diff --git a/synapse/storage/account_data.py b/synapse/storage/account_data.py index aa84ffc2b0..ff14e54c11 100644 --- a/synapse/storage/account_data.py +++ b/synapse/storage/account_data.py @@ -308,3 +308,16 @@ class AccountDataStore(SQLBaseStore): " WHERE stream_id < ?" 
) txn.execute(update_max_id_sql, (next_id, next_id)) + + @cachedInlineCallbacks(num_args=2, cache_context=True, max_entries=5000) + def is_ignored_by(self, ignored_user_id, ignorer_user_id, cache_context): + ignored_account_data = yield self.get_global_account_data_by_type_for_user( + "m.ignored_user_list", ignorer_user_id, + on_invalidate=cache_context.invalidate, + ) + if not ignored_account_data: + defer.returnValue(False) + + defer.returnValue( + ignored_user_id in ignored_account_data.get("ignored_users", {}) + ) diff --git a/synapse/visibility.py b/synapse/visibility.py index c4dd9ae2c7..5590b866ed 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -188,25 +188,6 @@ def filter_events_for_clients(store, user_tuples, events, event_id_to_state): }) -@defer.inlineCallbacks -def filter_events_for_clients_context(store, user_tuples, events, event_id_to_context): - user_ids = set(u[0] for u in user_tuples) - event_id_to_state = {} - for event_id, context in event_id_to_context.items(): - state = yield store.get_events([ - e_id - for key, e_id in context.current_state_ids.iteritems() - if key == (EventTypes.RoomHistoryVisibility, "") - or (key[0] == EventTypes.Member and key[1] in user_ids) - ]) - event_id_to_state[event_id] = state - - res = yield filter_events_for_clients( - store, user_tuples, events, event_id_to_state - ) - defer.returnValue(res) - - @defer.inlineCallbacks def filter_events_for_client(store, user_id, events, is_peeking=False): """ From 1fc4a962e46e074343450c893a521d51338ba396 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 7 Jul 2017 18:19:46 +0100 Subject: [PATCH 0009/1637] Add a frontend proxy --- synapse/app/frontend_proxy.py | 267 ++++++++++++++++++++++++++++++++++ synapse/config/workers.py | 2 + 2 files changed, 269 insertions(+) create mode 100644 synapse/app/frontend_proxy.py diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py new file mode 100644 index 0000000000..c8fa7854ad --- /dev/null +++ b/synapse/app/frontend_proxy.py @@ -0,0 +1,267 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# Copyright 2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+import synapse
+
+from synapse.config._base import ConfigError
+from synapse.config.homeserver import HomeServerConfig
+from synapse.config.logger import setup_logging
+from synapse.http.site import SynapseSite
+from synapse.http.server import JsonResource
+from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
+from synapse.replication.slave.storage._base import BaseSlavedStore
+from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
+from synapse.replication.slave.storage.devices import SlavedDeviceStore
+from synapse.replication.tcp.client import ReplicationClientHandler
+from synapse.server import HomeServer
+from synapse.storage.engines import create_engine
+from synapse.util.httpresourcetree import create_resource_tree
+from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
+from synapse.util.manhole import manhole
+from synapse.util.rlimit import change_resource_limit
+from synapse.util.versionstring import get_version_string
+from synapse.crypto import context_factory
+from synapse.api.errors import SynapseError
+from synapse.http.servlet import (
+    RestServlet, parse_json_object_from_request,
+)
+from synapse.rest.client.v2_alpha._base import client_v2_patterns
+
+from synapse import events
+
+
+from twisted.internet import reactor, defer
+from twisted.web.resource import Resource
+
+from daemonize import Daemonize
+
+import sys
+import logging
+import gc
+
+
+logger = logging.getLogger("synapse.app.frontend_proxy")
+
+
+class KeyUploadServlet(RestServlet):
+    PATTERNS = client_v2_patterns("/keys/upload(/(?P<device_id>[^/]+))?$",
+                                  releases=())
+
+    def __init__(self, hs):
+        """
+        Args:
+            hs (synapse.server.HomeServer): server
+        """
+        super(KeyUploadServlet, self).__init__()
+        self.auth = hs.get_auth()
+        self.store = hs.get_datastore()
+        self.http_client = hs.get_simple_http_client()
+        self.main_uri = hs.config.worker_main_http_uri
+
+    @defer.inlineCallbacks
+    def on_POST(self, request, device_id):
+        requester = yield self.auth.get_user_by_req(request, allow_guest=True)
+        user_id = requester.user.to_string()
+        body = parse_json_object_from_request(request)
+
+        if device_id is not None:
+            # passing the device_id here is deprecated; however, we allow it
+            # for now for compatibility with older clients.
+            if (requester.device_id is not None and
+                    device_id != requester.device_id):
+                logger.warning("Client uploading keys for a different device "
+                               "(logged in as %s, uploading for %s)",
+                               requester.device_id, device_id)
+        else:
+            device_id = requester.device_id
+
+        if device_id is None:
+            raise SynapseError(
+                400,
+                "To upload keys, you must pass device_id when authenticating"
+            )
+
+        if body:
+            # They're actually trying to upload something, proxy to main synapse.
+            result = yield self.http_client.post_json_get_json(
+                self.main_uri + request.uri,
+                body,
+            )
+
+            defer.returnValue((200, result))
+        else:
+            # Just interested in counts.
+            result = yield self.store.count_e2e_one_time_keys(user_id, device_id)
+            defer.returnValue((200, {"one_time_key_counts": result}))
+
+
+class FrontendProxySlavedStore(
+    SlavedDeviceStore,
+    SlavedClientIpStore,
+    BaseSlavedStore,
+):
+    pass
+
+
+class FrontendProxyServer(HomeServer):
+    def get_db_conn(self, run_new_connection=True):
+        # Any param beginning with cp_ is a parameter for adbapi, and should
+        # not be passed to the database engine.
+        db_params = {
+            k: v for k, v in self.db_config.get("args", {}).items()
+            if not k.startswith("cp_")
+        }
+        db_conn = self.database_engine.module.connect(**db_params)
+
+        if run_new_connection:
+            self.database_engine.on_new_connection(db_conn)
+        return db_conn
+
+    def setup(self):
+        logger.info("Setting up.")
+        self.datastore = FrontendProxySlavedStore(self.get_db_conn(), self)
+        logger.info("Finished setting up.")
+
+    def _listen_http(self, listener_config):
+        port = listener_config["port"]
+        bind_addresses = listener_config["bind_addresses"]
+        site_tag = listener_config.get("tag", port)
+        resources = {}
+        for res in listener_config["resources"]:
+            for name in res["names"]:
+                if name == "metrics":
+                    resources[METRICS_PREFIX] = MetricsResource(self)
+                elif name == "client":
+                    resource = JsonResource(self, canonical_json=False)
+                    KeyUploadServlet(self).register(resource)
+                    resources.update({
+                        "/_matrix/client/r0": resource,
+                        "/_matrix/client/unstable": resource,
+                        "/_matrix/client/v2_alpha": resource,
+                        "/_matrix/client/api/v1": resource,
+                    })
+
+        root_resource = create_resource_tree(resources, Resource())
+
+        for address in bind_addresses:
+            reactor.listenTCP(
+                port,
+                SynapseSite(
+                    "synapse.access.http.%s" % (site_tag,),
+                    site_tag,
+                    listener_config,
+                    root_resource,
+                ),
+                interface=address
+            )
+
+        logger.info("Synapse frontend proxy now listening on port %d", port)
+
+    def start_listening(self, listeners):
+        for listener in listeners:
+            if listener["type"] == "http":
+                self._listen_http(listener)
+            elif listener["type"] == "manhole":
+                bind_addresses = listener["bind_addresses"]
+
+                for address in bind_addresses:
+                    reactor.listenTCP(
+                        listener["port"],
+                        manhole(
+                            username="matrix",
+                            password="rabbithole",
+                            globals={"hs": self},
+                        ),
+                        interface=address
+                    )
+            else:
+                logger.warn("Unrecognized listener type: %s", listener["type"])
+
+        self.get_tcp_replication().start_replication(self)
+
+    def build_tcp_replication(self):
+        return ReplicationClientHandler(self.get_datastore())
+
+
+def start(config_options):
+    try:
+        config = HomeServerConfig.load_config(
+            "Synapse frontend proxy", config_options
+        )
+    except ConfigError as e:
+        sys.stderr.write("\n" + e.message + "\n")
+        sys.exit(1)
+
+    assert config.worker_app == "synapse.app.frontend_proxy"
+
+    assert config.worker_main_http_uri is not None
+
+    setup_logging(config, use_worker_options=True)
+
+    events.USE_FROZEN_DICTS = config.use_frozen_dicts
+
+    database_engine = create_engine(config.database_config)
+
+    tls_server_context_factory = context_factory.ServerContextFactory(config)
+
+    ss = FrontendProxyServer(
+        config.server_name,
+        db_config=config.database_config,
+        tls_server_context_factory=tls_server_context_factory,
+        config=config,
+        version_string="Synapse/" + get_version_string(synapse),
+        database_engine=database_engine,
+    )
+
+    ss.setup()
+    ss.get_handlers()
+    ss.start_listening(config.worker_listeners)
+
+    def run():
+        # make sure that we run the reactor with the sentinel log context,
+        # otherwise other PreserveLoggingContext instances will get confused
+        # and complain when they see the logcontext arbitrarily swapping
+        # between the sentinel and `run` logcontexts.
+ with PreserveLoggingContext(): + logger.info("Running") + change_resource_limit(config.soft_file_limit) + if config.gc_thresholds: + gc.set_threshold(*config.gc_thresholds) + reactor.run() + + def start(): + ss.get_state_handler().start_caching() + ss.get_datastore().start_profiling() + + reactor.callWhenRunning(start) + + if config.worker_daemonize: + daemon = Daemonize( + app="synapse-frontend-proxy", + pid=config.worker_pid_file, + action=run, + auto_close_fds=False, + verbose=True, + logger=logger, + ) + daemon.start() + else: + run() + + +if __name__ == '__main__': + with LoggingContext("main"): + start(sys.argv[1:]) diff --git a/synapse/config/workers.py b/synapse/config/workers.py index ea48d931a1..99d5d8aaeb 100644 --- a/synapse/config/workers.py +++ b/synapse/config/workers.py @@ -32,6 +32,8 @@ class WorkerConfig(Config): self.worker_replication_port = config.get("worker_replication_port", None) self.worker_name = config.get("worker_name", self.worker_app) + self.worker_main_http_uri = config.get("worker_main_http_uri", None) + if self.worker_listeners: for listener in self.worker_listeners: bind_address = listener.pop("bind_address", None) From d4d12daed9374ef0419528b877ca37ff1821367a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 7 Jul 2017 18:36:45 +0100 Subject: [PATCH 0010/1637] Include registration and as stores in frontend proxy --- synapse/app/frontend_proxy.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py index c8fa7854ad..132f18a979 100644 --- a/synapse/app/frontend_proxy.py +++ b/synapse/app/frontend_proxy.py @@ -25,6 +25,8 @@ from synapse.metrics.resource import MetricsResource, METRICS_PREFIX from synapse.replication.slave.storage._base import BaseSlavedStore from synapse.replication.slave.storage.client_ips import SlavedClientIpStore from synapse.replication.slave.storage.devices import SlavedDeviceStore +from synapse.replication.slave.storage.registration import SlavedRegistrationStore +from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore from synapse.replication.tcp.client import ReplicationClientHandler from synapse.server import HomeServer from synapse.storage.engines import create_engine @@ -111,6 +113,8 @@ class KeyUploadServlet(RestServlet): class FrontendProxySlavedStore( SlavedDeviceStore, SlavedClientIpStore, + SlavedApplicationServiceStore, + SlavedRegistrationStore, BaseSlavedStore, ): pass From 6e16aca8b0046b5f4887fd249fe7f653262ed49c Mon Sep 17 00:00:00 2001 From: Krombel Date: Mon, 10 Jul 2017 16:42:17 +0200 Subject: [PATCH 0011/1637] encode sync-response statically; omit empty objects from sync-response --- synapse/rest/client/v2_alpha/sync.py | 81 ++++++++++++++++------------ 1 file changed, 48 insertions(+), 33 deletions(-) diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py index 83e209d18f..fc4d7d7dff 100644 --- a/synapse/rest/client/v2_alpha/sync.py +++ b/synapse/rest/client/v2_alpha/sync.py @@ -164,41 +164,53 @@ class SyncRestServlet(RestServlet): ) time_now = self.clock.time_msec() + response_content = self.encode_response(time_now, sync_result, requester.access_token_id, filter) - joined = self.encode_joined( - sync_result.joined, time_now, requester.access_token_id, filter.event_fields - ) + defer.returnValue((200, response_content)) - invited = self.encode_invited( - sync_result.invited, time_now, requester.access_token_id - ) - - archived = self.encode_archived( - sync_result.archived, 
time_now, requester.access_token_id, - filter.event_fields, - ) - - response_content = { - "account_data": {"events": sync_result.account_data}, - "to_device": {"events": sync_result.to_device}, - "device_lists": { - "changed": list(sync_result.device_lists), - }, - "presence": self.encode_presence( - sync_result.presence, time_now - ), - "rooms": { - "join": joined, - "invite": invited, - "leave": archived, - }, + @staticmethod + def encode_response(time_now, sync_result, access_token_id, filter): + response = { "device_one_time_keys_count": sync_result.device_one_time_keys_count, "next_batch": sync_result.next_batch.to_string(), } - defer.returnValue((200, response_content)) + if sync_result.account_data: + response["account_data"] = {"events": sync_result.account_data} + if sync_result.to_device: + response["to_device"] = {"events": sync_result.to_device} + if sync_result.device_lists: + response["device_lists"] = { + "changed": list(sync_result.device_lists), + } - def encode_presence(self, events, time_now): + if sync_result.presence: + response["presence"] = SyncRestServlet.encode_presence( + sync_result.presence, time_now + ) + + rooms = {} + if sync_result.joined: + rooms["join"] = SyncRestServlet.encode_joined( + sync_result.joined, time_now, access_token_id, filter.event_fields + ) + if sync_result.invited: + rooms["invite"] = SyncRestServlet.encode_invited( + sync_result.invited, time_now, access_token_id + ) + if sync_result.archived: + rooms["leave"] = SyncRestServlet.encode_archived( + sync_result.archived, time_now, access_token_id, + filter.event_fields, + ) + + if rooms: + response["rooms"] = rooms + + return response + + @staticmethod + def encode_presence(events, time_now): return { "events": [ { @@ -212,7 +224,8 @@ class SyncRestServlet(RestServlet): ] } - def encode_joined(self, rooms, time_now, token_id, event_fields): + @staticmethod + def encode_joined(rooms, time_now, token_id, event_fields): """ Encode the joined rooms in a sync result @@ -231,13 +244,14 @@ class SyncRestServlet(RestServlet): """ joined = {} for room in rooms: - joined[room.room_id] = self.encode_room( + joined[room.room_id] = SyncRestServlet.encode_room( room, time_now, token_id, only_fields=event_fields ) return joined - def encode_invited(self, rooms, time_now, token_id): + @staticmethod + def encode_invited(rooms, time_now, token_id): """ Encode the invited rooms in a sync result @@ -270,7 +284,8 @@ class SyncRestServlet(RestServlet): return invited - def encode_archived(self, rooms, time_now, token_id, event_fields): + @staticmethod + def encode_archived(rooms, time_now, token_id, event_fields): """ Encode the archived rooms in a sync result @@ -289,7 +304,7 @@ class SyncRestServlet(RestServlet): """ joined = {} for room in rooms: - joined[room.room_id] = self.encode_room( + joined[room.room_id] = SyncRestServlet.encode_room( room, time_now, token_id, joined=False, only_fields=event_fields ) From b8ca494ee9e42e5b1aca8958088bd35cc5707437 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 10 Jul 2017 15:44:15 +0100 Subject: [PATCH 0012/1637] Initial group server implementation --- synapse/federation/transport/client.py | 34 ++ synapse/federation/transport/server.py | 143 ++++++- synapse/groups/__init__.py | 0 synapse/groups/attestations.py | 120 ++++++ synapse/groups/groups_server.py | 382 ++++++++++++++++++ synapse/handlers/room_list.py | 18 +- synapse/http/server.py | 4 +- synapse/server.py | 14 + synapse/storage/__init__.py | 3 +- synapse/storage/group_server.py | 280 +++++++++++++ 
 .../storage/schema/delta/43/group_server.sql | 77 ++++
 11 files changed, 1064 insertions(+), 11 deletions(-)
 create mode 100644 synapse/groups/__init__.py
 create mode 100644 synapse/groups/attestations.py
 create mode 100644 synapse/groups/groups_server.py
 create mode 100644 synapse/storage/group_server.py
 create mode 100644 synapse/storage/schema/delta/43/group_server.sql

diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py
index 52b2a717d2..17b93a28ab 100644
--- a/synapse/federation/transport/client.py
+++ b/synapse/federation/transport/client.py
@@ -471,3 +471,37 @@ class TransportLayerClient(object):
         )
 
         defer.returnValue(content)
+
+    @log_function
+    def invite_to_group_notification(self, destination, group_id, user_id, content):
+        path = PREFIX + "/groups/local/%s/users/%s/invite" % (group_id, user_id)
+
+        return self.client.post_json(
+            destination=destination,
+            path=path,
+            data=content,
+            ignore_backoff=True,
+        )
+
+    @log_function
+    def remove_user_from_group_notification(self, destination, group_id, user_id,
+                                            content):
+        path = PREFIX + "/groups/local/%s/users/%s/remove" % (group_id, user_id)
+
+        return self.client.post_json(
+            destination=destination,
+            path=path,
+            data=content,
+            ignore_backoff=True,
+        )
+
+    @log_function
+    def renew_group_attestation(self, destination, group_id, user_id, content):
+        path = PREFIX + "/groups/%s/renew_attestation/%s" % (group_id, user_id)
+
+        return self.client.post_json(
+            destination=destination,
+            path=path,
+            data=content,
+            ignore_backoff=True,
+        )
diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
index a78f01e442..e6b0f432fc 100644
--- a/synapse/federation/transport/server.py
+++ b/synapse/federation/transport/server.py
@@ -25,7 +25,7 @@ from synapse.http.servlet import (
 from synapse.util.ratelimitutils import FederationRateLimiter
 from synapse.util.versionstring import get_version_string
 from synapse.util.logcontext import preserve_fn
-from synapse.types import ThirdPartyInstanceID
+from synapse.types import ThirdPartyInstanceID, get_domain_from_id
 
 import functools
 import logging
@@ -609,6 +609,115 @@ class FederationVersionServlet(BaseFederationServlet):
     }))
 
 
+class FederationGroupsProfileServlet(BaseFederationServlet):
+    PATH = "/groups/(?P<group_id>[^/]*)/profile$"
+
+    @defer.inlineCallbacks
+    def on_POST(self, origin, content, query, group_id):
+        requester_user_id = content["requester_user_id"]
+        if get_domain_from_id(requester_user_id) != origin:
+            raise SynapseError(403, "requester_user_id doesn't match origin")
+
+        new_content = yield self.handler.get_group_profile(
+            group_id, requester_user_id
+        )
+
+        defer.returnValue((200, new_content))
+
+
+class FederationGroupsRoomsServlet(BaseFederationServlet):
+    PATH = "/groups/(?P<group_id>[^/]*)/rooms$"
+
+    @defer.inlineCallbacks
+    def on_POST(self, origin, content, query, group_id):
+        requester_user_id = content["requester_user_id"]
+        if get_domain_from_id(requester_user_id) != origin:
+            raise SynapseError(403, "requester_user_id doesn't match origin")
+
+        new_content = yield self.handler.get_rooms_in_group(
+            group_id, requester_user_id
+        )
+
+        defer.returnValue((200, new_content))
+
+
+class FederationGroupsUsersServlet(BaseFederationServlet):
+    PATH = "/groups/(?P<group_id>[^/]*)/users$"
+
+    @defer.inlineCallbacks
+    def on_POST(self, origin, content, query, group_id):
+        requester_user_id = content["requester_user_id"]
+        if get_domain_from_id(requester_user_id) != origin:
+            raise SynapseError(403, "requester_user_id doesn't match origin")
origin") + + new_content = yield self.handler.get_users_in_group( + group_id, requester_user_id + ) + + defer.returnValue((200, new_content)) + + +class FederationGroupsInviteServlet(BaseFederationServlet): + PATH = "/groups/(?P[^/]*)/users/(?P[^/]*)/invite$" + + @defer.inlineCallbacks + def on_POST(self, origin, content, query, group_id, user_id): + requester_user_id = content["requester_user_id"] + if get_domain_from_id(requester_user_id) != origin: + raise SynapseError(403, "requester_user_id doesn't match origin") + + new_content = yield self.handler.invite_to_group( + group_id, user_id, requester_user_id, content, + ) + + defer.returnValue((200, new_content)) + + +class FederationGroupsAcceptInviteServlet(BaseFederationServlet): + PATH = "/groups/(?P[^/]*)/users/(?P[^/]*)/accept_invite$" + + @defer.inlineCallbacks + def on_POST(self, origin, content, query, group_id, user_id): + if get_domain_from_id(user_id) != origin: + raise SynapseError(403, "user_id doesn't match origin") + + new_content = yield self.handler.accept_invite( + group_id, user_id, content, + ) + + defer.returnValue((200, new_content)) + + +class FederationGroupsRemoveUserServlet(BaseFederationServlet): + PATH = "/groups/(?P[^/]*)/users/(?P[^/]*)/remove$" + + @defer.inlineCallbacks + def on_POST(self, origin, content, query, group_id, user_id): + requester_user_id = content["requester_user_id"] + if get_domain_from_id(requester_user_id) != origin: + raise SynapseError(403, "requester_user_id doesn't match origin") + + new_content = yield self.handler.remove_user_from_group( + group_id, user_id, requester_user_id, content, + ) + + defer.returnValue((200, new_content)) + + +class FederationGroupsRenewAttestaionServlet(BaseFederationServlet): + PATH = "/groups/(?P[^/]*)/renew_attestation/(?P[^/]*)$" + + @defer.inlineCallbacks + def on_POST(self, origin, content, query, group_id, user_id): + # We don't need to check auth here as we check the attestation signatures + + new_content = yield self.handler.on_renew_group_attestation( + origin, content, group_id, user_id + ) + + defer.returnValue((200, new_content)) + + FEDERATION_SERVLET_CLASSES = ( FederationSendServlet, FederationPullServlet, @@ -635,11 +744,27 @@ FEDERATION_SERVLET_CLASSES = ( FederationVersionServlet, ) + ROOM_LIST_CLASSES = ( PublicRoomList, ) +GROUP_SERVER_SERVLET_CLASSES = ( + FederationGroupsProfileServlet, + FederationGroupsRoomsServlet, + FederationGroupsUsersServlet, + FederationGroupsInviteServlet, + FederationGroupsAcceptInviteServlet, + FederationGroupsRemoveUserServlet, +) + + +GROUP_ATTESTATION_SERVLET_CLASSES = ( + FederationGroupsRenewAttestaionServlet, +) + + def register_servlets(hs, resource, authenticator, ratelimiter): for servletclass in FEDERATION_SERVLET_CLASSES: servletclass( @@ -656,3 +781,19 @@ def register_servlets(hs, resource, authenticator, ratelimiter): ratelimiter=ratelimiter, server_name=hs.hostname, ).register(resource) + + for servletclass in GROUP_SERVER_SERVLET_CLASSES: + servletclass( + handler=hs.get_groups_server_handler(), + authenticator=authenticator, + ratelimiter=ratelimiter, + server_name=hs.hostname, + ).register(resource) + + for servletclass in GROUP_ATTESTATION_SERVLET_CLASSES: + servletclass( + handler=hs.get_groups_attestation_renewer(), + authenticator=authenticator, + ratelimiter=ratelimiter, + server_name=hs.hostname, + ).register(resource) diff --git a/synapse/groups/__init__.py b/synapse/groups/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/synapse/groups/attestations.py 
new file mode 100644
index 0000000000..d83076a9b3
--- /dev/null
+++ b/synapse/groups/attestations.py
@@ -0,0 +1,120 @@
+# -*- coding: utf-8 -*-
+# Copyright 2017 Vector Creations Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+from synapse.api.errors import SynapseError
+from synapse.types import get_domain_from_id
+from synapse.util.logcontext import preserve_fn
+
+from signedjson.sign import sign_json
+
+
+DEFAULT_ATTESTATION_LENGTH_MS = 3 * 24 * 60 * 60 * 1000
+MIN_ATTESTATION_LENGTH_MS = 1 * 60 * 60 * 1000
+UPDATE_ATTESTATION_TIME_MS = 1 * 24 * 60 * 60 * 1000
+
+
+class GroupAttestationSigning(object):
+    def __init__(self, hs):
+        self.keyring = hs.get_keyring()
+        self.clock = hs.get_clock()
+        self.server_name = hs.hostname
+        self.signing_key = hs.config.signing_key[0]
+
+    @defer.inlineCallbacks
+    def verify_attestation(self, attestation, group_id, user_id, server_name=None):
+        if not server_name:
+            if get_domain_from_id(group_id) == self.server_name:
+                server_name = get_domain_from_id(user_id)
+            else:
+                server_name = get_domain_from_id(group_id)
+
+        if user_id != attestation["user_id"]:
+            raise SynapseError(400, "Attestation has incorrect user_id")
+
+        if group_id != attestation["group_id"]:
+            raise SynapseError(400, "Attestation has incorrect group_id")
+
+        valid_until_ms = attestation["valid_until_ms"]
+        if valid_until_ms - self.clock.time_msec() < MIN_ATTESTATION_LENGTH_MS:
+            raise SynapseError(400, "Attestation not valid for long enough")
+
+        yield self.keyring.verify_json_for_server(server_name, attestation)
+
+    def create_attestation(self, group_id, user_id):
+        return sign_json({
+            "group_id": group_id,
+            "user_id": user_id,
+            "valid_until_ms": self.clock.time_msec() + DEFAULT_ATTESTATION_LENGTH_MS,
+        }, self.server_name, self.signing_key)
+
+
+class GroupAttestionRenewer(object):
+    def __init__(self, hs):
+        self.clock = hs.get_clock()
+        self.store = hs.get_datastore()
+        self.attestations = hs.get_groups_attestation_signing()
+        self.transport_client = hs.get_federation_transport_client()
+        self.hs = hs
+        self._renew_attestations_loop = self.clock.looping_call(
+            self._renew_attestations, 30 * 60 * 1000,
+        )
+
+    @defer.inlineCallbacks
+    def on_renew_attestation(self, group_id, user_id, content):
+        attestation = content["attestation"]
+
+        yield self.attestations.verify_attestation(
+            attestation,
+            user_id=user_id,
+            group_id=group_id,
+        )
+
+        yield self.store.update_remote_attestion(group_id, user_id, attestation)
+
+        defer.returnValue({})
+
+    @defer.inlineCallbacks
+    def _renew_attestations(self):
+        now = self.clock.time_msec()
+
+        rows = yield self.store.get_attestations_need_renewals(
+            now + UPDATE_ATTESTATION_TIME_MS
+        )
+
+        @defer.inlineCallbacks
+        def _renew_attestation(group_id, user_id):
+            attestation = self.attestations.create_attestation(group_id, user_id)
+
+            if self.hs.is_mine_id(group_id):
+                destination = get_domain_from_id(user_id)
+            else:
+                destination = get_domain_from_id(group_id)
+
+            yield self.transport_client.renew_group_attestation(
+                destination, group_id, user_id,
+                content={"attestation": attestation},
+            )
+
+            yield self.store.update_attestation_renewal(
+                group_id, user_id, attestation
+            )
+
+        for row in rows:
+            group_id = row["group_id"]
+            user_id = row["user_id"]
+
+            preserve_fn(_renew_attestation)(group_id, user_id)
diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py
new file mode 100644
index 0000000000..195f1eae54
--- /dev/null
+++ b/synapse/groups/groups_server.py
@@ -0,0 +1,382 @@
+# -*- coding: utf-8 -*-
+# Copyright 2017 Vector Creations Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+from synapse.api.errors import SynapseError
+from synapse.types import UserID, get_domain_from_id
+
+
+import functools
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+# TODO: Allow users to "knock" or simply join depending on rules
+# TODO: Federation admin APIs
+# TODO: is_privileged flag to users and is_public to users and rooms
+# TODO: Audit log for admins (profile updates, membership changes, users who tried
+#       to join but were rejected, etc)
+# TODO: Flairs
+
+
+UPDATE_ATTESTATION_TIME_MS = 1 * 24 * 60 * 60 * 1000
+
+
+def check_group_is_ours(and_exists=False):
+    def g(func):
+        @functools.wraps(func)
+        @defer.inlineCallbacks
+        def h(self, group_id, *args, **kwargs):
+            if not self.is_mine_id(group_id):
+                raise SynapseError(400, "Group not on this server")
+            if and_exists:
+                group = yield self.store.get_group(group_id)
+                if not group:
+                    raise SynapseError(404, "Unknown group")
+
+            res = yield func(self, group_id, *args, **kwargs)
+            defer.returnValue(res)
+
+        return h
+    return g
+
+
+class GroupsServerHandler(object):
+    def __init__(self, hs):
+        self.hs = hs
+        self.store = hs.get_datastore()
+        self.room_list_handler = hs.get_room_list_handler()
+        self.auth = hs.get_auth()
+        self.clock = hs.get_clock()
+        self.keyring = hs.get_keyring()
+        self.is_mine_id = hs.is_mine_id
+        self.signing_key = hs.config.signing_key[0]
+        self.server_name = hs.hostname
+        self.attestations = hs.get_groups_attestation_signing()
+        self.transport_client = hs.get_federation_transport_client()
+
+        # Ensure attestations get renewed
+        hs.get_groups_attestation_renewer()
+
+    @check_group_is_ours()
+    @defer.inlineCallbacks
+    def get_group_profile(self, group_id, requester_user_id):
+        group_description = yield self.store.get_group(group_id)
+
+        if group_description:
+            defer.returnValue(group_description)
+        else:
+            raise SynapseError(404, "Unknown group")
+
+    @check_group_is_ours(and_exists=True)
+    @defer.inlineCallbacks
+    def get_users_in_group(self, group_id, requester_user_id):
+        is_user_in_group = yield self.store.is_user_in_group(requester_user_id, group_id)
+
+        user_results = yield self.store.get_users_in_group(
+            group_id, include_private=is_user_in_group,
+        )
+
+        chunk = []
+        for user_result in user_results:
+            g_user_id = user_result["user_id"]
+            is_public = user_result["is_public"]
+
+            entry = {"user_id": g_user_id}
+
+            # TODO: Get profile information
+
+            if not is_public:
+                entry["is_public"] = False
+
+            if not self.is_mine_id(g_user_id):
+                attestation = yield self.store.get_remote_attestation(group_id, g_user_id)
+                if not attestation:
+                    continue
+
+                entry["attestation"] = attestation
+            else:
+                entry["attestation"] = self.attestations.create_attestation(
+                    group_id, g_user_id,
+                )
+
+            chunk.append(entry)
+
+        # TODO: If admin add lists of users whose attestations have timed out
+
+        defer.returnValue({
+            "chunk": chunk,
+            "total_user_count_estimate": len(user_results),
+        })
+
+    @check_group_is_ours(and_exists=True)
+    @defer.inlineCallbacks
+    def get_rooms_in_group(self, group_id, requester_user_id):
+        is_user_in_group = yield self.store.is_user_in_group(requester_user_id, group_id)
+
+        room_results = yield self.store.get_rooms_in_group(
+            group_id, include_private=is_user_in_group,
+        )
+
+        chunk = []
+        for room_result in room_results:
+            room_id = room_result["room_id"]
+            is_public = room_result["is_public"]
+
+            joined_users = yield self.store.get_users_in_room(room_id)
+            entry = yield self.room_list_handler.generate_room_entry(
+                room_id, len(joined_users),
+                with_alias=False, allow_private=True,
+            )
+
+            if not entry:
+                continue
+
+            if not is_public:
+                entry["is_public"] = False
+
+            chunk.append(entry)
+
+        chunk.sort(key=lambda e: -e["num_joined_members"])
+
+        defer.returnValue({
+            "chunk": chunk,
+            "total_room_count_estimate": len(room_results),
+        })
+
+    @check_group_is_ours(and_exists=True)
+    @defer.inlineCallbacks
+    def add_room(self, group_id, requester_user_id, room_id, content):
+        is_admin = yield self.store.is_user_admin_in_group(group_id, requester_user_id)
+        if not is_admin:
+            raise SynapseError(403, "User is not admin in group")
+
+        # TODO: Check if room has already been added
+
+        visibility = content.get("visibility")
+        if visibility:
+            vis_type = visibility["type"]
+            if vis_type not in ("public", "private"):
+                raise SynapseError(
+                    400, "Synapse only supports 'public'/'private' visibility"
+                )
+            is_public = vis_type == "public"
+        else:
+            is_public = True
+
+        yield self.store.add_room_to_group(group_id, room_id, is_public=is_public)
+
+        defer.returnValue({})
+
+    @check_group_is_ours(and_exists=True)
+    @defer.inlineCallbacks
+    def invite_to_group(self, group_id, user_id, requester_user_id, content):
+        is_admin = yield self.store.is_user_admin_in_group(
+            group_id, requester_user_id
+        )
+        if not is_admin:
+            raise SynapseError(403, "User is not admin in group")
+
+        # TODO: Check if user knocked
+        # TODO: Check if user is already invited
+
+        group = yield self.store.get_group(group_id)
+        content = {
+            "profile": {
+                "name": group["name"],
+                "avatar_url": group["avatar_url"],
+            },
+            "inviter": requester_user_id,
+        }
+
+        if self.hs.is_mine_id(user_id):
+            raise NotImplementedError()
+        else:
+            local_attestation = self.attestations.create_attestation(group_id, user_id)
+            content.update({
+                "attestation": local_attestation,
+            })
+
+            res = yield self.transport_client.invite_to_group_notification(
+                get_domain_from_id(user_id), group_id, user_id, content
+            )
+
+        if res["state"] == "join":
+            if not self.hs.is_mine_id(user_id):
+                remote_attestation = res["attestation"]
+
+                yield self.attestations.verify_attestation(
+                    remote_attestation,
+                    user_id=user_id,
+                    group_id=group_id,
+                )
+            else:
+                remote_attestation = None
+
+            yield self.store.add_user_to_group(
+                group_id, user_id,
+                is_admin=False,
+                is_public=False,  # TODO
+                local_attestation=local_attestation,
+                remote_attestation=remote_attestation,
+            )
elif res["state"] == "invite": + yield self.store.add_group_invite( + group_id, user_id, + ) + defer.returnValue({ + "state": "invite" + }) + elif res["state"] == "reject": + defer.returnValue({ + "state": "reject" + }) + else: + raise SynapseError(502, "Unknown state returned by HS") + + @check_group_is_ours(and_exists=True) + @defer.inlineCallbacks + def accept_invite(self, group_id, user_id, content): + if not self.store.is_user_invited_to_local_group(group_id, user_id): + raise SynapseError(403, "User not invited to group") + + if not self.hs.is_mine_id(user_id): + remote_attestation = content["attestation"] + + yield self.attestations.verify_attestation( + remote_attestation, + user_id=user_id, + group_id=group_id, + ) + else: + remote_attestation = None + + local_attestation = self.attestations.create_attestation(group_id, user_id) + + visibility = content.get("visibility") + if visibility: + vis_type = visibility["type"] + if vis_type not in ("public", "private"): + raise SynapseError( + 400, "Synapse only supports 'public'/'private' visibility" + ) + is_public = vis_type == "public" + else: + is_public = True + + yield self.store.add_user_to_group( + group_id, user_id, + is_admin=False, + is_public=is_public, + local_attestation=local_attestation, + remote_attestation=remote_attestation, + ) + + defer.returnValue({ + "state": "join", + "attestation": local_attestation, + }) + + @check_group_is_ours(and_exists=True) + @defer.inlineCallbacks + def knock(self, group_id, user_id, content): + pass + + @check_group_is_ours(and_exists=True) + @defer.inlineCallbacks + def accept_knock(self, group_id, user_id, content): + pass + + @check_group_is_ours(and_exists=True) + @defer.inlineCallbacks + def remove_user_from_group(self, group_id, user_id, requester_user_id, content): + is_kick = False + if requester_user_id != user_id: + is_admin = yield self.store.is_user_admin_in_group( + group_id, requester_user_id + ) + if not is_admin: + raise SynapseError(403, "User is not admin in group") + + is_kick = True + + yield self.store.remove_user_to_group( + group_id, user_id, + ) + + if is_kick: + if self.hs.is_mine_id(user_id): + raise NotImplementedError() + else: + yield self.transport_client.remove_user_from_group_notification( + get_domain_from_id(user_id), group_id, user_id, {} + ) + + defer.returnValue({}) + + @check_group_is_ours() + @defer.inlineCallbacks + def create_group(self, group_id, user_id, content): + logger.info("Attempting to create group with ID: %r", group_id) + group = yield self.store.get_group(group_id) + if group: + raise SynapseError(400, "Group already exists") + + is_admin = yield self.auth.is_server_admin(UserID.from_string(user_id)) + if not is_admin and not group_id.startswith("+u/"): + raise SynapseError(403, "Group ID must start with '+u/' or be a server admin") + + profile = content.get("profile", {}) + name = profile.get("name") + avatar_url = profile.get("avatar_url") + short_description = profile.get("short_description") + long_description = profile.get("long_description") + + yield self.store.create_group( + group_id, + user_id, + name=name, + avatar_url=avatar_url, + short_description=short_description, + long_description=long_description, + ) + + if not self.hs.is_mine_id(user_id): + remote_attestation = content["attestation"] + + yield self.attestations.verify_attestation( + remote_attestation, + user_id=user_id, + group_id=group_id, + ) + + local_attestation = self.attestations.create_attestation(group_id, user_id) + else: + local_attestation = None + 
remote_attestation = None + + yield self.store.add_user_to_group( + group_id, user_id, + is_admin=True, + is_public=True, # TODO + local_attestation=local_attestation, + remote_attestation=remote_attestation, + ) + + defer.returnValue({ + "group_id": group_id, + }) diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py index 516cd9a6ac..41e1781df7 100644 --- a/synapse/handlers/room_list.py +++ b/synapse/handlers/room_list.py @@ -276,13 +276,14 @@ class RoomListHandler(BaseHandler): # We've already got enough, so lets just drop it. return - result = yield self._generate_room_entry(room_id, num_joined_users) + result = yield self.generate_room_entry(room_id, num_joined_users) if result and _matches_room_entry(result, search_filter): chunk.append(result) @cachedInlineCallbacks(num_args=1, cache_context=True) - def _generate_room_entry(self, room_id, num_joined_users, cache_context): + def generate_room_entry(self, room_id, num_joined_users, cache_context, + with_alias=True, allow_private=False): """Returns the entry for a room """ result = { @@ -316,14 +317,15 @@ class RoomListHandler(BaseHandler): join_rules_event = current_state.get((EventTypes.JoinRules, "")) if join_rules_event: join_rule = join_rules_event.content.get("join_rule", None) - if join_rule and join_rule != JoinRules.PUBLIC: + if not allow_private and join_rule and join_rule != JoinRules.PUBLIC: defer.returnValue(None) - aliases = yield self.store.get_aliases_for_room( - room_id, on_invalidate=cache_context.invalidate - ) - if aliases: - result["aliases"] = aliases + if with_alias: + aliases = yield self.store.get_aliases_for_room( + room_id, on_invalidate=cache_context.invalidate + ) + if aliases: + result["aliases"] = aliases name_event = yield current_state.get((EventTypes.Name, "")) if name_event: diff --git a/synapse/http/server.py b/synapse/http/server.py index 7ef3d526b1..8a27e3b422 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -145,7 +145,9 @@ def wrap_request_handler(request_handler, include_metrics=False): "error": "Internal server error", "errcode": Codes.UNKNOWN, }, - send_cors=True + send_cors=True, + pretty_print=_request_user_agent_is_curl(request), + version_string=self.version_string, ) finally: try: diff --git a/synapse/server.py b/synapse/server.py index a38e5179e0..d857cca848 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -50,6 +50,8 @@ from synapse.handlers.initial_sync import InitialSyncHandler from synapse.handlers.receipts import ReceiptsHandler from synapse.handlers.read_marker import ReadMarkerHandler from synapse.handlers.user_directory import UserDirectoyHandler +from synapse.groups.groups_server import GroupsServerHandler +from synapse.groups.attestations import GroupAttestionRenewer, GroupAttestationSigning from synapse.http.client import SimpleHttpClient, InsecureInterceptableContextFactory from synapse.http.matrixfederationclient import MatrixFederationHttpClient from synapse.notifier import Notifier @@ -139,6 +141,9 @@ class HomeServer(object): 'read_marker_handler', 'action_generator', 'user_directory_handler', + 'groups_server_handler', + 'groups_attestation_signing', + 'groups_attestation_renewer', ] def __init__(self, hostname, **kwargs): @@ -309,6 +314,15 @@ class HomeServer(object): def build_user_directory_handler(self): return UserDirectoyHandler(self) + def build_groups_server_handler(self): + return GroupsServerHandler(self) + + def build_groups_attestation_signing(self): + return GroupAttestationSigning(self) + + def 
build_groups_attestation_renewer(self):
+        return GroupAttestionRenewer(self)
+
     def remove_pusher(self, app_id, push_key, user_id):
         return self.get_pusherpool().remove_pusher(app_id, push_key, user_id)
 
diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py
index b92472df33..fdee9f1ad5 100644
--- a/synapse/storage/__init__.py
+++ b/synapse/storage/__init__.py
@@ -37,7 +37,7 @@ from .media_repository import MediaRepositoryStore
 from .rejections import RejectionsStore
 from .event_push_actions import EventPushActionsStore
 from .deviceinbox import DeviceInboxStore
-
+from .group_server import GroupServerStore
 from .state import StateStore
 from .signatures import SignatureStore
 from .filtering import FilteringStore
@@ -88,6 +88,7 @@ class DataStore(RoomMemberStore, RoomStore,
                 DeviceStore, DeviceInboxStore,
                 UserDirectoryStore,
+                GroupServerStore,
                 ):
 
     def __init__(self, db_conn, hs):
diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py
new file mode 100644
index 0000000000..01d9a982c8
--- /dev/null
+++ b/synapse/storage/group_server.py
@@ -0,0 +1,280 @@
+# -*- coding: utf-8 -*-
+# Copyright 2017 Vector Creations Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+from ._base import SQLBaseStore
+
+import ujson as json
+
+
+class GroupServerStore(SQLBaseStore):
+    def get_group(self, group_id):
+        return self._simple_select_one(
+            table="groups",
+            keyvalues={
+                "group_id": group_id,
+            },
+            retcols=("name", "short_description", "long_description", "avatar_url",),
+            allow_none=True,
+            desc="get_group",
+        )
+
+    def get_users_in_group(self, group_id, include_private=False):
+        # TODO: Pagination
+
+        keyvalues = {
+            "group_id": group_id,
+        }
+        if not include_private:
+            keyvalues["is_public"] = True
+
+        return self._simple_select_list(
+            table="group_users",
+            keyvalues=keyvalues,
+            retcols=("user_id", "is_public",),
+            desc="get_users_in_group",
+        )
+
+    def get_rooms_in_group(self, group_id, include_private=False):
+        # TODO: Pagination
+
+        keyvalues = {
+            "group_id": group_id,
+        }
+        if not include_private:
+            keyvalues["is_public"] = True
+
+        return self._simple_select_list(
+            table="group_rooms",
+            keyvalues=keyvalues,
+            retcols=("room_id", "is_public",),
+            desc="get_rooms_in_group",
+        )
+
+    def is_user_in_group(self, user_id, group_id):
+        return self._simple_select_one_onecol(
+            table="group_users",
+            keyvalues={
+                "group_id": group_id,
+                "user_id": user_id,
+            },
+            retcol="user_id",
+            allow_none=True,
+            desc="is_user_in_group",
+        ).addCallback(lambda r: bool(r))
+
+    def is_user_admin_in_group(self, group_id, user_id):
+        return self._simple_select_one_onecol(
+            table="group_users",
+            keyvalues={
+                "group_id": group_id,
+                "user_id": user_id,
+            },
+            retcol="is_admin",
+            allow_none=True,
+            desc="is_user_admin_in_group",
+        )
+
+    def add_group_invite(self, group_id, user_id):
+        return self._simple_insert(
+            table="group_invites",
+            values={
+                "group_id": group_id,
+                "user_id": user_id,
+            },
+            desc="add_group_invite",
+        )
+
+    def
is_user_invited_to_local_group(self, group_id, user_id): + return self._simple_select_one_onecol( + table="group_invites", + keyvalues={ + "group_id": group_id, + "user_id": user_id, + }, + retcol="user_id", + desc="is_user_invited_to_local_group", + allow_none=True, + ) + + def add_user_to_group(self, group_id, user_id, is_admin=False, is_public=True, + local_attestation=None, remote_attestation=None): + def _add_user_to_group_txn(txn): + self._simple_insert_txn( + txn, + table="group_users", + values={ + "group_id": group_id, + "user_id": user_id, + "is_admin": is_admin, + "is_public": is_public, + }, + ) + + self._simple_delete_txn( + txn, + table="group_invites", + keyvalues={ + "group_id": group_id, + "user_id": user_id, + }, + ) + + if local_attestation: + self._simple_insert_txn( + txn, + table="group_attestations_renewals", + values={ + "group_id": group_id, + "user_id": user_id, + "valid_until_ms": local_attestation["valid_until_ms"], + }, + ) + if remote_attestation: + self._simple_insert_txn( + txn, + table="group_attestations_remote", + values={ + "group_id": group_id, + "user_id": user_id, + "valid_until_ms": remote_attestation["valid_until_ms"], + "attestation": json.dumps(remote_attestation), + }, + ) + + return self.runInteraction( + "add_user_to_group", _add_user_to_group_txn + ) + + def remove_user_to_group(self, group_id, user_id): + def _remove_user_to_group_txn(txn): + self._simple_delete_txn( + txn, + table="group_users", + keyvalues={ + "group_id": group_id, + "user_id": user_id, + }, + ) + self._simple_delete_txn( + txn, + table="group_invites", + keyvalues={ + "group_id": group_id, + "user_id": user_id, + }, + ) + self._simple_delete_txn( + txn, + table="group_attestations_renewals", + keyvalues={ + "group_id": group_id, + "user_id": user_id, + }, + ) + self._simple_delete_txn( + txn, + table="group_attestations_remote", + keyvalues={ + "group_id": group_id, + "user_id": user_id, + }, + ) + return self.runInteraction("remove_user_to_group", _remove_user_to_group_txn) + + def add_room_to_group(self, group_id, room_id, is_public): + return self._simple_insert( + table="group_rooms", + values={ + "group_id": group_id, + "room_id": room_id, + "is_public": is_public, + }, + desc="add_room_to_group", + ) + + @defer.inlineCallbacks + def create_group(self, group_id, user_id, name, avatar_url, short_description, + long_description,): + yield self._simple_insert( + table="groups", + values={ + "group_id": group_id, + "name": name, + "avatar_url": avatar_url, + "short_description": short_description, + "long_description": long_description, + }, + desc="create_group", + ) + + def get_attestations_need_renewals(self, valid_until_ms): + def _get_attestations_need_renewals_txn(txn): + sql = """ + SELECT group_id, user_id FROM group_attestations_renewals + WHERE valid_until_ms <= ? 
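+            -- i.e. rows whose expiry falls at or before the supplied cut-off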
+ """ + txn.execute(sql, (valid_until_ms,)) + return self.cursor_to_dict(txn) + return self.runInteraction( + "get_attestations_need_renewals", _get_attestations_need_renewals_txn + ) + + def update_attestation_renewal(self, group_id, user_id, attestation): + return self._simple_update_one( + table="group_attestations_renewals", + keyvalues={ + "group_id": group_id, + "user_id": user_id, + }, + updatevalues={ + "valid_until_ms": attestation["valid_until_ms"], + }, + desc="update_attestation_renewal", + ) + + def update_remote_attestion(self, group_id, user_id, attestation): + return self._simple_update_one( + table="group_attestations_remote", + keyvalues={ + "group_id": group_id, + "user_id": user_id, + }, + updatevalues={ + "valid_until_ms": attestation["valid_until_ms"], + "attestation": json.dumps(attestation) + }, + desc="update_remote_attestion", + ) + + @defer.inlineCallbacks + def get_remote_attestation(self, group_id, user_id): + row = yield self._simple_select_one( + table="group_attestations_remote", + keyvalues={ + "group_id": group_id, + "user_id": user_id, + }, + retcols=("valid_until_ms", "attestation"), + desc="get_remote_attestation", + allow_none=True, + ) + + now = int(self._clock.time_msec()) + if row and now < row["valid_until_ms"]: + defer.returnValue(json.loads(row["attestation"])) + + defer.returnValue(None) diff --git a/synapse/storage/schema/delta/43/group_server.sql b/synapse/storage/schema/delta/43/group_server.sql new file mode 100644 index 0000000000..6f1a941990 --- /dev/null +++ b/synapse/storage/schema/delta/43/group_server.sql @@ -0,0 +1,77 @@ +/* Copyright 2017 Vector Creations Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +CREATE TABLE groups ( + group_id TEXT NOT NULL, + name TEXT, + avatar_url TEXT, + short_description TEXT, + long_description TEXT +); + +CREATE UNIQUE INDEX groups_idx ON groups(group_id); + + +CREATE TABLE group_users ( + group_id TEXT NOT NULL, + user_id TEXT NOT NULL, + is_admin BOOLEAN NOT NULL, + is_public BOOLEAN NOT NULL +); + + +CREATE INDEX groups_users_g_idx ON group_users(group_id, user_id); +CREATE INDEX groups_users_u_idx ON group_users(user_id); + + +CREATE TABLE group_invites ( + group_id TEXT NOT NULL, + user_id TEXT NOT NULL +); + +CREATE INDEX groups_invites_g_idx ON group_invites(group_id, user_id); +CREATE INDEX groups_invites_u_idx ON group_invites(user_id); + + +CREATE TABLE group_rooms ( + group_id TEXT NOT NULL, + room_id TEXT NOT NULL, + is_public BOOLEAN NOT NULL +); + +CREATE INDEX groups_rooms_g_idx ON group_rooms(group_id, room_id); +CREATE INDEX groups_rooms_r_idx ON group_rooms(room_id); + + +CREATE TABLE group_attestations_renewals ( + group_id TEXT NOT NULL, + user_id TEXT NOT NULL, + valid_until_ms BIGINT NOT NULL +); + +CREATE INDEX group_attestations_renewals_g_idx ON group_attestations_renewals(group_id, user_id); +CREATE INDEX group_attestations_renewals_u_idx ON group_attestations_renewals(user_id); +CREATE INDEX group_attestations_renewals_v_idx ON group_attestations_renewals(valid_until_ms); + +CREATE TABLE group_attestations_remote ( + group_id TEXT NOT NULL, + user_id TEXT NOT NULL, + valid_until_ms BIGINT NOT NULL, + attestation TEXT NOT NULL +); + +CREATE INDEX group_attestations_remote_g_idx ON group_attestations_remote(group_id, user_id); +CREATE INDEX group_attestations_remote_u_idx ON group_attestations_remote(user_id); +CREATE INDEX group_attestations_remote_v_idx ON group_attestations_remote(valid_until_ms); From 2f82de18eec5b9457ce31d95a080bc8b0fe8e139 Mon Sep 17 00:00:00 2001 From: Krombel Date: Mon, 10 Jul 2017 17:34:58 +0200 Subject: [PATCH 0013/1637] fix test --- synapse/rest/client/v2_alpha/sync.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py index fc4d7d7dff..31db47eba0 100644 --- a/synapse/rest/client/v2_alpha/sync.py +++ b/synapse/rest/client/v2_alpha/sync.py @@ -164,7 +164,9 @@ class SyncRestServlet(RestServlet): ) time_now = self.clock.time_msec() - response_content = self.encode_response(time_now, sync_result, requester.access_token_id, filter) + response_content = self.encode_response( + time_now, sync_result, requester.access_token_id, filter + ) defer.returnValue((200, response_content)) From 9a6fd3ef29cc66d785436acce96b15ca83aa99a8 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 11 Jul 2017 10:02:21 +0100 Subject: [PATCH 0014/1637] Don't compute push actions for backfilled events --- synapse/handlers/federation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 694b820d85..b790a7c2ef 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1413,7 +1413,7 @@ class FederationHandler(BaseHandler): auth_events=auth_events, ) - if not event.internal_metadata.is_outlier(): + if not event.internal_metadata.is_outlier() and not backfilled: yield self.action_generator.handle_push_actions_for_event( event, context ) From 925b3638ff3e47f2fc02e178cd480cce5e934da9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 11 Jul 2017 10:04:21 +0100 Subject: [PATCH 0015/1637] Reduce log levels in tcp replication 
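These send/queue log lines fire once per replication command, so at INFO they
swamp the logs of a busy homeserver while carrying little signal; at DEBUG they
remain available when tracing connection problems. A minimal sketch of
re-enabling them for a single process, assuming these modules use the usual
per-module logging.getLogger(__name__) naming:

    import logging

    # Hypothetical debugging snippet: turn the per-command send/queue
    # lines back on for the replication protocol module only.
    logging.getLogger("synapse.replication.tcp.protocol").setLevel(logging.DEBUG)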
--- synapse/replication/tcp/protocol.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py index 062272f8dd..d59503b905 100644 --- a/synapse/replication/tcp/protocol.py +++ b/synapse/replication/tcp/protocol.py @@ -244,7 +244,7 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver): becoming full. """ if self.state == ConnectionStates.CLOSED: - logger.info("[%s] Not sending, connection closed", self.id()) + logger.debug("[%s] Not sending, connection closed", self.id()) return if do_buffer and self.state != ConnectionStates.ESTABLISHED: @@ -264,7 +264,7 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver): def _queue_command(self, cmd): """Queue the command until the connection is ready to write to again. """ - logger.info("[%s] Queing as conn %r, cmd: %r", self.id(), self.state, cmd) + logger.debug("[%s] Queing as conn %r, cmd: %r", self.id(), self.state, cmd) self.pending_commands.append(cmd) if len(self.pending_commands) > self.max_line_buffer: From 83936293eb3ddb8998191b537153eaeec5e7afb0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 11 Jul 2017 09:58:59 +0100 Subject: [PATCH 0016/1637] Comments --- synapse/groups/attestations.py | 29 ++++- synapse/groups/groups_server.py | 108 ++++++++++++------ synapse/storage/group_server.py | 32 +++++- .../storage/schema/delta/43/group_server.sql | 6 +- 4 files changed, 132 insertions(+), 43 deletions(-) diff --git a/synapse/groups/attestations.py b/synapse/groups/attestations.py index d83076a9b3..6937fa44cf 100644 --- a/synapse/groups/attestations.py +++ b/synapse/groups/attestations.py @@ -28,6 +28,8 @@ UPDATE_ATTESTATION_TIME_MS = 1 * 24 * 60 * 60 * 1000 class GroupAttestationSigning(object): + """Creates and verifies group attestations. + """ def __init__(self, hs): self.keyring = hs.get_keyring() self.clock = hs.get_clock() @@ -36,11 +38,20 @@ class GroupAttestationSigning(object): @defer.inlineCallbacks def verify_attestation(self, attestation, group_id, user_id, server_name=None): + """Verifies that the given attestation matches the given paramaters. + + An optional server_name can be supplied to explicitly set which server's + signature is expected. Otherwise assumes that either the group_id or user_id + is local and uses the other's server as the one to check. + """ + if not server_name: if get_domain_from_id(group_id) == self.server_name: server_name = get_domain_from_id(user_id) - else: + elif get_domain_from_id(user_id) == self.server_name: server_name = get_domain_from_id(group_id) + else: + raise Exception("Expected eitehr group_id or user_id to be local") if user_id != attestation["user_id"]: raise SynapseError(400, "Attestation has incorrect user_id") @@ -48,6 +59,7 @@ class GroupAttestationSigning(object): if group_id != attestation["group_id"]: raise SynapseError(400, "Attestation has incorrect group_id") + # TODO: valid_until_ms = attestation["valid_until_ms"] if valid_until_ms - self.clock.time_msec() < MIN_ATTESTATION_LENGTH_MS: raise SynapseError(400, "Attestation not valid for long enough") @@ -55,6 +67,9 @@ class GroupAttestationSigning(object): yield self.keyring.verify_json_for_server(server_name, attestation) def create_attestation(self, group_id, user_id): + """Create an attestation for the group_id and user_id with default + validity length. 
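+        The attestation is signed with this server's signing key.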
+ """ return sign_json({ "group_id": group_id, "user_id": user_id, @@ -63,11 +78,15 @@ class GroupAttestationSigning(object): class GroupAttestionRenewer(object): + """Responsible for sending and receiving attestation updates. + """ + def __init__(self, hs): self.clock = hs.get_clock() self.store = hs.get_datastore() self.assestations = hs.get_groups_attestation_signing() self.transport_client = hs.get_federation_transport_client() + self.is_mine_id = hs.is_mind_id self._renew_attestations_loop = self.clock.looping_call( self._renew_attestations, 30 * 60 * 1000, @@ -75,8 +94,13 @@ class GroupAttestionRenewer(object): @defer.inlineCallbacks def on_renew_attestation(self, group_id, user_id, content): + """When a remote updates an attestation + """ attestation = content["attestation"] + if not self.is_mine_id(group_id) and not self.is_mine_id(user_id): + raise SynapseError(400, "Neither user not group are on this server") + yield self.attestations.verify_attestation( attestation, user_id=user_id, @@ -89,6 +113,9 @@ class GroupAttestionRenewer(object): @defer.inlineCallbacks def _renew_attestations(self): + """Called periodically to check if we need to update any of our attestations + """ + now = self.clock.time_msec() rows = yield self.store.get_attestations_need_renewals( diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 195f1eae54..44083100f7 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -19,7 +19,6 @@ from synapse.api.errors import SynapseError from synapse.types import UserID, get_domain_from_id -import functools import logging logger = logging.getLogger(__name__) @@ -33,28 +32,6 @@ logger = logging.getLogger(__name__) # TODO: Flairs -UPDATE_ATTESTATION_TIME_MS = 1 * 24 * 60 * 60 * 1000 - - -def check_group_is_ours(and_exists=False): - def g(func): - @functools.wraps(func) - @defer.inlineCallbacks - def h(self, group_id, *args, **kwargs): - if not self.is_mine_id(group_id): - raise SynapseError(400, "Group not on this server") - if and_exists: - group = yield self.store.get_group(group_id) - if not group: - raise SynapseError(404, "Unknown group") - - res = yield func(self, group_id, *args, **kwargs) - defer.returnValue(res) - - return h - return g - - class GroupsServerHandler(object): def __init__(self, hs): self.hs = hs @@ -72,9 +49,28 @@ class GroupsServerHandler(object): # Ensure attestations get renewed hs.get_groups_attestation_renewer() - @check_group_is_ours() + @defer.inlineCallbacks + def check_group_is_ours(self, group_id, and_exists=False): + """Check that the group is ours, and optionally if it exists. + + If group does exist then return group. 
+ """ + if not self.is_mine_id(group_id): + raise SynapseError(400, "Group not on this server") + + group = yield self.store.get_group(group_id) + if and_exists and not group: + raise SynapseError(404, "Unknown group") + + defer.returnValue(group) + @defer.inlineCallbacks def get_group_profile(self, group_id, requester_user_id): + """Get the group profile as seen by requester_user_id + """ + + yield self.check_group_is_ours(group_id) + group_description = yield self.store.get_group(group_id) if group_description: @@ -82,9 +78,13 @@ class GroupsServerHandler(object): else: raise SynapseError(404, "Unknown group") - @check_group_is_ours(and_exists=True) @defer.inlineCallbacks def get_users_in_group(self, group_id, requester_user_id): + """Get the users in group as seen by requester_user_id + """ + + yield self.check_group_is_ours(group_id, and_exists=True) + is_user_in_group = yield self.store.is_user_in_group(requester_user_id, group_id) user_results = yield self.store.get_users_in_group( @@ -123,9 +123,13 @@ class GroupsServerHandler(object): "total_user_count_estimate": len(user_results), }) - @check_group_is_ours(and_exists=True) @defer.inlineCallbacks def get_rooms_in_group(self, group_id, requester_user_id): + """Get the rooms in group as seen by requester_user_id + """ + + yield self.check_group_is_ours(group_id, and_exists=True) + is_user_in_group = yield self.store.is_user_in_group(requester_user_id, group_id) room_results = yield self.store.get_rooms_in_group( @@ -158,9 +162,13 @@ class GroupsServerHandler(object): "total_room_count_estimate": len(room_results), }) - @check_group_is_ours(and_exists=True) @defer.inlineCallbacks def add_room(self, group_id, requester_user_id, room_id, content): + """Add room to group + """ + + yield self.check_group_is_ours(group_id, and_exists=True) + is_admin = yield self.store.is_user_admin_in_group(group_id, requester_user_id) if not is_admin: raise SynapseError(403, "User is not admin in group") @@ -182,9 +190,13 @@ class GroupsServerHandler(object): defer.returnValue({}) - @check_group_is_ours(and_exists=True) @defer.inlineCallbacks def invite_to_group(self, group_id, user_id, requester_user_id, content): + """Invite user to group + """ + + group = yield self.check_group_is_ours(group_id, and_exists=True) + is_admin = yield self.store.is_user_admin_in_group( group_id, requester_user_id ) @@ -194,7 +206,6 @@ class GroupsServerHandler(object): # TODO: Check if user knocked # TODO: Check if user is already invited - group = yield self.store.get_group(group_id) content = { "profile": { "name": group["name"], @@ -248,9 +259,16 @@ class GroupsServerHandler(object): else: raise SynapseError(502, "Unknown state returned by HS") - @check_group_is_ours(and_exists=True) @defer.inlineCallbacks def accept_invite(self, group_id, user_id, content): + """User tries to accept an invite to the group. 
+
+        This is different from them asking to join, and so should error if no
+        invite exists (and they're not a member of the group)
+        """
+
+        yield self.check_group_is_ours(group_id, and_exists=True)
+
         if not self.store.is_user_invited_to_local_group(group_id, user_id):
             raise SynapseError(403, "User not invited to group")
 
@@ -291,19 +309,33 @@ class GroupsServerHandler(object):
             "attestation": local_attestation,
         })
 
-    @check_group_is_ours(and_exists=True)
     @defer.inlineCallbacks
     def knock(self, group_id, user_id, content):
-        pass
+        """A user requests becoming a member of the group
+        """
+        yield self.check_group_is_ours(group_id, and_exists=True)
+
+        raise NotImplementedError()
 
-    @check_group_is_ours(and_exists=True)
     @defer.inlineCallbacks
     def accept_knock(self, group_id, user_id, content):
-        pass
+        """Accept a user's knock to the group.
+
+        Errors if the user hasn't knocked, rather than inviting them.
+        """
+
+        yield self.check_group_is_ours(group_id, and_exists=True)
+
+        raise NotImplementedError()
 
-    @check_group_is_ours(and_exists=True)
     @defer.inlineCallbacks
     def remove_user_from_group(self, group_id, user_id, requester_user_id, content):
+        """Remove a user from the group; either a user is leaving or an admin
+        kicked them.
+        """
+
+        yield self.check_group_is_ours(group_id, and_exists=True)
+
         is_kick = False
         if requester_user_id != user_id:
             is_admin = yield self.store.is_user_admin_in_group(
@@ -314,7 +346,7 @@ class GroupsServerHandler(object):
 
             is_kick = True
 
-        yield self.store.remove_user_to_group(
+        yield self.store.remove_user_from_group(
             group_id, user_id,
         )
 
@@ -328,11 +360,11 @@ class GroupsServerHandler(object):
 
         defer.returnValue({})
 
-    @check_group_is_ours()
     @defer.inlineCallbacks
     def create_group(self, group_id, user_id, content):
+        group = yield self.check_group_is_ours(group_id)
+
         logger.info("Attempting to create group with ID: %r", group_id)
-        group = yield self.store.get_group(group_id)
         if group:
             raise SynapseError(400, "Group already exists")
 
diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py
index 01d9a982c8..327d770862 100644
--- a/synapse/storage/group_server.py
+++ b/synapse/storage/group_server.py
@@ -89,6 +89,8 @@ class GroupServerStore(SQLBaseStore):
         )
 
     def add_group_invite(self, group_id, user_id):
+        """Record that the group server has invited a user
+        """
         return self._simple_insert(
             table="group_invites",
             values={
@@ -99,6 +101,8 @@ class GroupServerStore(SQLBaseStore):
         )
 
     def is_user_invited_to_local_group(self, group_id, user_id):
+        """Has the group server invited a user?
+        """
         return self._simple_select_one_onecol(
             table="group_invites",
             keyvalues={
@@ -112,6 +116,19 @@ class GroupServerStore(SQLBaseStore):
 
     def add_user_to_group(self, group_id, user_id, is_admin=False, is_public=True,
                           local_attestation=None, remote_attestation=None):
+        """Add a user to the group server.
+
+        Args:
+            group_id (str)
+            user_id (str)
+            is_admin (bool)
+            is_public (bool)
+            local_attestation (dict): The attestation the GS created to give
+                to the remote server. Optional if the user and group are on the
+                same server
+            remote_attestation (dict): The attestation given to GS by remote
+                server. 
Optional if the user and group are on the same server
+        """
         def _add_user_to_group_txn(txn):
             self._simple_insert_txn(
                 txn,
@@ -159,8 +176,8 @@ class GroupServerStore(SQLBaseStore):
             "add_user_to_group", _add_user_to_group_txn
         )
 
-    def remove_user_to_group(self, group_id, user_id):
-        def _remove_user_to_group_txn(txn):
+    def remove_user_from_group(self, group_id, user_id):
+        def _remove_user_from_group_txn(txn):
             self._simple_delete_txn(
                 txn,
                 table="group_users",
@@ -193,7 +210,7 @@ class GroupServerStore(SQLBaseStore):
                     "user_id": user_id,
                 },
             )
-        return self.runInteraction("remove_user_to_group", _remove_user_to_group_txn)
+        return self.runInteraction("remove_user_from_group", _remove_user_from_group_txn)
 
     def add_room_to_group(self, group_id, room_id, is_public):
         return self._simple_insert(
@@ -222,6 +239,8 @@ class GroupServerStore(SQLBaseStore):
         )
 
     def get_attestations_need_renewals(self, valid_until_ms):
+        """Get all attestations that need to be renewed before the given time
+        """
         def _get_attestations_need_renewals_txn(txn):
             sql = """
                 SELECT group_id, user_id FROM group_attestations_renewals
@@ -234,6 +253,8 @@ class GroupServerStore(SQLBaseStore):
         )
 
     def update_attestation_renewal(self, group_id, user_id, attestation):
+        """Update an attestation that we have renewed
+        """
         return self._simple_update_one(
             table="group_attestations_renewals",
             keyvalues={
@@ -247,6 +268,8 @@ class GroupServerStore(SQLBaseStore):
         )
 
     def update_remote_attestion(self, group_id, user_id, attestation):
+        """Update an attestation that a remote has renewed
+        """
         return self._simple_update_one(
             table="group_attestations_remote",
             keyvalues={
@@ -262,6 +285,9 @@ class GroupServerStore(SQLBaseStore):
 
     @defer.inlineCallbacks
     def get_remote_attestation(self, group_id, user_id):
+        """Get the attestation that proves the remote agrees that the user is
+        in the group.
+        """
         row = yield self._simple_select_one(
             table="group_attestations_remote",
             keyvalues={
diff --git a/synapse/storage/schema/delta/43/group_server.sql b/synapse/storage/schema/delta/43/group_server.sql
index 6f1a941990..5dc7a497e2 100644
--- a/synapse/storage/schema/delta/43/group_server.sql
+++ b/synapse/storage/schema/delta/43/group_server.sql
@@ -24,6 +24,7 @@ CREATE TABLE groups (
 CREATE UNIQUE INDEX groups_idx ON groups(group_id);
 
 
+-- list of users the group server thinks are joined
 CREATE TABLE group_users (
     group_id TEXT NOT NULL,
     user_id TEXT NOT NULL,
@@ -35,7 +36,7 @@ CREATE TABLE group_users (
 CREATE INDEX groups_users_g_idx ON group_users(group_id, user_id);
 CREATE INDEX groups_users_u_idx ON group_users(user_id);
 
-
+-- list of users the group server thinks are invited
 CREATE TABLE group_invites (
     group_id TEXT NOT NULL,
     user_id TEXT NOT NULL
@@ -55,6 +56,7 @@ CREATE INDEX groups_rooms_g_idx ON group_rooms(group_id, room_id);
 CREATE INDEX groups_rooms_r_idx ON group_rooms(room_id);
 
 
+-- List of attestations we've given out and need to renew
 CREATE TABLE group_attestations_renewals (
     group_id TEXT NOT NULL,
     user_id TEXT NOT NULL,
@@ -65,6 +67,8 @@ CREATE INDEX group_attestations_renewals_g_idx ON group_attestations_renewals(gr
 CREATE INDEX group_attestations_renewals_u_idx ON group_attestations_renewals(user_id);
 CREATE INDEX group_attestations_renewals_v_idx ON group_attestations_renewals(valid_until_ms);
 
+
+-- List of attestations we've received from remotes and are interested in. 
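+-- valid_until_ms lets readers skip attestations that have already expired.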
CREATE TABLE group_attestations_remote ( group_id TEXT NOT NULL, user_id TEXT NOT NULL, From 429925a5e9d24bef0533d936d2bca8a149b2ad1c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 11 Jul 2017 11:44:08 +0100 Subject: [PATCH 0017/1637] Lift out visibility parsing --- synapse/groups/groups_server.py | 41 +++++++++++++++++---------------- 1 file changed, 21 insertions(+), 20 deletions(-) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 44083100f7..1ac946abc3 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -175,16 +175,7 @@ class GroupsServerHandler(object): # TODO: Check if room has already been added - visibility = content.get("visibility") - if visibility: - vis_type = visibility["type"] - if vis_type not in ("public", "private"): - raise SynapseError( - 400, "Synapse only supports 'public'/'private' visibility" - ) - is_public = vis_type == "public" - else: - is_public = True + is_public = _parse_visibility_from_contents(content) yield self.store.add_room_to_group(group_id, room_id, is_public=is_public) @@ -285,16 +276,7 @@ class GroupsServerHandler(object): local_attestation = self.attestations.create_attestation(group_id, user_id) - visibility = content.get("visibility") - if visibility: - vis_type = visibility["type"] - if vis_type not in ("public", "private"): - raise SynapseError( - 400, "Synapse only supports 'public'/'private' visibility" - ) - is_public = vis_type == "public" - else: - is_public = True + is_public = _parse_visibility_from_contents(content) yield self.store.add_user_to_group( group_id, user_id, @@ -412,3 +394,22 @@ class GroupsServerHandler(object): defer.returnValue({ "group_id": group_id, }) + + +def _parse_visibility_from_contents(content): + """Given a content for a request parse out whether the entity should be + public or not + """ + + visibility = content.get("visibility") + if visibility: + vis_type = visibility["type"] + if vis_type not in ("public", "private"): + raise SynapseError( + 400, "Synapse only supports 'public'/'private' visibility" + ) + is_public = vis_type == "public" + else: + is_public = True + + return is_public From 8ba89f1050c523506b99be376b9b052fd68e5bd5 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 11 Jul 2017 11:45:32 +0100 Subject: [PATCH 0018/1637] Remove u/ requirement --- synapse/groups/groups_server.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 1ac946abc3..61fe0d49d9 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -350,9 +350,10 @@ class GroupsServerHandler(object): if group: raise SynapseError(400, "Group already exists") + # TODO: Add config to enforce that only server admins can create rooms is_admin = yield self.auth.is_server_admin(UserID.from_string(user_id)) - if not is_admin and not group_id.startswith("+u/"): - raise SynapseError(403, "Group ID must start with '+u/' or be a server admin") + if not is_admin: + raise SynapseError(403, "Only server admin can create group on this server") profile = content.get("profile", {}) name = profile.get("name") From 6322fbbd41b3a44bc67982fd56999c317df08c08 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 11 Jul 2017 11:52:03 +0100 Subject: [PATCH 0019/1637] Comment --- synapse/federation/transport/client.py | 12 ++++++++++++ synapse/federation/transport/server.py | 14 ++++++++++++++ 2 files changed, 26 insertions(+) diff --git 
a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py
index 17b93a28ab..d0f8da7516 100644
--- a/synapse/federation/transport/client.py
+++ b/synapse/federation/transport/client.py
@@ -474,6 +474,10 @@ class TransportLayerClient(object):
 
     @log_function
     def invite_to_group_notification(self, destination, group_id, user_id, content):
+        """Sent by group server to inform a user's server that they have been
+        invited.
+        """
+
         path = PREFIX + "/groups/local/%s/users/%s/invite" % (group_id, user_id)
 
         return self.client.post_json(
@@ -486,6 +490,10 @@ class TransportLayerClient(object):
     @log_function
     def remove_user_from_group_notification(self, destination, group_id, user_id,
                                             content):
+        """Sent by group server to inform a user's server that they have been
+        kicked from the group.
+        """
+
         path = PREFIX + "/groups/local/%s/users/%s/remove" % (group_id, user_id)
 
         return self.client.post_json(
@@ -497,6 +505,10 @@ class TransportLayerClient(object):
 
     @log_function
     def renew_group_attestation(self, destination, group_id, user_id, content):
+        """Sent by either a group server or a user's server to periodically update
+        the attestations
+        """
+
         path = PREFIX + "/groups/%s/renew_attestation/%s" % (group_id, user_id)
 
         return self.client.post_json(
diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
index e6b0f432fc..2286f6f8fd 100644
--- a/synapse/federation/transport/server.py
+++ b/synapse/federation/transport/server.py
@@ -610,6 +610,8 @@ class FederationVersionServlet(BaseFederationServlet):
 
 
 class FederationGroupsProfileServlet(BaseFederationServlet):
+    """Get the basic profile of a group on behalf of a user
+    """
     PATH = "/groups/(?P<group_id>[^/]*)/profile$"
 
     @defer.inlineCallbacks
@@ -626,6 +628,8 @@ class FederationGroupsProfileServlet(BaseFederationServlet):
 
 
 class FederationGroupsRoomsServlet(BaseFederationServlet):
+    """Get the rooms in a group on behalf of a user
+    """
    PATH = "/groups/(?P<group_id>[^/]*)/rooms$"
 
     @defer.inlineCallbacks
@@ -642,6 +646,8 @@ class FederationGroupsRoomsServlet(BaseFederationServlet):
 
 
 class FederationGroupsUsersServlet(BaseFederationServlet):
+    """Get the users in a group on behalf of a user
+    """
    PATH = "/groups/(?P<group_id>[^/]*)/users$"
 
     @defer.inlineCallbacks
@@ -658,6 +664,8 @@ class FederationGroupsUsersServlet(BaseFederationServlet):
 
 
 class FederationGroupsInviteServlet(BaseFederationServlet):
+    """Ask a group server to invite someone to the group
+    """
    PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/invite$"
 
     @defer.inlineCallbacks
@@ -674,6 +682,8 @@ class FederationGroupsInviteServlet(BaseFederationServlet):
 
 
 class FederationGroupsAcceptInviteServlet(BaseFederationServlet):
+    """Accept an invitation from the group server
+    """
    PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/accept_invite$"
 
     @defer.inlineCallbacks
@@ -689,6 +699,8 @@ class FederationGroupsAcceptInviteServlet(BaseFederationServlet):
 
 
 class FederationGroupsRemoveUserServlet(BaseFederationServlet):
+    """Leave or kick a user from the group
+    """
    PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/remove$"
 
     @defer.inlineCallbacks
@@ -705,6 +717,8 @@ class FederationGroupsRemoveUserServlet(BaseFederationServlet):
 
 
 class FederationGroupsRenewAttestaionServlet(BaseFederationServlet):
+    """A group or user's server renews their attestation
+    """
    PATH = "/groups/(?P<group_id>[^/]*)/renew_attestation/(?P<user_id>[^/]*)$"
 
     @defer.inlineCallbacks
From 85b9f76f1dbc03b6649b267d307cd0b8f493bc6a Mon Sep 17 00:00:00 2001
From: Krombel
Date: Tue, 11 Jul 2017 13:14:35 +0200
Subject: [PATCH 0020/1637] split out reducing stuff; 
just make encode_* static --- synapse/rest/client/v2_alpha/sync.py | 62 ++++++++++++---------------- 1 file changed, 27 insertions(+), 35 deletions(-) diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py index 31db47eba0..6dcc407451 100644 --- a/synapse/rest/client/v2_alpha/sync.py +++ b/synapse/rest/client/v2_alpha/sync.py @@ -172,45 +172,37 @@ class SyncRestServlet(RestServlet): @staticmethod def encode_response(time_now, sync_result, access_token_id, filter): - response = { + joined = SyncRestServlet.encode_joined( + sync_result.joined, time_now, access_token_id, filter.event_fields + ) + + invited = SyncRestServlet.encode_invited( + sync_result.invited, time_now, access_token_id, + ) + + archived = SyncRestServlet.encode_archived( + sync_result.archived, time_now, access_token_id, + filter.event_fields, + ) + + return { + "account_data": {"events": sync_result.account_data}, + "to_device": {"events": sync_result.to_device}, + "device_lists": { + "changed": list(sync_result.device_lists), + }, + "presence": SyncRestServlet.encode_presence( + sync_result.presence, time_now + ), + "rooms": { + "join": joined, + "invite": invited, + "leave": archived, + }, "device_one_time_keys_count": sync_result.device_one_time_keys_count, "next_batch": sync_result.next_batch.to_string(), } - if sync_result.account_data: - response["account_data"] = {"events": sync_result.account_data} - if sync_result.to_device: - response["to_device"] = {"events": sync_result.to_device} - if sync_result.device_lists: - response["device_lists"] = { - "changed": list(sync_result.device_lists), - } - - if sync_result.presence: - response["presence"] = SyncRestServlet.encode_presence( - sync_result.presence, time_now - ) - - rooms = {} - if sync_result.joined: - rooms["join"] = SyncRestServlet.encode_joined( - sync_result.joined, time_now, access_token_id, filter.event_fields - ) - if sync_result.invited: - rooms["invite"] = SyncRestServlet.encode_invited( - sync_result.invited, time_now, access_token_id - ) - if sync_result.archived: - rooms["leave"] = SyncRestServlet.encode_archived( - sync_result.archived, time_now, access_token_id, - filter.event_fields, - ) - - if rooms: - response["rooms"] = rooms - - return response - @staticmethod def encode_presence(events, time_now): return { From 0aac30d53b1dba2f399cad0044a905286d8c79d2 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 11 Jul 2017 14:23:50 +0100 Subject: [PATCH 0021/1637] Comments --- synapse/groups/attestations.py | 4 ++-- synapse/groups/groups_server.py | 6 +++++- synapse/storage/schema/delta/43/group_server.sql | 4 ++-- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/synapse/groups/attestations.py b/synapse/groups/attestations.py index 6937fa44cf..0741b55c1c 100644 --- a/synapse/groups/attestations.py +++ b/synapse/groups/attestations.py @@ -38,7 +38,7 @@ class GroupAttestationSigning(object): @defer.inlineCallbacks def verify_attestation(self, attestation, group_id, user_id, server_name=None): - """Verifies that the given attestation matches the given paramaters. + """Verifies that the given attestation matches the given parameters. An optional server_name can be supplied to explicitly set which server's signature is expected. 
Otherwise assumes that either the group_id or user_id @@ -51,7 +51,7 @@ class GroupAttestationSigning(object): elif get_domain_from_id(user_id) == self.server_name: server_name = get_domain_from_id(group_id) else: - raise Exception("Expected eitehr group_id or user_id to be local") + raise Exception("Expected either group_id or user_id to be local") if user_id != attestation["user_id"]: raise SynapseError(400, "Attestation has incorrect user_id") diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 61fe0d49d9..414c95e3fe 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -80,7 +80,9 @@ class GroupsServerHandler(object): @defer.inlineCallbacks def get_users_in_group(self, group_id, requester_user_id): - """Get the users in group as seen by requester_user_id + """Get the users in group as seen by requester_user_id. + + The ordering is arbitrary at the moment """ yield self.check_group_is_ours(group_id, and_exists=True) @@ -126,6 +128,8 @@ class GroupsServerHandler(object): @defer.inlineCallbacks def get_rooms_in_group(self, group_id, requester_user_id): """Get the rooms in group as seen by requester_user_id + + This returns rooms in order of decreasing number of joined users """ yield self.check_group_is_ours(group_id, and_exists=True) diff --git a/synapse/storage/schema/delta/43/group_server.sql b/synapse/storage/schema/delta/43/group_server.sql index 5dc7a497e2..bfe8c2ca4a 100644 --- a/synapse/storage/schema/delta/43/group_server.sql +++ b/synapse/storage/schema/delta/43/group_server.sql @@ -28,7 +28,7 @@ CREATE UNIQUE INDEX groups_idx ON groups(group_id); CREATE TABLE group_users ( group_id TEXT NOT NULL, user_id TEXT NOT NULL, - is_admin BOOLEAN NOT NULL, + is_admin BOOLEAN NOT NULL, -- whether the users membership can be seen by everyone is_public BOOLEAN NOT NULL ); @@ -49,7 +49,7 @@ CREATE INDEX groups_invites_u_idx ON group_invites(user_id); CREATE TABLE group_rooms ( group_id TEXT NOT NULL, room_id TEXT NOT NULL, - is_public BOOLEAN NOT NULL + is_public BOOLEAN NOT NULL -- whether the room can be seen by everyone ); CREATE INDEX groups_rooms_g_idx ON group_rooms(group_id, room_id); From e52c391cd452077fc219fad0db8b9e5499251e5b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 11 Jul 2017 14:25:46 +0100 Subject: [PATCH 0022/1637] Rename column to attestation_json --- synapse/storage/group_server.py | 8 ++++---- synapse/storage/schema/delta/43/group_server.sql | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index 327d770862..105ab9920e 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -168,7 +168,7 @@ class GroupServerStore(SQLBaseStore): "group_id": group_id, "user_id": user_id, "valid_until_ms": remote_attestation["valid_until_ms"], - "attestation": json.dumps(remote_attestation), + "attestation_json": json.dumps(remote_attestation), }, ) @@ -278,7 +278,7 @@ class GroupServerStore(SQLBaseStore): }, updatevalues={ "valid_until_ms": attestation["valid_until_ms"], - "attestation": json.dumps(attestation) + "attestation_json": json.dumps(attestation) }, desc="update_remote_attestion", ) @@ -294,13 +294,13 @@ class GroupServerStore(SQLBaseStore): "group_id": group_id, "user_id": user_id, }, - retcols=("valid_until_ms", "attestation"), + retcols=("valid_until_ms", "attestation_json"), desc="get_remote_attestation", allow_none=True, ) now = int(self._clock.time_msec()) if row and now < 
row["valid_until_ms"]: - defer.returnValue(json.loads(row["attestation"])) + defer.returnValue(json.loads(row["attestation_json"])) defer.returnValue(None) diff --git a/synapse/storage/schema/delta/43/group_server.sql b/synapse/storage/schema/delta/43/group_server.sql index bfe8c2ca4a..b55b0a8deb 100644 --- a/synapse/storage/schema/delta/43/group_server.sql +++ b/synapse/storage/schema/delta/43/group_server.sql @@ -73,7 +73,7 @@ CREATE TABLE group_attestations_remote ( group_id TEXT NOT NULL, user_id TEXT NOT NULL, valid_until_ms BIGINT NOT NULL, - attestation TEXT NOT NULL + attestation_json TEXT NOT NULL ); CREATE INDEX group_attestations_remote_g_idx ON group_attestations_remote(group_id, user_id); From 26752df503880b565a49350e0dd8881f9b2285e9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 11 Jul 2017 14:29:03 +0100 Subject: [PATCH 0023/1637] Typo --- synapse/groups/attestations.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/groups/attestations.py b/synapse/groups/attestations.py index 0741b55c1c..9ac09366d3 100644 --- a/synapse/groups/attestations.py +++ b/synapse/groups/attestations.py @@ -86,7 +86,7 @@ class GroupAttestionRenewer(object): self.store = hs.get_datastore() self.assestations = hs.get_groups_attestation_signing() self.transport_client = hs.get_federation_transport_client() - self.is_mine_id = hs.is_mind_id + self.is_mine_id = hs.is_mine_id self._renew_attestations_loop = self.clock.looping_call( self._renew_attestations, 30 * 60 * 1000, From bbb739d24a448c500dbc56c9cedf262d42c7f2f4 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 11 Jul 2017 14:31:36 +0100 Subject: [PATCH 0024/1637] Comment --- synapse/groups/attestations.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/synapse/groups/attestations.py b/synapse/groups/attestations.py index 9ac09366d3..5ef7a12cb7 100644 --- a/synapse/groups/attestations.py +++ b/synapse/groups/attestations.py @@ -22,8 +22,10 @@ from synapse.util.logcontext import preserve_fn from signedjson.sign import sign_json +# Default validity duration for new attestations we create DEFAULT_ATTESTATION_LENGTH_MS = 3 * 24 * 60 * 60 * 1000 -MIN_ATTESTATION_LENGTH_MS = 1 * 60 * 60 * 1000 + +# Start trying to update our attestations when they come this close to expiring UPDATE_ATTESTATION_TIME_MS = 1 * 24 * 60 * 60 * 1000 @@ -58,11 +60,12 @@ class GroupAttestationSigning(object): if group_id != attestation["group_id"]: raise SynapseError(400, "Attestation has incorrect group_id") - - # TODO: valid_until_ms = attestation["valid_until_ms"] - if valid_until_ms - self.clock.time_msec() < MIN_ATTESTATION_LENGTH_MS: - raise SynapseError(400, "Attestation not valid for long enough") + + # TODO: We also want to check that *new* attestations that people give + # us to store are valid for at least a little while. 
+        if valid_until_ms < self.clock.time_msec():
+            raise SynapseError(400, "Attestation expired")
 
         yield self.keyring.verify_json_for_server(server_name, attestation)
 
From fe4e885f54353f82c5c56d1d2a31593124d39e8a Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 11 Jul 2017 14:35:07 +0100
Subject: [PATCH 0025/1637] Add federation API for adding room to group

---
 synapse/federation/transport/server.py | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
index 2286f6f8fd..5d6ff79235 100644
--- a/synapse/federation/transport/server.py
+++ b/synapse/federation/transport/server.py
@@ -645,6 +645,24 @@ class FederationGroupsRoomsServlet(BaseFederationServlet):
         defer.returnValue((200, new_content))
 
 
+class FederationGroupsAddRoomsServlet(BaseFederationServlet):
+    """Add room to group
+    """
+    PATH = "/groups/(?P<group_id>[^/]*)/room/(?<room_id>)$"
+
+    @defer.inlineCallbacks
+    def on_POST(self, origin, content, query, group_id, room_id):
+        requester_user_id = content["requester_user_id"]
+        if get_domain_from_id(requester_user_id) != origin:
+            raise SynapseError(403, "requester_user_id doesn't match origin")
+
+        new_content = yield self.handler.add_room(
+            group_id, requester_user_id, room_id, content
+        )
+
+        defer.returnValue((200, new_content))
+
+
 class FederationGroupsUsersServlet(BaseFederationServlet):
     """Get the users in a group on behalf of a user
     """
From 410b4e14a176293ee1f41f24a641db031c6192a4 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 11 Jul 2017 15:44:18 +0100
Subject: [PATCH 0026/1637] Move comment

---
 synapse/storage/schema/delta/43/group_server.sql | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/synapse/storage/schema/delta/43/group_server.sql b/synapse/storage/schema/delta/43/group_server.sql
index b55b0a8deb..cf0659c51d 100644
--- a/synapse/storage/schema/delta/43/group_server.sql
+++ b/synapse/storage/schema/delta/43/group_server.sql
@@ -28,8 +28,8 @@ CREATE UNIQUE INDEX groups_idx ON groups(group_id);
 CREATE TABLE group_users (
     group_id TEXT NOT NULL,
     user_id TEXT NOT NULL,
-    is_admin BOOLEAN NOT NULL, -- whether the users membership can be seen by everyone
-    is_public BOOLEAN NOT NULL
+    is_admin BOOLEAN NOT NULL,
+    is_public BOOLEAN NOT NULL  -- whether the users membership can be seen by everyone
 );
From 6d586dc05c35f1c0159b1eff3d83d7e3973b425d Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 12 Jul 2017 09:58:37 +0100
Subject: [PATCH 0027/1637] Comment

---
 synapse/storage/schema/delta/43/group_server.sql | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/synapse/storage/schema/delta/43/group_server.sql b/synapse/storage/schema/delta/43/group_server.sql
index cf0659c51d..c223ee275a 100644
--- a/synapse/storage/schema/delta/43/group_server.sql
+++ b/synapse/storage/schema/delta/43/group_server.sql
@@ -15,7 +15,7 @@
 
 CREATE TABLE groups (
     group_id TEXT NOT NULL,
-    name TEXT,
+    name TEXT,  -- the display name of the group
     avatar_url TEXT,
     short_description TEXT,
     long_description TEXT
From e9aec001f463a4704836e7f02645afc641238d28 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 12 Jul 2017 10:30:10 +0100
Subject: [PATCH 0028/1637] Use less DB for device list handling in sync

---
 synapse/handlers/sync.py | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 91c6c6be3c..e6df1819b9 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -579,18 
+579,17 @@ class SyncHandler(object):
         since_token = sync_result_builder.since_token
 
         if since_token and since_token.device_list_key:
-            room_ids = yield self.store.get_rooms_for_user(user_id)
-
-            user_ids_changed = set()
             changed = yield self.store.get_user_whose_devices_changed(
                 since_token.device_list_key
             )
-            for other_user_id in changed:
-                other_room_ids = yield self.store.get_rooms_for_user(other_user_id)
-                if room_ids.intersection(other_room_ids):
-                    user_ids_changed.add(other_user_id)
+            if not changed:
+                defer.returnValue([])
 
-            defer.returnValue(user_ids_changed)
+            users_who_share_room = yield self.store.get_users_who_share_room_with_user(
+                user_id
+            )
+
+            defer.returnValue(users_who_share_room & changed)
         else:
             defer.returnValue([])
 
From a62406aaa5c4ef3780e42c9de443a2cc1e82cd9a Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 10 Jul 2017 15:44:40 +0100
Subject: [PATCH 0029/1637] Add group summary APIs

---
 synapse/federation/transport/server.py |  17 +
 synapse/groups/groups_server.py        | 256 ++++++-
 synapse/storage/group_server.py        | 643 ++++++++++++++++++
 .../storage/schema/delta/43/group_server.sql |  56 ++
 4 files changed, 970 insertions(+), 2 deletions(-)

diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
index 5d6ff79235..bbb66190e0 100644
--- a/synapse/federation/transport/server.py
+++ b/synapse/federation/transport/server.py
@@ -627,6 +627,22 @@ class FederationGroupsProfileServlet(BaseFederationServlet):
         defer.returnValue((200, new_content))
 
 
+class FederationGroupsSummaryServlet(BaseFederationServlet):
+    PATH = "/groups/(?P<group_id>[^/]*)/summary$"
+
+    @defer.inlineCallbacks
+    def on_POST(self, origin, content, query, group_id):
+        requester_user_id = content["requester_user_id"]
+        if get_domain_from_id(requester_user_id) != origin:
+            raise SynapseError(403, "requester_user_id doesn't match origin")
+
+        new_content = yield self.handler.get_group_summary(
+            group_id, requester_user_id
+        )
+
+        defer.returnValue((200, new_content))
+
+
 class FederationGroupsRoomsServlet(BaseFederationServlet):
     """Get the rooms in a group on behalf of a user
     """
@@ -800,6 +800,7 @@ ROOM_LIST_CLASSES = (
 
 GROUP_SERVER_SERVLET_CLASSES = (
     FederationGroupsProfileServlet,
+    FederationGroupsSummaryServlet,
     FederationGroupsRoomsServlet,
     FederationGroupsUsersServlet,
     FederationGroupsInviteServlet,
diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py
index 414c95e3fe..29a911e18e 100644
--- a/synapse/groups/groups_server.py
+++ b/synapse/groups/groups_server.py
@@ -64,6 +64,255 @@ class GroupsServerHandler(object):
 
         defer.returnValue(group)
 
+    @defer.inlineCallbacks
+    def get_group_summary(self, group_id, requester_user_id):
+        yield self.check_group_is_ours(group_id, and_exists=True)
+
+        is_user_in_group = yield self.store.is_user_in_group(requester_user_id, group_id)
+
+        profile = yield self.get_group_profile(group_id, requester_user_id)
+
+        users, roles = yield self.store.get_users_for_summary_by_role(
+            group_id, include_private=is_user_in_group,
+        )
+
+        # TODO: Add profiles to users
+        # TODO: Add attestations to users
+
+        rooms, categories = yield self.store.get_rooms_for_summary_by_category(
+            group_id, include_private=is_user_in_group,
+        )
+
+        for room_entry in rooms:
+            room_id = room_entry["room_id"]
+            joined_users = yield self.store.get_users_in_room(room_id)
+            entry = yield self.room_list_handler.generate_room_entry(
+                room_id, len(joined_users),
+                with_alias=False, allow_private=True,
+            )
+            entry.pop("room_id", None)
+
+            room_entry["profile"]
= entry + + rooms.sort(key=lambda e: e.get("order", 0)) + + for entry in users: + user_id = entry["user_id"] + + if not self.is_mine_id(requester_user_id): + attestation = yield self.store.get_remote_attestation(group_id, user_id) + if not attestation: + continue + + entry["attestation"] = attestation + else: + entry["attestation"] = self.attestations.create_attestation( + group_id, user_id, + ) + + users.sort(key=lambda e: e.get("order", 0)) + + defer.returnValue({ + "profile": profile, + "users_section": { + "users": users, + "roles": roles, + "total_user_count_estimate": 0, # TODO + }, + "rooms_section": { + "rooms": rooms, + "categories": categories, + "total_room_count_estimate": 0, # TODO + }, + }) + + @defer.inlineCallbacks + def update_group_summary_room(self, group_id, user_id, room_id, category_id, content): + yield self.check_group_is_ours(group_id, and_exists=True) + + is_admin = yield self.store.is_user_admin_in_group(group_id, user_id) + if not is_admin: + raise SynapseError(403, "User is not admin in group") + + order = content.get("order", None) + + is_public = _parse_visibility_from_contents(content) + + yield self.store.add_room_to_summary( + group_id=group_id, + room_id=room_id, + category_id=category_id, + order=order, + is_public=is_public, + ) + + defer.returnValue({}) + + @defer.inlineCallbacks + def delete_group_summary_room(self, group_id, user_id, room_id, category_id): + yield self.check_group_is_ours(group_id, and_exists=True) + + is_admin = yield self.store.is_user_admin_in_group(group_id, user_id) + if not is_admin: + raise SynapseError(403, "User is not admin in group") + + yield self.store.remove_room_from_summary( + group_id=group_id, + room_id=room_id, + category_id=category_id, + ) + + defer.returnValue({}) + + @defer.inlineCallbacks + def get_group_categories(self, group_id, user_id): + yield self.check_group_is_ours(group_id, and_exists=True) + + categories = yield self.store.get_group_categories( + group_id=group_id, + ) + defer.returnValue({"categories": categories}) + + @defer.inlineCallbacks + def get_group_category(self, group_id, user_id, category_id): + yield self.check_group_is_ours(group_id, and_exists=True) + + res = yield self.store.get_group_category( + group_id=group_id, + category_id=category_id, + ) + + defer.returnValue(res) + + @defer.inlineCallbacks + def update_group_category(self, group_id, user_id, category_id, content): + yield self.check_group_is_ours(group_id, and_exists=True) + + is_admin = yield self.store.is_user_admin_in_group(group_id, user_id) + if not is_admin: + raise SynapseError(403, "User is not admin in group") + + is_public = _parse_visibility_from_contents(content) + profile = content.get("profile") + + yield self.store.upsert_group_category( + group_id=group_id, + category_id=category_id, + is_public=is_public, + profile=profile, + ) + + defer.returnValue({}) + + @defer.inlineCallbacks + def delete_group_category(self, group_id, user_id, category_id): + yield self.check_group_is_ours(group_id, and_exists=True) + + is_admin = yield self.store.is_user_admin_in_group(group_id, user_id) + if not is_admin: + raise SynapseError(403, "User is not admin in group") + + yield self.store.remove_group_category( + group_id=group_id, + category_id=category_id, + ) + + defer.returnValue({}) + + @defer.inlineCallbacks + def get_group_roles(self, group_id, user_id): + yield self.check_group_is_ours(group_id, and_exists=True) + + roles = yield self.store.get_group_roles( + group_id=group_id, + ) + defer.returnValue({"roles": roles}) 
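+    # Roles are the user-side analogue of room categories: a short ID, a
+    # free-form JSON profile and an is_public flag, managed through the same
+    # get/upsert/delete pattern as the category methods above.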
+ + @defer.inlineCallbacks + def get_group_role(self, group_id, user_id, role_id): + yield self.check_group_is_ours(group_id, and_exists=True) + + res = yield self.store.get_group_role( + group_id=group_id, + role_id=role_id, + ) + defer.returnValue(res) + + @defer.inlineCallbacks + def update_group_role(self, group_id, user_id, role_id, content): + yield self.check_group_is_ours(group_id, and_exists=True) + + is_admin = yield self.store.is_user_admin_in_group(group_id, user_id) + if not is_admin: + raise SynapseError(403, "User is not admin in group") + + is_public = _parse_visibility_from_contents(content) + + profile = content.get("profile") + + yield self.store.upsert_group_role( + group_id=group_id, + role_id=role_id, + is_public=is_public, + profile=profile, + ) + + defer.returnValue({}) + + @defer.inlineCallbacks + def delete_group_role(self, group_id, user_id, role_id): + yield self.check_group_is_ours(group_id, and_exists=True) + + is_admin = yield self.store.is_user_admin_in_group(group_id, user_id) + if not is_admin: + raise SynapseError(403, "User is not admin in group") + + yield self.store.remove_group_role( + group_id=group_id, + role_id=role_id, + ) + + defer.returnValue({}) + + @defer.inlineCallbacks + def update_group_summary_user(self, group_id, requester_user_id, user_id, role_id, + content): + yield self.check_group_is_ours(group_id, and_exists=True) + + is_admin = yield self.store.is_user_admin_in_group(group_id, requester_user_id) + if not is_admin: + raise SynapseError(403, "User is not admin in group") + + order = content.get("order", None) + + is_public = _parse_visibility_from_contents(content) + + yield self.store.add_user_to_summary( + group_id=group_id, + user_id=user_id, + role_id=role_id, + order=order, + is_public=is_public, + ) + + defer.returnValue({}) + + @defer.inlineCallbacks + def delete_group_summary_user(self, group_id, requester_user_id, user_id, role_id): + yield self.check_group_is_ours(group_id, and_exists=True) + + is_admin = yield self.store.is_user_admin_in_group(group_id, requester_user_id) + if not is_admin: + raise SynapseError(403, "User is not admin in group") + + yield self.store.remove_user_from_summary( + group_id=group_id, + user_id=user_id, + role_id=role_id, + ) + + defer.returnValue({}) + @defer.inlineCallbacks def get_group_profile(self, group_id, requester_user_id): """Get the group profile as seen by requester_user_id @@ -210,7 +459,9 @@ class GroupsServerHandler(object): } if self.hs.is_mine_id(user_id): - raise NotImplementedError() + groups_local = self.hs.get_groups_local_handler() + res = yield groups_local.on_invite(group_id, user_id, content) + local_attestation = None else: local_attestation = self.attestations.create_attestation(group_id, user_id) content.update({ @@ -338,7 +589,8 @@ class GroupsServerHandler(object): if is_kick: if self.hs.is_mine_id(user_id): - raise NotImplementedError() + groups_local = self.hs.get_groups_local_handler() + yield groups_local.user_removed_from_group(group_id, user_id, {}) else: yield self.transport_client.remove_user_from_group_notification( get_domain_from_id(user_id), group_id, user_id, {} diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index 105ab9920e..f4818ff174 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -15,11 +15,16 @@ from twisted.internet import defer +from synapse.api.errors import SynapseError + from ._base import SQLBaseStore import ujson as json +_DEFAULT_CATEGORY_ID = "default" + + class 
GroupServerStore(SQLBaseStore): def get_group(self, group_id): return self._simple_select_one( @@ -64,6 +69,492 @@ class GroupServerStore(SQLBaseStore): desc="get_rooms_in_group", ) + def get_rooms_for_summary_by_category(self, group_id, include_private=False): + def _get_rooms_for_summary_txn(txn): + keyvalues = { + "group_id": group_id, + } + if not include_private: + keyvalues["is_public"] = True + + sql = """ + SELECT room_id, is_public, category_id, room_order + FROM group_summary_rooms + WHERE group_id = ? + """ + + if not include_private: + sql += " AND is_public = ?" + txn.execute(sql, (group_id, True)) + else: + txn.execute(sql, (group_id,)) + + rooms = [ + { + "room_id": row[0], + "is_public": row[1], + "category_id": row[2] if row[2] != _DEFAULT_CATEGORY_ID else None, + "order": row[3], + } + for row in txn + ] + + sql = """ + SELECT category_id, is_public, profile, cat_order + FROM group_summary_room_categories + INNER JOIN group_room_categories USING (group_id, category_id) + WHERE group_id = ? + """ + + if not include_private: + sql += " AND is_public = ?" + txn.execute(sql, (group_id, True)) + else: + txn.execute(sql, (group_id,)) + + categories = { + row[0]: { + "is_public": row[1], + "profile": json.loads(row[2]), + "order": row[3], + } + for row in txn + } + + return rooms, categories + return self.runInteraction( + "get_rooms_for_summary", _get_rooms_for_summary_txn + ) + + def add_room_to_summary(self, group_id, room_id, category_id, order, is_public): + return self.runInteraction( + "add_room_to_summary", self._add_room_to_summary_txn, + group_id, room_id, category_id, order, is_public, + ) + + def _add_room_to_summary_txn(self, txn, group_id, room_id, category_id, order, + is_public): + if category_id is None: + category_id = _DEFAULT_CATEGORY_ID + else: + cat_exists = self._simple_select_one_onecol_txn( + txn, + table="group_room_categories", + keyvalues={ + "group_id": group_id, + "category_id": category_id, + }, + retcol="group_id", + allow_none=True, + ) + if not cat_exists: + raise SynapseError(400, "Category doesn't exist") + + # TODO: Check room is part of group already + cat_exists = self._simple_select_one_onecol_txn( + txn, + table="group_summary_room_categories", + keyvalues={ + "group_id": group_id, + "category_id": category_id, + }, + retcol="group_id", + allow_none=True, + ) + if not cat_exists: + txn.execute(""" + INSERT INTO group_summary_room_categories + (group_id, category_id, cat_order) + SELECT ?, ?, COALESCE(MAX(cat_order), 1) + FROM group_summary_room_categories + WHERE group_id = ? AND category_id = ? + """, (group_id, category_id, group_id, category_id)) + + existing = self._simple_select_one_txn( + txn, + table="group_summary_rooms", + keyvalues={ + "group_id": group_id, + "room_id": room_id, + "category_id": category_id, + }, + retcols=("room_order", "is_public",), + allow_none=True, + ) + + if order is not None: + sql = """ + UPDATE group_summary_rooms SET room_order = room_order + 1 + WHERE group_id = ? AND category_id = ? AND room_order >= ? + """ + txn.execute(sql, (group_id, category_id, order,)) + elif not existing: + sql = """ + SELECT COALESCE(MAX(room_order), 0) + 1 FROM group_summary_rooms + WHERE group_id = ? AND category_id = ? 
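+                    -- i.e. default to appending after the category's current last entry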
+ """ + txn.execute(sql, (group_id, category_id,)) + order, = txn.fetchone() + + if existing: + to_update = {} + if order is not None: + to_update["room_order"] = order + if is_public is not None: + to_update["is_public"] = is_public + self._simple_update_txn( + txn, + table="group_summary_rooms", + keyvalues={ + "group_id": group_id, + "category_id": category_id, + "room_id": room_id, + }, + values=to_update, + ) + else: + if is_public is None: + is_public = True + + self._simple_insert_txn( + txn, + table="group_summary_rooms", + values={ + "group_id": group_id, + "category_id": category_id, + "room_id": room_id, + "room_order": order, + "is_public": is_public, + }, + ) + + def remove_room_from_summary(self, group_id, room_id, category_id): + if category_id is None: + category_id = _DEFAULT_CATEGORY_ID + + return self._simple_delete( + table="group_summary_rooms", + keyvalues={ + "group_id": group_id, + "category_id": category_id, + "room_id": room_id, + }, + desc="remove_room_from_summary", + ) + + @defer.inlineCallbacks + def get_group_categories(self, group_id): + rows = yield self._simple_select_list( + table="group_room_categories", + keyvalues={ + "group_id": group_id, + }, + retcols=("category_id", "is_public", "profile"), + desc="get_group_categories", + ) + + defer.returnValue({ + row["category_id"]: { + "is_public": row["is_public"], + "profile": json.loads(row["profile"]), + } + for row in rows + }) + + @defer.inlineCallbacks + def get_group_category(self, group_id, category_id): + category = yield self._simple_select_one( + table="group_room_categories", + keyvalues={ + "group_id": group_id, + "category_id": category_id, + }, + retcols=("is_public", "profile"), + desc="get_group_category", + ) + + category["profile"] = json.loads(category["profile"]) + + defer.returnValue(category) + + def upsert_group_category(self, group_id, category_id, profile, is_public): + insertion_values = {} + update_values = {"category_id": category_id} # This cannot be empty + + if profile is None: + insertion_values["profile"] = "{}" + else: + update_values["profile"] = json.dumps(profile) + + if is_public is None: + insertion_values["is_public"] = True + else: + update_values["is_public"] = is_public + + return self._simple_upsert( + table="group_room_categories", + keyvalues={ + "group_id": group_id, + "category_id": category_id, + }, + values=update_values, + insertion_values=insertion_values, + desc="upsert_group_category", + ) + + def remove_group_category(self, group_id, category_id): + return self._simple_delete( + table="group_room_categories", + keyvalues={ + "group_id": group_id, + "category_id": category_id, + }, + desc="remove_group_category", + ) + + @defer.inlineCallbacks + def get_group_roles(self, group_id): + rows = yield self._simple_select_list( + table="group_roles", + keyvalues={ + "group_id": group_id, + }, + retcols=("role_id", "is_public", "profile"), + desc="get_group_roles", + ) + + defer.returnValue({ + row["role_id"]: { + "is_public": row["is_public"], + "profile": json.loads(row["profile"]), + } + for row in rows + }) + + @defer.inlineCallbacks + def get_group_role(self, group_id, role_id): + role = yield self._simple_select_one( + table="group_roles", + keyvalues={ + "group_id": group_id, + "role_id": role_id, + }, + retcols=("is_public", "profile"), + desc="get_group_role", + ) + + role["profile"] = json.loads(role["profile"]) + + defer.returnValue(role) + + def upsert_group_role(self, group_id, role_id, profile, is_public): + insertion_values = {} + update_values = 
{"role_id": role_id} # This cannot be empty + + if profile is None: + insertion_values["profile"] = "{}" + else: + update_values["profile"] = json.dumps(profile) + + if is_public is None: + insertion_values["is_public"] = True + else: + update_values["is_public"] = is_public + + return self._simple_upsert( + table="group_roles", + keyvalues={ + "group_id": group_id, + "role_id": role_id, + }, + values=update_values, + insertion_values=insertion_values, + desc="upsert_group_role", + ) + + def remove_group_role(self, group_id, role_id): + return self._simple_delete( + table="group_roles", + keyvalues={ + "group_id": group_id, + "role_id": role_id, + }, + desc="remove_group_role", + ) + + def add_user_to_summary(self, group_id, user_id, role_id, order, is_public): + return self.runInteraction( + "add_user_to_summary", self._add_user_to_summary_txn, + group_id, user_id, role_id, order, is_public, + ) + + def _add_user_to_summary_txn(self, txn, group_id, user_id, role_id, order, + is_public): + if role_id is None: + role_id = _DEFAULT_CATEGORY_ID + else: + role_exists = self._simple_select_one_onecol_txn( + txn, + table="group_roles", + keyvalues={ + "group_id": group_id, + "role_id": role_id, + }, + retcol="group_id", + allow_none=True, + ) + if not role_exists: + raise SynapseError(400, "Role doesn't exist") + + # TODO: Check room is part of group already + role_exists = self._simple_select_one_onecol_txn( + txn, + table="group_summary_roles", + keyvalues={ + "group_id": group_id, + "role_id": role_id, + }, + retcol="group_id", + allow_none=True, + ) + if not role_exists: + txn.execute(""" + INSERT INTO group_summary_roles + (group_id, role_id, role_order) + SELECT ?, ?, COALESCE(MAX(role_order), 1) + FROM group_summary_roles + WHERE group_id = ? AND role_id = ? + """, (group_id, role_id, group_id, role_id)) + + existing = self._simple_select_one_txn( + txn, + table="group_summary_users", + keyvalues={ + "group_id": group_id, + "user_id": user_id, + "role_id": role_id, + }, + retcols=("user_order", "is_public",), + allow_none=True, + ) + + if order is not None: + sql = """ + UPDATE group_summary_users SET user_order = user_order + 1 + WHERE group_id = ? AND role_id = ? AND user_order >= ? + """ + txn.execute(sql, (group_id, role_id, order,)) + elif not existing: + sql = """ + SELECT COALESCE(MAX(user_order), 0) + 1 FROM group_summary_users + WHERE group_id = ? AND role_id = ? 
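+                -- i.e. append the user after the current last entry for this role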
+ """ + txn.execute(sql, (group_id, role_id,)) + order, = txn.fetchone() + + if existing: + to_update = {} + if order is not None: + to_update["user_order"] = order + if is_public is not None: + to_update["is_public"] = is_public + self._simple_update_txn( + txn, + table="group_summary_users", + keyvalues={ + "group_id": group_id, + "role_id": role_id, + "user_id": user_id, + }, + values=to_update, + ) + else: + if is_public is None: + is_public = True + + self._simple_insert_txn( + txn, + table="group_summary_users", + values={ + "group_id": group_id, + "role_id": role_id, + "user_id": user_id, + "user_order": order, + "is_public": is_public, + }, + ) + + def remove_user_from_summary(self, group_id, user_id, role_id): + if role_id is None: + role_id = _DEFAULT_CATEGORY_ID + + return self._simple_delete( + table="group_summary_users", + keyvalues={ + "group_id": group_id, + "role_id": role_id, + "user_id": user_id, + }, + desc="remove_user_from_summary", + ) + + def get_users_for_summary_by_role(self, group_id, include_private=False): + def _get_users_for_summary_txn(txn): + keyvalues = { + "group_id": group_id, + } + if not include_private: + keyvalues["is_public"] = True + + sql = """ + SELECT user_id, is_public, role_id, user_order + FROM group_summary_users + WHERE group_id = ? + """ + + if not include_private: + sql += " AND is_public = ?" + txn.execute(sql, (group_id, True)) + else: + txn.execute(sql, (group_id,)) + + users = [ + { + "user_id": row[0], + "is_public": row[1], + "role_id": row[2] if row[2] != _DEFAULT_CATEGORY_ID else None, + "order": row[3], + } + for row in txn + ] + + sql = """ + SELECT role_id, is_public, profile, role_order + FROM group_summary_roles + INNER JOIN group_roles USING (group_id, role_id) + WHERE group_id = ? + """ + + if not include_private: + sql += " AND is_public = ?" + txn.execute(sql, (group_id, True)) + else: + txn.execute(sql, (group_id,)) + + roles = { + row[0]: { + "is_public": row[1], + "profile": json.loads(row[2]), + "order": row[3], + } + for row in txn + } + + return users, roles + return self.runInteraction( + "get_users_for_summary_by_role", _get_users_for_summary_txn + ) + def is_user_in_group(self, user_id, group_id): return self._simple_select_one_onecol( table="group_users", @@ -223,6 +714,103 @@ class GroupServerStore(SQLBaseStore): desc="add_room_to_group", ) + @defer.inlineCallbacks + def register_user_group_membership(self, group_id, user_id, membership, + is_admin=False, content={}, + local_attestation=None, + remote_attestation=None, + ): + def _register_user_group_membership_txn(txn, next_id): + # TODO: Upsert? + self._simple_delete_txn( + txn, + table="local_group_membership", + keyvalues={ + "group_id": group_id, + "user_id": user_id, + }, + ) + self._simple_insert_txn( + txn, + table="local_group_membership", + values={ + "group_id": group_id, + "user_id": user_id, + "is_admin": is_admin, + "membership": membership, + "content": json.dumps(content), + }, + ) + self._simple_delete_txn( + txn, + table="local_group_updates", + keyvalues={ + "group_id": group_id, + "user_id": user_id, + "type": "membership", + }, + ) + self._simple_insert_txn( + txn, + table="local_group_updates", + values={ + "stream_id": next_id, + "group_id": group_id, + "user_id": user_id, + "type": "membership", + "content": json.dumps({"membership": membership, "content": content}), + } + ) + self._group_updates_stream_cache.entity_has_changed(user_id, next_id) + + # TODO: Insert profile to ensuer it comes down stream if its a join. 
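+            # Keep the attestation tables in step with the membership: a
+            # join records the local/remote attestations below, while any
+            # other membership change clears them out.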
+ + if membership == "join": + if local_attestation: + self._simple_insert_txn( + txn, + table="group_attestations_renewals", + values={ + "group_id": group_id, + "user_id": user_id, + "valid_until_ms": local_attestation["valid_until_ms"], + } + ) + if remote_attestation: + self._simple_insert_txn( + txn, + table="group_attestations_remote", + values={ + "group_id": group_id, + "user_id": user_id, + "valid_until_ms": remote_attestation["valid_until_ms"], + "attestation": json.dumps(remote_attestation), + } + ) + else: + self._simple_delete_txn( + txn, + table="group_attestations_renewals", + keyvalues={ + "group_id": group_id, + "user_id": user_id, + }, + ) + self._simple_delete_txn( + txn, + table="group_attestations_remote", + keyvalues={ + "group_id": group_id, + "user_id": user_id, + }, + ) + + with self._group_updates_id_gen.get_next() as next_id: + yield self.runInteraction( + "register_user_group_membership", + _register_user_group_membership_txn, next_id, + ) + @defer.inlineCallbacks def create_group(self, group_id, user_id, name, avatar_url, short_description, long_description,): @@ -238,6 +826,61 @@ class GroupServerStore(SQLBaseStore): desc="create_group", ) + def get_joined_groups(self, user_id): + return self._simple_select_onecol( + table="local_group_membership", + keyvalues={ + "user_id": user_id, + "membership": "join", + }, + retcol="group_id", + desc="get_joined_groups", + ) + + def get_all_groups_for_user(self, user_id, now_token): + def _get_all_groups_for_user_txn(txn): + sql = """ + SELECT group_id, type, membership, u.content + FROM local_group_updates AS u + INNER JOIN local_group_membership USING (group_id, user_id) + WHERE user_id = ? AND membership != 'leave' + AND stream_id <= ? + """ + txn.execute(sql, (user_id, now_token,)) + return self.cursor_to_dict(txn) + return self.runInteraction( + "get_all_groups_for_user", _get_all_groups_for_user_txn, + ) + + def get_groups_changes_for_user(self, user_id, from_token, to_token): + from_token = int(from_token) + has_changed = self._group_updates_stream_cache.has_entity_changed( + user_id, from_token, + ) + if not has_changed: + return [] + + def _get_groups_changes_for_user_txn(txn): + sql = """ + SELECT group_id, membership, type, u.content + FROM local_group_updates AS u + INNER JOIN local_group_membership USING (group_id, user_id) + WHERE user_id = ? AND ? < stream_id AND stream_id <= ? 
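+                -- i.e. only updates strictly after from_token, up to and including to_token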
+ """ + txn.execute(sql, (user_id, from_token, to_token,)) + return [{ + "group_id": group_id, + "membership": membership, + "type": gtype, + "content": json.loads(content_json), + } for group_id, membership, gtype, content_json in txn] + return self.runInteraction( + "get_groups_changes_for_user", _get_groups_changes_for_user_txn, + ) + + def get_group_stream_token(self): + return self._group_updates_id_gen.get_current_token() + def get_attestations_need_renewals(self, valid_until_ms): """Get all attestations that need to be renewed until givent time """ diff --git a/synapse/storage/schema/delta/43/group_server.sql b/synapse/storage/schema/delta/43/group_server.sql index c223ee275a..3013b89b7e 100644 --- a/synapse/storage/schema/delta/43/group_server.sql +++ b/synapse/storage/schema/delta/43/group_server.sql @@ -56,6 +56,62 @@ CREATE INDEX groups_rooms_g_idx ON group_rooms(group_id, room_id); CREATE INDEX groups_rooms_r_idx ON group_rooms(room_id); +CREATE TABLE group_summary_rooms ( + group_id TEXT NOT NULL, + room_id TEXT NOT NULL, + category_id TEXT NOT NULL, + room_order BIGINT NOT NULL, + is_public BOOLEAN NOT NULL, + UNIQUE (group_id, category_id, room_id, room_order), + CHECK (room_order > 0) +); + +CREATE UNIQUE INDEX group_summary_rooms_g_idx ON group_summary_rooms(group_id, room_id, category_id); + +CREATE TABLE group_summary_room_categories ( + group_id TEXT NOT NULL, + category_id TEXT NOT NULL, + cat_order BIGINT NOT NULL, + UNIQUE (group_id, category_id, cat_order), + CHECK (cat_order > 0) +); + +CREATE TABLE group_room_categories ( + group_id TEXT NOT NULL, + category_id TEXT NOT NULL, + profile TEXT NOT NULL, + is_public BOOLEAN NOT NULL, + UNIQUE (group_id, category_id) +); + + +CREATE TABLE group_summary_users ( + group_id TEXT NOT NULL, + user_id TEXT NOT NULL, + role_id TEXT NOT NULL, + user_order BIGINT NOT NULL, + is_public BOOLEAN NOT NULL +); + +CREATE INDEX group_summary_users_g_idx ON group_summary_users(group_id); + +CREATE TABLE group_summary_roles ( + group_id TEXT NOT NULL, + role_id TEXT NOT NULL, + role_order BIGINT NOT NULL, + UNIQUE (group_id, role_id, role_order), + CHECK (role_order > 0) +); + +CREATE TABLE group_roles ( + group_id TEXT NOT NULL, + role_id TEXT NOT NULL, + profile TEXT NOT NULL, + is_public BOOLEAN NOT NULL, + UNIQUE (group_id, role_id) +); + + -- List of attestations we've given out and need to renew CREATE TABLE group_attestations_renewals ( group_id TEXT NOT NULL, From 8d55877c9eaffc8dcaf26cdc7a032c774c9a2f5b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 12 Jul 2017 11:43:39 +0100 Subject: [PATCH 0030/1637] Simplify checking if admin --- synapse/groups/groups_server.py | 72 +++++++++------------------------ 1 file changed, 19 insertions(+), 53 deletions(-) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 29a911e18e..ec45da2d7a 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -50,7 +50,7 @@ class GroupsServerHandler(object): hs.get_groups_attestation_renewer() @defer.inlineCallbacks - def check_group_is_ours(self, group_id, and_exists=False): + def check_group_is_ours(self, group_id, and_exists=False, and_is_admin=None): """Check that the group is ours, and optionally if it exists. If group does exist then return group. 
@@ -62,6 +62,11 @@ class GroupsServerHandler(object): if and_exists and not group: raise SynapseError(404, "Unknown group") + if and_is_admin: + is_admin = yield self.store.is_user_admin_in_group(group_id, and_is_admin) + if not is_admin: + raise SynapseError(403, "User is not admin in group") + defer.returnValue(group) @defer.inlineCallbacks @@ -128,11 +133,7 @@ class GroupsServerHandler(object): @defer.inlineCallbacks def update_group_summary_room(self, group_id, user_id, room_id, category_id, content): - yield self.check_group_is_ours(group_id, and_exists=True) - - is_admin = yield self.store.is_user_admin_in_group(group_id, user_id) - if not is_admin: - raise SynapseError(403, "User is not admin in group") + yield self.check_group_is_ours(group_id, and_exists=True, and_is_admin=user_id) order = content.get("order", None) @@ -150,11 +151,7 @@ class GroupsServerHandler(object): @defer.inlineCallbacks def delete_group_summary_room(self, group_id, user_id, room_id, category_id): - yield self.check_group_is_ours(group_id, and_exists=True) - - is_admin = yield self.store.is_user_admin_in_group(group_id, user_id) - if not is_admin: - raise SynapseError(403, "User is not admin in group") + yield self.check_group_is_ours(group_id, and_exists=True, and_is_admin=user_id) yield self.store.remove_room_from_summary( group_id=group_id, @@ -186,11 +183,7 @@ class GroupsServerHandler(object): @defer.inlineCallbacks def update_group_category(self, group_id, user_id, category_id, content): - yield self.check_group_is_ours(group_id, and_exists=True) - - is_admin = yield self.store.is_user_admin_in_group(group_id, user_id) - if not is_admin: - raise SynapseError(403, "User is not admin in group") + yield self.check_group_is_ours(group_id, and_exists=True, and_is_admin=user_id) is_public = _parse_visibility_from_contents(content) profile = content.get("profile") @@ -206,11 +199,7 @@ class GroupsServerHandler(object): @defer.inlineCallbacks def delete_group_category(self, group_id, user_id, category_id): - yield self.check_group_is_ours(group_id, and_exists=True) - - is_admin = yield self.store.is_user_admin_in_group(group_id, user_id) - if not is_admin: - raise SynapseError(403, "User is not admin in group") + yield self.check_group_is_ours(group_id, and_exists=True, and_is_admin=user_id) yield self.store.remove_group_category( group_id=group_id, @@ -240,11 +229,7 @@ class GroupsServerHandler(object): @defer.inlineCallbacks def update_group_role(self, group_id, user_id, role_id, content): - yield self.check_group_is_ours(group_id, and_exists=True) - - is_admin = yield self.store.is_user_admin_in_group(group_id, user_id) - if not is_admin: - raise SynapseError(403, "User is not admin in group") + yield self.check_group_is_ours(group_id, and_exists=True, and_is_admin=user_id) is_public = _parse_visibility_from_contents(content) @@ -261,11 +246,7 @@ class GroupsServerHandler(object): @defer.inlineCallbacks def delete_group_role(self, group_id, user_id, role_id): - yield self.check_group_is_ours(group_id, and_exists=True) - - is_admin = yield self.store.is_user_admin_in_group(group_id, user_id) - if not is_admin: - raise SynapseError(403, "User is not admin in group") + yield self.check_group_is_ours(group_id, and_exists=True, and_is_admin=user_id) yield self.store.remove_group_role( group_id=group_id, @@ -277,11 +258,7 @@ class GroupsServerHandler(object): @defer.inlineCallbacks def update_group_summary_user(self, group_id, requester_user_id, user_id, role_id, content): - yield 
self.check_group_is_ours(group_id, and_exists=True) - - is_admin = yield self.store.is_user_admin_in_group(group_id, requester_user_id) - if not is_admin: - raise SynapseError(403, "User is not admin in group") + yield self.check_group_is_ours(group_id, and_exists=True, and_is_admin=user_id) order = content.get("order", None) @@ -299,11 +276,7 @@ class GroupsServerHandler(object): @defer.inlineCallbacks def delete_group_summary_user(self, group_id, requester_user_id, user_id, role_id): - yield self.check_group_is_ours(group_id, and_exists=True) - - is_admin = yield self.store.is_user_admin_in_group(group_id, requester_user_id) - if not is_admin: - raise SynapseError(403, "User is not admin in group") + yield self.check_group_is_ours(group_id, and_exists=True, and_is_admin=user_id) yield self.store.remove_user_from_summary( group_id=group_id, @@ -419,12 +392,9 @@ class GroupsServerHandler(object): def add_room(self, group_id, requester_user_id, room_id, content): """Add room to group """ - - yield self.check_group_is_ours(group_id, and_exists=True) - - is_admin = yield self.store.is_user_admin_in_group(group_id, requester_user_id) - if not is_admin: - raise SynapseError(403, "User is not admin in group") + yield self.check_group_is_ours( + group_id, and_exists=True, and_is_admin=requester_user_id + ) # TODO: Check if room has already been added @@ -439,13 +409,9 @@ class GroupsServerHandler(object): """Invite user to group """ - group = yield self.check_group_is_ours(group_id, and_exists=True) - - is_admin = yield self.store.is_user_admin_in_group( - group_id, requester_user_id + group = yield self.check_group_is_ours( + group_id, and_exists=True, and_is_admin=requester_user_id ) - if not is_admin: - raise SynapseError(403, "User is not admin in group") # TODO: Check if user knocked # TODO: Check if user is already invited From 26451a09eb938e6a72be3d77ff8c9e3fd2b33539 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 12 Jul 2017 14:11:59 +0100 Subject: [PATCH 0031/1637] Comments --- synapse/groups/groups_server.py | 38 +++++++++++++++++++ synapse/storage/group_server.py | 29 ++++++++++++++ .../storage/schema/delta/43/group_server.sql | 17 ++++++--- 3 files changed, 79 insertions(+), 5 deletions(-) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index ec45da2d7a..83dfcd0fd4 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -54,6 +54,12 @@ class GroupsServerHandler(object): """Check that the group is ours, and optionally if it exists. If group does exist then return group. + + Args: + group_id (str) + and_exists (bool): whether to also check if group exists + and_is_admin (str): whether to also check if given str is a user_id + that is an admin """ if not self.is_mine_id(group_id): raise SynapseError(400, "Group not on this server") @@ -71,6 +77,14 @@ class GroupsServerHandler(object): @defer.inlineCallbacks def get_group_summary(self, group_id, requester_user_id): + """Get the summary for a group as seen by requester_user_id. + + The group summary consists of the profile of the room, and a curated + list of users and rooms. These list *may* be organised by role/category. + The roles/categories are ordered, and so are the users/rooms within them. + + A user/room may appear in multiple roles/categories. 
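+
+        Entries flagged non-public are only included if requester_user_id
+        is a member of the group.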
+ """ yield self.check_group_is_ours(group_id, and_exists=True) is_user_in_group = yield self.store.is_user_in_group(requester_user_id, group_id) @@ -133,6 +147,8 @@ class GroupsServerHandler(object): @defer.inlineCallbacks def update_group_summary_room(self, group_id, user_id, room_id, category_id, content): + """Add/update a room to the group summary + """ yield self.check_group_is_ours(group_id, and_exists=True, and_is_admin=user_id) order = content.get("order", None) @@ -151,6 +167,8 @@ class GroupsServerHandler(object): @defer.inlineCallbacks def delete_group_summary_room(self, group_id, user_id, room_id, category_id): + """Remove a room from the summary + """ yield self.check_group_is_ours(group_id, and_exists=True, and_is_admin=user_id) yield self.store.remove_room_from_summary( @@ -163,6 +181,8 @@ class GroupsServerHandler(object): @defer.inlineCallbacks def get_group_categories(self, group_id, user_id): + """Get all categories in a group (as seen by user) + """ yield self.check_group_is_ours(group_id, and_exists=True) categories = yield self.store.get_group_categories( @@ -172,6 +192,8 @@ class GroupsServerHandler(object): @defer.inlineCallbacks def get_group_category(self, group_id, user_id, category_id): + """Get a specific category in a group (as seen by user) + """ yield self.check_group_is_ours(group_id, and_exists=True) res = yield self.store.get_group_category( @@ -183,6 +205,8 @@ class GroupsServerHandler(object): @defer.inlineCallbacks def update_group_category(self, group_id, user_id, category_id, content): + """Add/Update a group category + """ yield self.check_group_is_ours(group_id, and_exists=True, and_is_admin=user_id) is_public = _parse_visibility_from_contents(content) @@ -199,6 +223,8 @@ class GroupsServerHandler(object): @defer.inlineCallbacks def delete_group_category(self, group_id, user_id, category_id): + """Delete a group category + """ yield self.check_group_is_ours(group_id, and_exists=True, and_is_admin=user_id) yield self.store.remove_group_category( @@ -210,6 +236,8 @@ class GroupsServerHandler(object): @defer.inlineCallbacks def get_group_roles(self, group_id, user_id): + """Get all roles in a group (as seen by user) + """ yield self.check_group_is_ours(group_id, and_exists=True) roles = yield self.store.get_group_roles( @@ -219,6 +247,8 @@ class GroupsServerHandler(object): @defer.inlineCallbacks def get_group_role(self, group_id, user_id, role_id): + """Get a specific role in a group (as seen by user) + """ yield self.check_group_is_ours(group_id, and_exists=True) res = yield self.store.get_group_role( @@ -229,6 +259,8 @@ class GroupsServerHandler(object): @defer.inlineCallbacks def update_group_role(self, group_id, user_id, role_id, content): + """Add/update a role in a group + """ yield self.check_group_is_ours(group_id, and_exists=True, and_is_admin=user_id) is_public = _parse_visibility_from_contents(content) @@ -246,6 +278,8 @@ class GroupsServerHandler(object): @defer.inlineCallbacks def delete_group_role(self, group_id, user_id, role_id): + """Remove role from group + """ yield self.check_group_is_ours(group_id, and_exists=True, and_is_admin=user_id) yield self.store.remove_group_role( @@ -258,6 +292,8 @@ class GroupsServerHandler(object): @defer.inlineCallbacks def update_group_summary_user(self, group_id, requester_user_id, user_id, role_id, content): + """Add/update a users entry in the group summary + """ yield self.check_group_is_ours(group_id, and_exists=True, and_is_admin=user_id) order = content.get("order", None) @@ -276,6 +312,8 @@ 
class GroupsServerHandler(object): @defer.inlineCallbacks def delete_group_summary_user(self, group_id, requester_user_id, user_id, role_id): + """Remove a user from the group summary + """ yield self.check_group_is_ours(group_id, and_exists=True, and_is_admin=user_id) yield self.store.remove_user_from_summary( diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index f4818ff174..18bfaeda6e 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -22,6 +22,8 @@ from ._base import SQLBaseStore import ujson as json +# The category ID for the "default" category. We don't store as null in the +# database to avoid the fun of null != null _DEFAULT_CATEGORY_ID = "default" @@ -70,6 +72,10 @@ class GroupServerStore(SQLBaseStore): ) def get_rooms_for_summary_by_category(self, group_id, include_private=False): + """Get the rooms and categories that should be included in a summary request + + Returns ([rooms], [categories]) + """ def _get_rooms_for_summary_txn(txn): keyvalues = { "group_id": group_id, @@ -134,6 +140,14 @@ class GroupServerStore(SQLBaseStore): def _add_room_to_summary_txn(self, txn, group_id, room_id, category_id, order, is_public): + """Add room to summary. + + This automatically adds the room to the end of the list of rooms to be + included in the summary response. If a role is given then user will + be added under that category (the category will automatically be added tothe + the summary if a user is listed under that role in the summary). + """ + if category_id is None: category_id = _DEFAULT_CATEGORY_ID else: @@ -278,6 +292,8 @@ class GroupServerStore(SQLBaseStore): defer.returnValue(category) def upsert_group_category(self, group_id, category_id, profile, is_public): + """Add/update room category for group + """ insertion_values = {} update_values = {"category_id": category_id} # This cannot be empty @@ -348,6 +364,8 @@ class GroupServerStore(SQLBaseStore): defer.returnValue(role) def upsert_group_role(self, group_id, role_id, profile, is_public): + """Add/remove user role + """ insertion_values = {} update_values = {"role_id": role_id} # This cannot be empty @@ -390,6 +408,13 @@ class GroupServerStore(SQLBaseStore): def _add_user_to_summary_txn(self, txn, group_id, user_id, role_id, order, is_public): + """Add user to summary. + + This automatically adds the user to the end of the list of users to be + included in the summary response. If a role is given then user will + be added under that role (the role will automatically be added to the + summary if a user is listed under that role in the summary). 
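+
+        If an order is given, existing entries at or after that position
+        have their order incremented, e.g. order=1 puts the user first.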
+ """ if role_id is None: role_id = _DEFAULT_CATEGORY_ID else: @@ -499,6 +524,10 @@ class GroupServerStore(SQLBaseStore): ) def get_users_for_summary_by_role(self, group_id, include_private=False): + """Get the users and roles that should be included in a summary request + + Returns ([users], [roles]) + """ def _get_users_for_summary_txn(txn): keyvalues = { "group_id": group_id, diff --git a/synapse/storage/schema/delta/43/group_server.sql b/synapse/storage/schema/delta/43/group_server.sql index 3013b89b7e..472aab0a78 100644 --- a/synapse/storage/schema/delta/43/group_server.sql +++ b/synapse/storage/schema/delta/43/group_server.sql @@ -56,18 +56,21 @@ CREATE INDEX groups_rooms_g_idx ON group_rooms(group_id, room_id); CREATE INDEX groups_rooms_r_idx ON group_rooms(room_id); +-- Rooms to include in the summary CREATE TABLE group_summary_rooms ( group_id TEXT NOT NULL, room_id TEXT NOT NULL, category_id TEXT NOT NULL, room_order BIGINT NOT NULL, - is_public BOOLEAN NOT NULL, + is_public BOOLEAN NOT NULL, -- whether the room should be show to everyone UNIQUE (group_id, category_id, room_id, room_order), CHECK (room_order > 0) ); CREATE UNIQUE INDEX group_summary_rooms_g_idx ON group_summary_rooms(group_id, room_id, category_id); + +-- Categories to include in the summary CREATE TABLE group_summary_room_categories ( group_id TEXT NOT NULL, category_id TEXT NOT NULL, @@ -76,25 +79,27 @@ CREATE TABLE group_summary_room_categories ( CHECK (cat_order > 0) ); +-- The categories in the group CREATE TABLE group_room_categories ( group_id TEXT NOT NULL, category_id TEXT NOT NULL, profile TEXT NOT NULL, - is_public BOOLEAN NOT NULL, + is_public BOOLEAN NOT NULL, -- whether the category should be show to everyone UNIQUE (group_id, category_id) ); - +-- The users to include in the group summary CREATE TABLE group_summary_users ( group_id TEXT NOT NULL, user_id TEXT NOT NULL, role_id TEXT NOT NULL, user_order BIGINT NOT NULL, - is_public BOOLEAN NOT NULL + is_public BOOLEAN NOT NULL -- whether the user should be show to everyone ); CREATE INDEX group_summary_users_g_idx ON group_summary_users(group_id); +-- The roles to include in the group summary CREATE TABLE group_summary_roles ( group_id TEXT NOT NULL, role_id TEXT NOT NULL, @@ -103,11 +108,13 @@ CREATE TABLE group_summary_roles ( CHECK (role_order > 0) ); + +-- The roles in a groups CREATE TABLE group_roles ( group_id TEXT NOT NULL, role_id TEXT NOT NULL, profile TEXT NOT NULL, - is_public BOOLEAN NOT NULL, + is_public BOOLEAN NOT NULL, -- whether the role should be show to everyone UNIQUE (group_id, role_id) ); From 5bbb53580a3fc732f0e4aab49f7893bd4f7e2a43 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 13 Jul 2017 10:25:29 +0100 Subject: [PATCH 0032/1637] raise NotImplementedError --- synapse/groups/groups_server.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 83dfcd0fd4..dc9d361f57 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -463,9 +463,7 @@ class GroupsServerHandler(object): } if self.hs.is_mine_id(user_id): - groups_local = self.hs.get_groups_local_handler() - res = yield groups_local.on_invite(group_id, user_id, content) - local_attestation = None + raise NotImplementedError() else: local_attestation = self.attestations.create_attestation(group_id, user_id) content.update({ @@ -593,8 +591,7 @@ class GroupsServerHandler(object): if is_kick: if self.hs.is_mine_id(user_id): - groups_local = 
self.hs.get_groups_local_handler() - yield groups_local.user_removed_from_group(group_id, user_id, {}) + raise NotImplementedError() else: yield self.transport_client.remove_user_from_group_notification( get_domain_from_id(user_id), group_id, user_id, {} From 7a39da8cc6cac4014789a29d9abaf48ec13971d5 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 13 Jul 2017 11:13:19 +0100 Subject: [PATCH 0033/1637] Add summary APIs to federation --- synapse/federation/transport/server.py | 234 +++++++++++++++++++++++-- 1 file changed, 223 insertions(+), 11 deletions(-) diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index bbb66190e0..1ea2b37ce8 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -615,8 +615,8 @@ class FederationGroupsProfileServlet(BaseFederationServlet): PATH = "/groups/(?P[^/]*)/profile$" @defer.inlineCallbacks - def on_POST(self, origin, content, query, group_id): - requester_user_id = content["requester_user_id"] + def on_GET(self, origin, content, query, group_id): + requester_user_id = query["requester_user_id"] if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") @@ -631,8 +631,8 @@ class FederationGroupsSummaryServlet(BaseFederationServlet): PATH = "/groups/(?P[^/]*)/summary$" @defer.inlineCallbacks - def on_POST(self, origin, content, query, group_id): - requester_user_id = content["requester_user_id"] + def on_GET(self, origin, content, query, group_id): + requester_user_id = query["requester_user_id"] if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") @@ -649,8 +649,8 @@ class FederationGroupsRoomsServlet(BaseFederationServlet): PATH = "/groups/(?P[^/]*)/rooms$" @defer.inlineCallbacks - def on_POST(self, origin, content, query, group_id): - requester_user_id = content["requester_user_id"] + def on_GET(self, origin, content, query, group_id): + requester_user_id = query["requester_user_id"] if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") @@ -668,7 +668,7 @@ class FederationGroupsAddRoomsServlet(BaseFederationServlet): @defer.inlineCallbacks def on_POST(self, origin, content, query, group_id, room_id): - requester_user_id = content["requester_user_id"] + requester_user_id = query["requester_user_id"] if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") @@ -685,8 +685,8 @@ class FederationGroupsUsersServlet(BaseFederationServlet): PATH = "/groups/(?P[^/]*)/users$" @defer.inlineCallbacks - def on_POST(self, origin, content, query, group_id): - requester_user_id = content["requester_user_id"] + def on_GET(self, origin, content, query, group_id): + requester_user_id = query["requester_user_id"] if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") @@ -704,7 +704,7 @@ class FederationGroupsInviteServlet(BaseFederationServlet): @defer.inlineCallbacks def on_POST(self, origin, content, query, group_id, user_id): - requester_user_id = content["requester_user_id"] + requester_user_id = query["requester_user_id"] if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") @@ -739,7 +739,7 @@ class FederationGroupsRemoveUserServlet(BaseFederationServlet): @defer.inlineCallbacks def on_POST(self, origin, 
content, query, group_id, user_id): - requester_user_id = content["requester_user_id"] + requester_user_id = query["requester_user_id"] if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") @@ -766,6 +766,212 @@ class FederationGroupsRenewAttestaionServlet(BaseFederationServlet): defer.returnValue((200, new_content)) +class FederationGroupsSummaryRoomsServlet(BaseFederationServlet): + """Add/remove a room from the group summary, with optional category. + + Matches both: + - /groups/:group/summary/rooms/:room_id + - /groups/:group/summary/categories/:category/rooms/:room_id + """ + PATH = ( + "/groups/(?P[^/]*)/summary" + "(/categories/(?P[^/]+))?" + "/rooms/(?P[^/]*)$" + ) + + @defer.inlineCallbacks + def on_POST(self, origin, content, query, group_id, category_id, room_id): + requester_user_id = query["requester_user_id"] + if get_domain_from_id(requester_user_id) != origin: + raise SynapseError(403, "requester_user_id doesn't match origin") + + resp = yield self.handler.update_group_summary_room( + group_id, requester_user_id, + room_id=room_id, + category_id=category_id, + content=content, + ) + + defer.returnValue((200, resp)) + + @defer.inlineCallbacks + def on_DELETE(self, origin, content, query, group_id, category_id, room_id): + requester_user_id = query["requester_user_id"] + if get_domain_from_id(requester_user_id) != origin: + raise SynapseError(403, "requester_user_id doesn't match origin") + + resp = yield self.handler.delete_group_summary_room( + group_id, requester_user_id, + room_id=room_id, + category_id=category_id, + ) + + defer.returnValue((200, resp)) + + +class FederationGroupsCategoriesServlet(BaseFederationServlet): + PATH = ( + "/groups/(?P[^/]*)/categories/$" + ) + + @defer.inlineCallbacks + def on_GET(self, origin, content, query, group_id): + requester_user_id = query["requester_user_id"] + if get_domain_from_id(requester_user_id) != origin: + raise SynapseError(403, "requester_user_id doesn't match origin") + + resp = yield self.handler.get_group_categories( + group_id, requester_user_id, + ) + + defer.returnValue((200, resp)) + + +class FederationGroupsCategoryServlet(BaseFederationServlet): + PATH = ( + "/groups/(?P[^/]*)/categories/(?P[^/]+)$" + ) + + @defer.inlineCallbacks + def on_GET(self, origin, content, query, group_id, category_id): + requester_user_id = query["requester_user_id"] + if get_domain_from_id(requester_user_id) != origin: + raise SynapseError(403, "requester_user_id doesn't match origin") + + resp = yield self.handler.get_group_category( + group_id, requester_user_id, category_id + ) + + defer.returnValue((200, resp)) + + @defer.inlineCallbacks + def on_POST(self, origin, content, query, group_id, category_id): + requester_user_id = query["requester_user_id"] + if get_domain_from_id(requester_user_id) != origin: + raise SynapseError(403, "requester_user_id doesn't match origin") + + resp = yield self.handler.upsert_group_category( + group_id, requester_user_id, category_id, content, + ) + + defer.returnValue((200, resp)) + + @defer.inlineCallbacks + def on_DELETE(self, origin, content, query, group_id, category_id): + requester_user_id = query["requester_user_id"] + if get_domain_from_id(requester_user_id) != origin: + raise SynapseError(403, "requester_user_id doesn't match origin") + + resp = yield self.handler.delete_group_category( + group_id, requester_user_id, category_id, + ) + + defer.returnValue((200, resp)) + + +class 
FederationGroupsRolesServlet(BaseFederationServlet): + PATH = ( + "/groups/(?P[^/]*)/roles/$" + ) + + @defer.inlineCallbacks + def on_GET(self, origin, content, query, group_id): + requester_user_id = query["requester_user_id"] + if get_domain_from_id(requester_user_id) != origin: + raise SynapseError(403, "requester_user_id doesn't match origin") + + resp = yield self.handler.get_group_roles( + group_id, requester_user_id, + ) + + defer.returnValue((200, resp)) + + +class FederationGroupsRoleServlet(BaseFederationServlet): + PATH = ( + "/groups/(?P[^/]*)/roles/(?P[^/]+)$" + ) + + @defer.inlineCallbacks + def on_GET(self, origin, content, query, group_id, role_id): + requester_user_id = query["requester_user_id"] + if get_domain_from_id(requester_user_id) != origin: + raise SynapseError(403, "requester_user_id doesn't match origin") + + resp = yield self.handler.get_group_role( + group_id, requester_user_id, role_id + ) + + defer.returnValue((200, resp)) + + @defer.inlineCallbacks + def on_POST(self, origin, content, query, group_id, role_id): + requester_user_id = query["requester_user_id"] + if get_domain_from_id(requester_user_id) != origin: + raise SynapseError(403, "requester_user_id doesn't match origin") + + resp = yield self.handler.update_group_role( + group_id, requester_user_id, role_id, content, + ) + + defer.returnValue((200, resp)) + + @defer.inlineCallbacks + def on_DELETE(self, origin, content, query, group_id, role_id): + requester_user_id = query["requester_user_id"] + if get_domain_from_id(requester_user_id) != origin: + raise SynapseError(403, "requester_user_id doesn't match origin") + + resp = yield self.handler.delete_group_role( + group_id, requester_user_id, role_id, + ) + + defer.returnValue((200, resp)) + + +class FederationGroupsSummaryUsersServlet(BaseFederationServlet): + """Add/remove a user from the group summary, with optional role. + + Matches both: + - /groups/:group/summary/users/:user_id + - /groups/:group/summary/roles/:role/users/:user_id + """ + PATH = ( + "/groups/(?P[^/]*)/summary" + "(/roles/(?P[^/]+))?" 
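+        # the role segment is optional, hence the trailing '?'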
+ "/users/(?P[^/]*)$" + ) + + @defer.inlineCallbacks + def on_POST(self, origin, content, query, group_id, role_id, user_id): + requester_user_id = query["requester_user_id"] + if get_domain_from_id(requester_user_id) != origin: + raise SynapseError(403, "requester_user_id doesn't match origin") + + resp = yield self.handler.update_group_summary_user( + group_id, requester_user_id, + user_id=user_id, + role_id=role_id, + content=content, + ) + + defer.returnValue((200, resp)) + + @defer.inlineCallbacks + def on_DELETE(self, origin, content, query, group_id, role_id, user_id): + requester_user_id = query["requester_user_id"] + if get_domain_from_id(requester_user_id) != origin: + raise SynapseError(403, "requester_user_id doesn't match origin") + + resp = yield self.handler.delete_group_summary_user( + group_id, requester_user_id, + user_id=user_id, + role_id=role_id, + ) + + defer.returnValue((200, resp)) + + FEDERATION_SERVLET_CLASSES = ( FederationSendServlet, FederationPullServlet, @@ -806,6 +1012,12 @@ GROUP_SERVER_SERVLET_CLASSES = ( FederationGroupsInviteServlet, FederationGroupsAcceptInviteServlet, FederationGroupsRemoveUserServlet, + FederationGroupsSummaryRoomsServlet, + FederationGroupsCategoriesServlet, + FederationGroupsCategoryServlet, + FederationGroupsRolesServlet, + FederationGroupsRoleServlet, + FederationGroupsSummaryUsersServlet, ) From a78cda4bafd1eb33a40e8d841de311ea2dbbc086 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 13 Jul 2017 11:17:07 +0100 Subject: [PATCH 0034/1637] Remove TODO --- synapse/groups/groups_server.py | 1 - 1 file changed, 1 deletion(-) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index dc9d361f57..a00bafe3af 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -96,7 +96,6 @@ class GroupsServerHandler(object): ) # TODO: Add profiles to users - # TODO: Add assestations to users rooms, categories = yield self.store.get_rooms_for_summary_by_category( group_id, include_private=is_user_in_group, From f60218ec412dd9ef13768d7c216da982f5eb6870 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 13 Jul 2017 11:23:53 +0100 Subject: [PATCH 0035/1637] Push: Don't acquire lock unless necessary --- synapse/push/bulk_push_rule_evaluator.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index 803ac3e75b..f304f4daf2 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -213,6 +213,10 @@ class RulesForRoom(object): """ state_group = context.state_group + if state_group and self.state_group == state_group: + logger.debug("Using cached rules for %r", self.room_id) + defer.returnValue(self.rules_by_user) + with (yield self.linearizer.queue(())): if state_group and self.state_group == state_group: logger.debug("Using cached rules for %r", self.room_id) From 8575e3160f98a0b33cd0ec6080389701dcb535e8 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 13 Jul 2017 13:32:40 +0100 Subject: [PATCH 0036/1637] Comments --- synapse/federation/transport/server.py | 8 ++++++ synapse/storage/group_server.py | 36 +++++++++++++++++--------- 2 files changed, 32 insertions(+), 12 deletions(-) diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 1ea2b37ce8..304c2a2a4c 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -810,6 +810,8 @@ class 
FederationGroupsSummaryRoomsServlet(BaseFederationServlet): class FederationGroupsCategoriesServlet(BaseFederationServlet): + """Get all categories for a group + """ PATH = ( "/groups/(?P[^/]*)/categories/$" ) @@ -828,6 +830,8 @@ class FederationGroupsCategoriesServlet(BaseFederationServlet): class FederationGroupsCategoryServlet(BaseFederationServlet): + """Add/remove/get a category in a group + """ PATH = ( "/groups/(?P[^/]*)/categories/(?P[^/]+)$" ) @@ -870,6 +874,8 @@ class FederationGroupsCategoryServlet(BaseFederationServlet): class FederationGroupsRolesServlet(BaseFederationServlet): + """Get roles in a group + """ PATH = ( "/groups/(?P[^/]*)/roles/$" ) @@ -888,6 +894,8 @@ class FederationGroupsRolesServlet(BaseFederationServlet): class FederationGroupsRoleServlet(BaseFederationServlet): + """Add/remove/get a role in a group + """ PATH = ( "/groups/(?P[^/]*)/roles/(?P[^/]+)$" ) diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index 18bfaeda6e..b328ef8bc4 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -140,12 +140,16 @@ class GroupServerStore(SQLBaseStore): def _add_room_to_summary_txn(self, txn, group_id, room_id, category_id, order, is_public): - """Add room to summary. + """Add (or update) room's entry in summary. - This automatically adds the room to the end of the list of rooms to be - included in the summary response. If a role is given then user will - be added under that category (the category will automatically be added tothe - the summary if a user is listed under that role in the summary). + Args: + group_id (str) + room_id (str) + category_id (str): If not None then adds the category to the end of + the summary if its not already there. [Optional] + order (int): If not None inserts the room at that position, e.g. + an order of 1 will put the room first. Otherwise, the room gets + added to the end. """ if category_id is None: @@ -164,7 +168,7 @@ class GroupServerStore(SQLBaseStore): if not cat_exists: raise SynapseError(400, "Category doesn't exist") - # TODO: Check room is part of group already + # TODO: Check category is part of summary already cat_exists = self._simple_select_one_onecol_txn( txn, table="group_summary_room_categories", @@ -176,6 +180,7 @@ class GroupServerStore(SQLBaseStore): allow_none=True, ) if not cat_exists: + # If not, add it with an order larger than all others txn.execute(""" INSERT INTO group_summary_room_categories (group_id, category_id, cat_order) @@ -197,6 +202,7 @@ class GroupServerStore(SQLBaseStore): ) if order is not None: + # Shuffle other room orders that come after the given order sql = """ UPDATE group_summary_rooms SET room_order = room_order + 1 WHERE group_id = ? AND category_id = ? AND room_order >= ? @@ -408,12 +414,16 @@ class GroupServerStore(SQLBaseStore): def _add_user_to_summary_txn(self, txn, group_id, user_id, role_id, order, is_public): - """Add user to summary. + """Add (or update) user's entry in summary. - This automatically adds the user to the end of the list of users to be - included in the summary response. If a role is given then user will - be added under that role (the role will automatically be added to the - summary if a user is listed under that role in the summary). + Args: + group_id (str) + user_id (str) + role_id (str): If not None then adds the role to the end of + the summary if its not already there. [Optional] + order (int): If not None inserts the user at that position, e.g. + an order of 1 will put the user first. 
Otherwise, the user gets + added to the end. """ if role_id is None: role_id = _DEFAULT_CATEGORY_ID @@ -431,7 +441,7 @@ class GroupServerStore(SQLBaseStore): if not role_exists: raise SynapseError(400, "Role doesn't exist") - # TODO: Check room is part of group already + # TODO: Check role is part of the summary already role_exists = self._simple_select_one_onecol_txn( txn, table="group_summary_roles", @@ -443,6 +453,7 @@ class GroupServerStore(SQLBaseStore): allow_none=True, ) if not role_exists: + # If not, add it with an order larger than all others txn.execute(""" INSERT INTO group_summary_roles (group_id, role_id, role_order) @@ -464,6 +475,7 @@ class GroupServerStore(SQLBaseStore): ) if order is not None: + # Shuffle other users orders that come after the given order sql = """ UPDATE group_summary_users SET user_order = user_order + 1 WHERE group_id = ? AND role_id = ? AND user_order >= ? From 3b0470dba59274c65e69a4eab8909eaa55393a2a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 13 Jul 2017 13:53:21 +0100 Subject: [PATCH 0037/1637] Remove unused functions --- synapse/storage/group_server.py | 152 -------------------------------- 1 file changed, 152 deletions(-) diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index b328ef8bc4..2e05c23fd7 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -755,103 +755,6 @@ class GroupServerStore(SQLBaseStore): desc="add_room_to_group", ) - @defer.inlineCallbacks - def register_user_group_membership(self, group_id, user_id, membership, - is_admin=False, content={}, - local_attestation=None, - remote_attestation=None, - ): - def _register_user_group_membership_txn(txn, next_id): - # TODO: Upsert? - self._simple_delete_txn( - txn, - table="local_group_membership", - keyvalues={ - "group_id": group_id, - "user_id": user_id, - }, - ) - self._simple_insert_txn( - txn, - table="local_group_membership", - values={ - "group_id": group_id, - "user_id": user_id, - "is_admin": is_admin, - "membership": membership, - "content": json.dumps(content), - }, - ) - self._simple_delete_txn( - txn, - table="local_group_updates", - keyvalues={ - "group_id": group_id, - "user_id": user_id, - "type": "membership", - }, - ) - self._simple_insert_txn( - txn, - table="local_group_updates", - values={ - "stream_id": next_id, - "group_id": group_id, - "user_id": user_id, - "type": "membership", - "content": json.dumps({"membership": membership, "content": content}), - } - ) - self._group_updates_stream_cache.entity_has_changed(user_id, next_id) - - # TODO: Insert profile to ensuer it comes down stream if its a join. 
- - if membership == "join": - if local_attestation: - self._simple_insert_txn( - txn, - table="group_attestations_renewals", - values={ - "group_id": group_id, - "user_id": user_id, - "valid_until_ms": local_attestation["valid_until_ms"], - } - ) - if remote_attestation: - self._simple_insert_txn( - txn, - table="group_attestations_remote", - values={ - "group_id": group_id, - "user_id": user_id, - "valid_until_ms": remote_attestation["valid_until_ms"], - "attestation": json.dumps(remote_attestation), - } - ) - else: - self._simple_delete_txn( - txn, - table="group_attestations_renewals", - keyvalues={ - "group_id": group_id, - "user_id": user_id, - }, - ) - self._simple_delete_txn( - txn, - table="group_attestations_remote", - keyvalues={ - "group_id": group_id, - "user_id": user_id, - }, - ) - - with self._group_updates_id_gen.get_next() as next_id: - yield self.runInteraction( - "register_user_group_membership", - _register_user_group_membership_txn, next_id, - ) - @defer.inlineCallbacks def create_group(self, group_id, user_id, name, avatar_url, short_description, long_description,): @@ -867,61 +770,6 @@ class GroupServerStore(SQLBaseStore): desc="create_group", ) - def get_joined_groups(self, user_id): - return self._simple_select_onecol( - table="local_group_membership", - keyvalues={ - "user_id": user_id, - "membership": "join", - }, - retcol="group_id", - desc="get_joined_groups", - ) - - def get_all_groups_for_user(self, user_id, now_token): - def _get_all_groups_for_user_txn(txn): - sql = """ - SELECT group_id, type, membership, u.content - FROM local_group_updates AS u - INNER JOIN local_group_membership USING (group_id, user_id) - WHERE user_id = ? AND membership != 'leave' - AND stream_id <= ? - """ - txn.execute(sql, (user_id, now_token,)) - return self.cursor_to_dict(txn) - return self.runInteraction( - "get_all_groups_for_user", _get_all_groups_for_user_txn, - ) - - def get_groups_changes_for_user(self, user_id, from_token, to_token): - from_token = int(from_token) - has_changed = self._group_updates_stream_cache.has_entity_changed( - user_id, from_token, - ) - if not has_changed: - return [] - - def _get_groups_changes_for_user_txn(txn): - sql = """ - SELECT group_id, membership, type, u.content - FROM local_group_updates AS u - INNER JOIN local_group_membership USING (group_id, user_id) - WHERE user_id = ? AND ? < stream_id AND stream_id <= ? 
- """ - txn.execute(sql, (user_id, from_token, to_token,)) - return [{ - "group_id": group_id, - "membership": membership, - "type": gtype, - "content": json.loads(content_json), - } for group_id, membership, gtype, content_json in txn] - return self.runInteraction( - "get_groups_changes_for_user", _get_groups_changes_for_user_txn, - ) - - def get_group_stream_token(self): - return self._group_updates_id_gen.get_current_token() - def get_attestations_need_renewals(self, valid_until_ms): """Get all attestations that need to be renewed until givent time """ From 8d26385d76eefb4ab5d7703b76517b7cb6039f17 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 13 Jul 2017 11:37:09 +0100 Subject: [PATCH 0038/1637] Add more metrics to push rule evaluation --- synapse/push/bulk_push_rule_evaluator.py | 44 ++++++++++++++++++++++-- 1 file changed, 42 insertions(+), 2 deletions(-) diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index f304f4daf2..9134969553 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -20,6 +20,8 @@ from twisted.internet import defer from .push_rule_evaluator import PushRuleEvaluatorForEvent from synapse.api.constants import EventTypes, Membership +from synapse.metrics import get_metrics_for +from synapse.util.caches import metrics as cache_metrics from synapse.util.caches.descriptors import cached from synapse.util.async import Linearizer @@ -31,6 +33,23 @@ logger = logging.getLogger(__name__) rules_by_room = {} +push_metrics = get_metrics_for(__name__) + +push_rules_invalidation_counter = push_metrics.register_counter( + "push_rules_invalidation_counter" +) +push_rules_state_size_counter = push_metrics.register_counter( + "push_rules_state_size_counter" +) + +# Measures whether we use the fast path of using state deltas, or if we have to +# recalculate from scratch +push_rules_delta_state_cache_metric = cache_metrics.register_cache( + "cache", + size_callback=lambda: 0, # Meaningless size, as this isn't a cache that stores values + cache_name="push_rules_delta_state_cache_metric", +) + class BulkPushRuleEvaluator(object): """Calculates the outcome of push rules for an event for all users in the @@ -41,6 +60,12 @@ class BulkPushRuleEvaluator(object): self.hs = hs self.store = hs.get_datastore() + self.room_push_rule_cache_metrics = cache_metrics.register_cache( + "cache", + size_callback=lambda: 0, # There's not good value for this + cache_name="room_push_rule_cache", + ) + @defer.inlineCallbacks def _get_rules_for_event(self, event, context): """This gets the rules for all users in the room at the time of the event, @@ -78,7 +103,10 @@ class BulkPushRuleEvaluator(object): # It's important that RulesForRoom gets added to self._get_rules_for_room.cache # before any lookup methods get called on it as otherwise there may be # a race if invalidate_all gets called (which assumes its in the cache) - return RulesForRoom(self.hs, room_id, self._get_rules_for_room.cache) + return RulesForRoom( + self.hs, room_id, self._get_rules_for_room.cache, + self.room_push_rule_cache_metrics, + ) @defer.inlineCallbacks def action_for_event_by_user(self, event, context): @@ -161,17 +189,19 @@ class RulesForRoom(object): the entire cache for the room. 
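
    The rules are cached against the room's current state group: if an
    event has the same state group as the cached one, the cached rules are
    returned without hitting the database.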
""" - def __init__(self, hs, room_id, rules_for_room_cache): + def __init__(self, hs, room_id, rules_for_room_cache, room_push_rule_cache_metrics): """ Args: hs (HomeServer) room_id (str) rules_for_room_cache(Cache): The cache object that caches these RoomsForUser objects. + room_push_rule_cache_metrics (CacheMetric) """ self.room_id = room_id self.is_mine_id = hs.is_mine_id self.store = hs.get_datastore() + self.room_push_rule_cache_metrics = room_push_rule_cache_metrics self.linearizer = Linearizer(name="rules_for_room") @@ -215,13 +245,17 @@ class RulesForRoom(object): if state_group and self.state_group == state_group: logger.debug("Using cached rules for %r", self.room_id) + self.room_push_rule_cache_metrics.inc_hits() defer.returnValue(self.rules_by_user) with (yield self.linearizer.queue(())): if state_group and self.state_group == state_group: logger.debug("Using cached rules for %r", self.room_id) + self.room_push_rule_cache_metrics.inc_hits() defer.returnValue(self.rules_by_user) + self.room_push_rule_cache_metrics.inc_misses() + ret_rules_by_user = {} missing_member_event_ids = {} if state_group and self.state_group == context.prev_group: @@ -229,8 +263,13 @@ class RulesForRoom(object): # results. ret_rules_by_user = self.rules_by_user current_state_ids = context.delta_ids + + push_rules_delta_state_cache_metric.inc_hits() else: current_state_ids = context.current_state_ids + push_rules_delta_state_cache_metric.inc_misses() + + push_rules_state_size_counter.inc_by(len(current_state_ids)) logger.debug( "Looking for member changes in %r %r", state_group, current_state_ids @@ -375,6 +414,7 @@ class RulesForRoom(object): self.state_group = object() self.member_map = {} self.rules_by_user = {} + push_rules_invalidation_counter.inc() def update_cache(self, sequence, members, rules_by_user, state_group): if sequence == self.sequence: From 4b203bdba51a314abef56ccee4d77e1945d16735 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 14 Jul 2017 14:02:00 +0100 Subject: [PATCH 0039/1637] Correctly increment orders --- synapse/storage/group_server.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index 2e05c23fd7..c23dc79ca5 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -184,7 +184,7 @@ class GroupServerStore(SQLBaseStore): txn.execute(""" INSERT INTO group_summary_room_categories (group_id, category_id, cat_order) - SELECT ?, ?, COALESCE(MAX(cat_order), 1) + SELECT ?, ?, COALESCE(MAX(cat_order), 0) + 1 FROM group_summary_room_categories WHERE group_id = ? AND category_id = ? """, (group_id, category_id, group_id, category_id)) @@ -457,7 +457,7 @@ class GroupServerStore(SQLBaseStore): txn.execute(""" INSERT INTO group_summary_roles (group_id, role_id, role_order) - SELECT ?, ?, COALESCE(MAX(role_order), 1) + SELECT ?, ?, COALESCE(MAX(role_order), 0) + 1 FROM group_summary_roles WHERE group_id = ? AND role_id = ? 
""", (group_id, role_id, group_id, role_id)) From 85fda57208bb79e54fe473fda64351f04ffe1cda Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 14 Jul 2017 14:03:54 +0100 Subject: [PATCH 0040/1637] Add DEFAULT_ROLE_ID --- synapse/storage/group_server.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index c23dc79ca5..e8a799d8c7 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -24,7 +24,8 @@ import ujson as json # The category ID for the "default" category. We don't store as null in the # database to avoid the fun of null != null -_DEFAULT_CATEGORY_ID = "default" +_DEFAULT_CATEGORY_ID = "" +_DEFAULT_ROLE_ID = "" class GroupServerStore(SQLBaseStore): @@ -426,7 +427,7 @@ class GroupServerStore(SQLBaseStore): added to the end. """ if role_id is None: - role_id = _DEFAULT_CATEGORY_ID + role_id = _DEFAULT_ROLE_ID else: role_exists = self._simple_select_one_onecol_txn( txn, @@ -523,7 +524,7 @@ class GroupServerStore(SQLBaseStore): def remove_user_from_summary(self, group_id, user_id, role_id): if role_id is None: - role_id = _DEFAULT_CATEGORY_ID + role_id = _DEFAULT_ROLE_ID return self._simple_delete( table="group_summary_users", @@ -563,7 +564,7 @@ class GroupServerStore(SQLBaseStore): { "user_id": row[0], "is_public": row[1], - "role_id": row[2] if row[2] != _DEFAULT_CATEGORY_ID else None, + "role_id": row[2] if row[2] != _DEFAULT_ROLE_ID else None, "order": row[3], } for row in txn From cb3aee8219b68e99fc391b96813e6588279e8d86 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 14 Jul 2017 14:06:09 +0100 Subject: [PATCH 0041/1637] Ensure category and role ids are non-null --- synapse/federation/transport/server.py | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 304c2a2a4c..4f7d2546cf 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -785,6 +785,9 @@ class FederationGroupsSummaryRoomsServlet(BaseFederationServlet): if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") + if category_id == "": + raise SynapseError(400, "category_id cannot be empty string") + resp = yield self.handler.update_group_summary_room( group_id, requester_user_id, room_id=room_id, @@ -800,6 +803,9 @@ class FederationGroupsSummaryRoomsServlet(BaseFederationServlet): if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") + if category_id == "": + raise SynapseError(400, "category_id cannot be empty string") + resp = yield self.handler.delete_group_summary_room( group_id, requester_user_id, room_id=room_id, @@ -854,6 +860,9 @@ class FederationGroupsCategoryServlet(BaseFederationServlet): if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") + if category_id == "": + raise SynapseError(400, "category_id cannot be empty string") + resp = yield self.handler.upsert_group_category( group_id, requester_user_id, category_id, content, ) @@ -866,6 +875,9 @@ class FederationGroupsCategoryServlet(BaseFederationServlet): if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") + if category_id == "": + raise SynapseError(400, "category_id cannot be empty string") + resp = yield 
self.handler.delete_group_category( group_id, requester_user_id, category_id, ) @@ -918,6 +930,9 @@ class FederationGroupsRoleServlet(BaseFederationServlet): if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") + if role_id == "": + raise SynapseError(400, "role_id cannot be empty string") + resp = yield self.handler.update_group_role( group_id, requester_user_id, role_id, content, ) @@ -930,6 +945,9 @@ class FederationGroupsRoleServlet(BaseFederationServlet): if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") + if role_id == "": + raise SynapseError(400, "role_id cannot be empty string") + resp = yield self.handler.delete_group_role( group_id, requester_user_id, role_id, ) @@ -956,6 +974,9 @@ class FederationGroupsSummaryUsersServlet(BaseFederationServlet): if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") + if role_id == "": + raise SynapseError(400, "role_id cannot be empty string") + resp = yield self.handler.update_group_summary_user( group_id, requester_user_id, user_id=user_id, @@ -971,6 +992,9 @@ class FederationGroupsSummaryUsersServlet(BaseFederationServlet): if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") + if role_id == "": + raise SynapseError(400, "role_id cannot be empty string") + resp = yield self.handler.delete_group_summary_user( group_id, requester_user_id, user_id=user_id, From bfde0760224c09a5e6327d4ae4181ecb10ccfc2e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 14 Jul 2017 16:11:26 +0100 Subject: [PATCH 0042/1637] Increase cache hit ratio for push We don't update the cache in all code paths, which causes subsequent calls to miss the cache --- synapse/push/bulk_push_rule_evaluator.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index 9134969553..b0d64aa6c4 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -316,6 +316,14 @@ class RulesForRoom(object): yield self._update_rules_with_member_event_ids( ret_rules_by_user, missing_member_event_ids, state_group, event ) + else: + # The push rules didn't change but lets update the cache anyway + self.update_cache( + self.sequence, + members={}, # There were no membership changes + rules_by_user=ret_rules_by_user, + state_group=state_group + ) if logger.isEnabledFor(logging.DEBUG): logger.debug( From 2f9eafdd369796d8b7731b24ab8cf6a98ad19e29 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 10 Jul 2017 14:52:27 +0100 Subject: [PATCH 0043/1637] Add local group server support --- synapse/federation/transport/client.py | 77 +++ synapse/federation/transport/server.py | 44 ++ synapse/groups/groups_server.py | 7 +- synapse/handlers/groups_local.py | 278 ++++++++ synapse/rest/__init__.py | 2 + synapse/rest/client/v2_alpha/groups.py | 642 ++++++++++++++++++ synapse/server.py | 5 + synapse/storage/__init__.py | 15 + synapse/storage/group_server.py | 152 +++++ .../storage/schema/delta/43/group_server.sql | 28 + 10 files changed, 1248 insertions(+), 2 deletions(-) create mode 100644 synapse/handlers/groups_local.py create mode 100644 synapse/rest/client/v2_alpha/groups.py diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index d0f8da7516..ea340e345c 100644 --- 
a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -472,6 +472,72 @@ class TransportLayerClient(object): defer.returnValue(content) + @log_function + def get_group_profile(self, destination, group_id, requester_user_id): + path = PREFIX + "/groups/%s/profile" % (group_id,) + + return self.client.post_json( + destination=destination, + path=path, + data={"requester_user_id": requester_user_id}, + ignore_backoff=True, + ) + + @log_function + def get_group_summary(self, destination, group_id, requester_user_id): + path = PREFIX + "/groups/%s/summary" % (group_id,) + + return self.client.post_json( + destination=destination, + path=path, + data={"requester_user_id": requester_user_id}, + ignore_backoff=True, + ) + + @log_function + def get_group_rooms(self, destination, group_id, requester_user_id): + path = PREFIX + "/groups/%s/rooms" % (group_id,) + + return self.client.post_json( + destination=destination, + path=path, + data={"requester_user_id": requester_user_id}, + ignore_backoff=True, + ) + + @log_function + def get_group_users(self, destination, group_id, requester_user_id): + path = PREFIX + "/groups/%s/users" % (group_id,) + + return self.client.post_json( + destination=destination, + path=path, + data={"requester_user_id": requester_user_id}, + ignore_backoff=True, + ) + + @log_function + def accept_group_invite(self, destination, group_id, user_id, content): + path = PREFIX + "/groups/%s/users/%s/accept_invite" % (group_id, user_id) + + return self.client.post_json( + destination=destination, + path=path, + data=content, + ignore_backoff=True, + ) + + @log_function + def invite_to_group(self, destination, group_id, user_id, content): + path = PREFIX + "/groups/%s/users/%s/invite" % (group_id, user_id) + + return self.client.post_json( + destination=destination, + path=path, + data=content, + ignore_backoff=True, + ) + @log_function def invite_to_group_notification(self, destination, group_id, user_id, content): """Sent by group server to inform a user's server that they have been @@ -487,6 +553,17 @@ class TransportLayerClient(object): ignore_backoff=True, ) + @log_function + def remove_user_from_group(self, destination, group_id, user_id, content): + path = PREFIX + "/groups/%s/users/%s/remove" % (group_id, user_id) + + return self.client.post_json( + destination=destination, + path=path, + data=content, + ignore_backoff=True, + ) + @log_function def remove_user_from_group_notification(self, destination, group_id, user_id, content): diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 4f7d2546cf..0f08334f33 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -715,6 +715,21 @@ class FederationGroupsInviteServlet(BaseFederationServlet): defer.returnValue((200, new_content)) +class FederationGroupsLocalInviteServlet(BaseFederationServlet): + PATH = "/groups/local/(?P[^/]*)/users/(?P[^/]*)/invite$" + + @defer.inlineCallbacks + def on_POST(self, origin, content, query, group_id, user_id): + if get_domain_from_id(group_id) != origin: + raise SynapseError(403, "group_id doesn't match origin") + + new_content = yield self.handler.on_invite( + group_id, user_id, content, + ) + + defer.returnValue((200, new_content)) + + class FederationGroupsAcceptInviteServlet(BaseFederationServlet): """Accept an invitation from the group server """ @@ -750,6 +765,21 @@ class FederationGroupsRemoveUserServlet(BaseFederationServlet): defer.returnValue((200, new_content)) 
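The transport-layer methods added above all share one calling convention: group reads and writes go over POST, the acting user travels in the body as "requester_user_id", and ignore_backoff=True keeps group traffic flowing even when federation to that destination is otherwise backing off. A minimal runnable sketch of that convention; the stub client and the group_path helper are illustrative assumptions, not code from these patches.

PREFIX = "/_matrix/federation/v1"


class StubFederationHttpClient(object):
    """Illustrative stand-in for the real federation HTTP client."""

    def post_json(self, destination, path, data, ignore_backoff=False):
        # The real client issues an HTTP POST; here we just echo the call.
        return {"destination": destination, "path": path, "data": data,
                "ignore_backoff": ignore_backoff}


def group_path(group_id, suffix):
    # Builds "/groups/<group_id>/<suffix>" the same way the methods above do.
    return PREFIX + "/groups/%s/%s" % (group_id, suffix)


client = StubFederationHttpClient()
print(client.post_json(
    destination="remote.example",
    path=group_path("+cats:remote.example", "summary"),
    data={"requester_user_id": "@alice:local.example"},
    ignore_backoff=True,
))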
+class FederationGroupsRemoveLocalUserServlet(BaseFederationServlet): + PATH = "/groups/local/(?P[^/]*)/users/(?P[^/]*)/remove$" + + @defer.inlineCallbacks + def on_POST(self, origin, content, query, group_id, user_id): + if get_domain_from_id(group_id) != origin: + raise SynapseError(403, "user_id doesn't match origin") + + new_content = yield self.handler.user_removed_from_group( + group_id, user_id, content, + ) + + defer.returnValue((200, new_content)) + + class FederationGroupsRenewAttestaionServlet(BaseFederationServlet): """A group or user's server renews their attestation """ @@ -1053,6 +1083,12 @@ GROUP_SERVER_SERVLET_CLASSES = ( ) +GROUP_LOCAL_SERVLET_CLASSES = ( + FederationGroupsLocalInviteServlet, + FederationGroupsRemoveLocalUserServlet, +) + + GROUP_ATTESTATION_SERVLET_CLASSES = ( FederationGroupsRenewAttestaionServlet, ) @@ -1083,6 +1119,14 @@ def register_servlets(hs, resource, authenticator, ratelimiter): server_name=hs.hostname, ).register(resource) + for servletclass in GROUP_LOCAL_SERVLET_CLASSES: + servletclass( + handler=hs.get_groups_local_handler(), + authenticator=authenticator, + ratelimiter=ratelimiter, + server_name=hs.hostname, + ).register(resource) + for servletclass in GROUP_ATTESTATION_SERVLET_CLASSES: servletclass( handler=hs.get_groups_attestation_renewer(), diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index a00bafe3af..c8559577f7 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -462,7 +462,9 @@ class GroupsServerHandler(object): } if self.hs.is_mine_id(user_id): - raise NotImplementedError() + groups_local = self.hs.get_groups_local_handler() + res = yield groups_local.on_invite(group_id, user_id, content) + local_attestation = None else: local_attestation = self.attestations.create_attestation(group_id, user_id) content.update({ @@ -590,7 +592,8 @@ class GroupsServerHandler(object): if is_kick: if self.hs.is_mine_id(user_id): - raise NotImplementedError() + groups_local = self.hs.get_groups_local_handler() + yield groups_local.user_removed_from_group(group_id, user_id, {}) else: yield self.transport_client.remove_user_from_group_notification( get_domain_from_id(user_id), group_id, user_id, {} diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py new file mode 100644 index 0000000000..3df255b05a --- /dev/null +++ b/synapse/handlers/groups_local.py @@ -0,0 +1,278 @@ +# -*- coding: utf-8 -*- +# Copyright 2017 Vector Creations Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
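The new module synapse/handlers/groups_local.py, whose body follows, routes every group request on the domain of the group ID: groups this server owns are handled in-process by GroupsServerHandler, anything else is proxied over federation. A self-contained sketch of that dispatch rule, assuming only that IDs take the "+localpart:domain" form; the helper names here are illustrative, not the module's own.

def domain_of(group_id):
    # "+cats:example.com" -> "example.com"
    return group_id.split(":", 1)[1]


def pick_backend(group_id, my_domain, local_handler, federation_client):
    # The same decision _create_rerouter makes below: serve local groups
    # in-process, proxy remote ones over federation.
    if domain_of(group_id) == my_domain:
        return local_handler
    return federation_client


assert pick_backend("+cats:a.com", "a.com", "local", "remote") == "local"
assert pick_backend("+cats:b.com", "a.com", "local", "remote") == "remote"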
+
+from twisted.internet import defer
+
+from synapse.api.errors import SynapseError
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+# TODO: Validate attestations
+# TODO: Allow users to "knock" or simply join depending on rules
+# TODO: is_privileged flag to users and is_public to users and rooms
+# TODO: Roles
+# TODO: Audit log for admins (profile updates, membership changes, users who tried
+#       to join but were rejected, etc)
+# TODO: Flairs
+# TODO: Add group membership /sync
+
+
+def _create_rerouter(name):
+    def f(self, group_id, *args, **kwargs):
+        if self.is_mine_id(group_id):
+            return getattr(self.groups_server_handler, name)(
+                group_id, *args, **kwargs
+            )
+
+        repl_layer = self.hs.get_replication_layer()
+        return getattr(repl_layer, name)(group_id, *args, **kwargs)
+    return f
+
+
+class GroupsLocalHandler(object):
+    def __init__(self, hs):
+        self.hs = hs
+        self.store = hs.get_datastore()
+        self.room_list_handler = hs.get_room_list_handler()
+        self.groups_server_handler = hs.get_groups_server_handler()
+        self.auth = hs.get_auth()
+        self.clock = hs.get_clock()
+        self.keyring = hs.get_keyring()
+        self.is_mine_id = hs.is_mine_id
+        self.signing_key = hs.config.signing_key[0]
+        self.server_name = hs.hostname
+        self.attestations = hs.get_groups_attestation_signing()
+
+        # Ensure attestations get renewed
+        hs.get_groups_attestation_renewer()
+
+    get_group_profile = _create_rerouter("get_group_profile")
+    get_rooms_in_group = _create_rerouter("get_rooms_in_group")
+
+    update_group_summary_room = _create_rerouter("update_group_summary_room")
+    delete_group_summary_room = _create_rerouter("delete_group_summary_room")
+
+    update_group_category = _create_rerouter("update_group_category")
+    delete_group_category = _create_rerouter("delete_group_category")
+    get_group_category = _create_rerouter("get_group_category")
+    get_group_categories = _create_rerouter("get_group_categories")
+
+    update_group_summary_user = _create_rerouter("update_group_summary_user")
+    delete_group_summary_user = _create_rerouter("delete_group_summary_user")
+
+    update_group_role = _create_rerouter("update_group_role")
+    delete_group_role = _create_rerouter("delete_group_role")
+    get_group_role = _create_rerouter("get_group_role")
+    get_group_roles = _create_rerouter("get_group_roles")
+
+    @defer.inlineCallbacks
+    def get_group_summary(self, group_id, requester_user_id):
+        if self.is_mine_id(group_id):
+            res = yield self.groups_server_handler.get_group_summary(
+                group_id, requester_user_id
+            )
+            defer.returnValue(res)
+
+        repl_layer = self.hs.get_replication_layer()
+        res = yield repl_layer.get_group_summary(group_id, requester_user_id)
+
+        chunk = res["users_section"]["users"]
+        valid_users = []
+        for entry in chunk:
+            g_user_id = entry["user_id"]
+            attestation = entry.pop("attestation")
+            try:
+                yield self.attestations.verify_attestation(
+                    attestation,
+                    group_id=group_id,
+                    user_id=g_user_id,
+                )
+                valid_users.append(entry)
+            except Exception as e:
+                logger.info("Failed to verify user is in group: %s", e)
+
+        res["users_section"]["users"] = valid_users
+
+        res["users_section"]["users"].sort(key=lambda e: e.get("order", 0))
+        res["rooms_section"]["rooms"].sort(key=lambda e: e.get("order", 0))
+
+        defer.returnValue(res)
+
+    def create_group(self, group_id, user_id, content):
+        logger.info("Asking to create group with ID: %r", group_id)
+
+        if self.is_mine_id(group_id):
+            return self.groups_server_handler.create_group(
+                group_id, user_id, content
+            )
+
+        repl_layer = self.hs.get_replication_layer()
+
return repl_layer.create_group(group_id, user_id, content) # TODO + + def add_room(self, group_id, user_id, room_id, content): + if self.is_mine_id(group_id): + return self.groups_server_handler.add_room( + group_id, user_id, room_id, content + ) + + repl_layer = self.hs.get_replication_layer() + return repl_layer.add_room_to_group(group_id, user_id, room_id, content) # TODO + + @defer.inlineCallbacks + def get_users_in_group(self, group_id, requester_user_id): + if self.is_mine_id(group_id): + res = yield self.groups_server_handler.get_users_in_group( + group_id, requester_user_id + ) + defer.returnValue(res) + + repl_layer = self.hs.get_replication_layer() + res = yield repl_layer.get_users_in_group(group_id, requester_user_id) # TODO + + chunk = res["chunk"] + valid_entries = [] + for entry in chunk: + g_user_id = entry["user_id"] + attestation = entry.pop("attestation") + try: + yield self.attestations.verify_attestation( + attestation, + group_id=group_id, + user_id=g_user_id, + ) + valid_entries.append(entry) + except Exception as e: + logger.info("Failed to verify user is in group: %s", e) + + res["chunk"] = valid_entries + + defer.returnValue(res) + + @defer.inlineCallbacks + def join_group(self, group_id, user_id, content): + raise NotImplementedError() # TODO + + @defer.inlineCallbacks + def accept_invite(self, group_id, user_id, content): + if self.is_mine_id(group_id): + yield self.groups_server_handler.accept_invite( + group_id, user_id, content + ) + local_attestation = None + remote_attestation = None + else: + local_attestation = self.attestations.create_attestation(group_id, user_id) + content["attestation"] = local_attestation + + repl_layer = self.hs.get_replication_layer() + res = yield repl_layer.accept_group_invite(group_id, user_id, content) + + remote_attestation = res["attestation"] + + yield self.attestations.verify_attestation( + remote_attestation, + group_id=group_id, + user_id=user_id, + ) + + yield self.store.register_user_group_membership( + group_id, user_id, + membership="join", + is_admin=False, + local_attestation=local_attestation, + remote_attestation=remote_attestation, + ) + + defer.returnValue({}) + + @defer.inlineCallbacks + def invite(self, group_id, user_id, requester_user_id, config): + content = { + "requester_user_id": requester_user_id, + "config": config, + } + if self.is_mine_id(group_id): + res = yield self.groups_server_handler.invite_to_group( + group_id, user_id, requester_user_id, content, + ) + else: + repl_layer = self.hs.get_replication_layer() + res = yield repl_layer.invite_to_group( + group_id, user_id, content, + ) + + defer.returnValue(res) + + @defer.inlineCallbacks + def on_invite(self, group_id, user_id, content): + # TODO: Support auto join and rejection + + if not self.is_mine_id(user_id): + raise SynapseError(400, "User not on this server") + + local_profile = {} + if "profile" in content: + if "name" in content["profile"]: + local_profile["name"] = content["profile"]["name"] + if "avatar_url" in content["profile"]: + local_profile["avatar_url"] = content["profile"]["avatar_url"] + + yield self.store.register_user_group_membership( + group_id, user_id, + membership="invite", + content={"profile": local_profile, "inviter": content["inviter"]}, + ) + + defer.returnValue({"state": "invite"}) + + @defer.inlineCallbacks + def remove_user_from_group(self, group_id, user_id, requester_user_id, content): + if user_id == requester_user_id: + yield self.store.register_user_group_membership( + group_id, user_id, + 
membership="leave", + ) + + # TODO: Should probably remember that we tried to leave so that we can + # retry if the group server is currently down. + + if self.is_mine_id(group_id): + res = yield self.groups_server_handler.remove_user_from_group( + group_id, user_id, requester_user_id, content, + ) + else: + content["requester_user_id"] = requester_user_id + repl_layer = self.hs.get_replication_layer() + res = yield repl_layer.remove_user_from_group( + group_id, user_id, content + ) # TODO + + defer.returnValue(res) + + @defer.inlineCallbacks + def user_removed_from_group(self, group_id, user_id, content): + # TODO: Check if user in group + yield self.store.register_user_group_membership( + group_id, user_id, + membership="leave", + ) + + @defer.inlineCallbacks + def get_joined_groups(self, user_id): + group_ids = yield self.store.get_joined_groups(user_id) + defer.returnValue({"groups": group_ids}) diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py index 3d809d181b..16f5a73b95 100644 --- a/synapse/rest/__init__.py +++ b/synapse/rest/__init__.py @@ -52,6 +52,7 @@ from synapse.rest.client.v2_alpha import ( thirdparty, sendtodevice, user_directory, + groups, ) from synapse.http.server import JsonResource @@ -102,3 +103,4 @@ class ClientRestResource(JsonResource): thirdparty.register_servlets(hs, client_resource) sendtodevice.register_servlets(hs, client_resource) user_directory.register_servlets(hs, client_resource) + groups.register_servlets(hs, client_resource) diff --git a/synapse/rest/client/v2_alpha/groups.py b/synapse/rest/client/v2_alpha/groups.py new file mode 100644 index 0000000000..255552c365 --- /dev/null +++ b/synapse/rest/client/v2_alpha/groups.py @@ -0,0 +1,642 @@ +# -*- coding: utf-8 -*- +# Copyright 2017 Vector Creations Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+from twisted.internet import defer
+
+from synapse.http.servlet import RestServlet, parse_json_object_from_request
+from synapse.types import GroupID
+
+from ._base import client_v2_patterns
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class GroupServlet(RestServlet):
+    PATTERNS = client_v2_patterns("/groups/(?P<group_id>[^/]*)/profile$")
+
+    def __init__(self, hs):
+        super(GroupServlet, self).__init__()
+        self.auth = hs.get_auth()
+        self.clock = hs.get_clock()
+        self.groups_handler = hs.get_groups_local_handler()
+
+    @defer.inlineCallbacks
+    def on_GET(self, request, group_id):
+        requester = yield self.auth.get_user_by_req(request)
+        user_id = requester.user.to_string()
+
+        group_description = yield self.groups_handler.get_group_profile(group_id, user_id)
+
+        defer.returnValue((200, group_description))
+
+
+class GroupSummaryServlet(RestServlet):
+    PATTERNS = client_v2_patterns("/groups/(?P<group_id>[^/]*)/summary$")
+
+    def __init__(self, hs):
+        super(GroupSummaryServlet, self).__init__()
+        self.auth = hs.get_auth()
+        self.clock = hs.get_clock()
+        self.groups_handler = hs.get_groups_local_handler()
+
+    @defer.inlineCallbacks
+    def on_GET(self, request, group_id):
+        requester = yield self.auth.get_user_by_req(request)
+        user_id = requester.user.to_string()
+
+        get_group_summary = yield self.groups_handler.get_group_summary(group_id, user_id)
+
+        defer.returnValue((200, get_group_summary))
+
+
+class GroupSummaryRoomsServlet(RestServlet):
+    PATTERNS = client_v2_patterns("/groups/(?P<group_id>[^/]*)/summary/rooms$")
+
+    def __init__(self, hs):
+        super(GroupSummaryRoomsServlet, self).__init__()
+        self.auth = hs.get_auth()
+        self.clock = hs.get_clock()
+        self.groups_handler = hs.get_groups_local_handler()
+
+    @defer.inlineCallbacks
+    def on_GET(self, request, group_id):
+        requester = yield self.auth.get_user_by_req(request)
+        user_id = requester.user.to_string()
+
+        get_group_summary = yield self.groups_handler.get_group_summary(group_id, user_id)
+
+        defer.returnValue((200, get_group_summary))
+
+
+class GroupSummaryRoomsDefaultCatServlet(RestServlet):
+    PATTERNS = client_v2_patterns(
+        "/groups/(?P<group_id>[^/]*)/summary/rooms/(?P<room_id>[^/]*)$"
+    )
+
+    def __init__(self, hs):
+        super(GroupSummaryRoomsDefaultCatServlet, self).__init__()
+        self.auth = hs.get_auth()
+        self.clock = hs.get_clock()
+        self.groups_handler = hs.get_groups_local_handler()
+
+    @defer.inlineCallbacks
+    def on_PUT(self, request, group_id, room_id):
+        requester = yield self.auth.get_user_by_req(request)
+        user_id = requester.user.to_string()
+
+        content = parse_json_object_from_request(request)
+        resp = yield self.groups_handler.update_group_summary_room(
+            group_id, user_id,
+            room_id=room_id,
+            category_id=None,
+            content=content,
+        )
+
+        defer.returnValue((200, resp))
+
+    @defer.inlineCallbacks
+    def on_DELETE(self, request, group_id, room_id):
+        requester = yield self.auth.get_user_by_req(request)
+        user_id = requester.user.to_string()
+
+        resp = yield self.groups_handler.delete_group_summary_room(
+            group_id, user_id,
+            room_id=room_id,
+            category_id=None,
+        )
+
+        defer.returnValue((200, resp))
+
+
+class GroupSummaryRoomsCatServlet(RestServlet):
+    PATTERNS = client_v2_patterns(
+        "/groups/(?P<group_id>[^/]*)/summary"
+        "/categories/(?P<category_id>[^/]+)/rooms/(?P<room_id>[^/]+)$"
+    )
+
+    def __init__(self, hs):
+        super(GroupSummaryRoomsCatServlet, self).__init__()
+        self.auth = hs.get_auth()
+        self.clock = hs.get_clock()
+        self.groups_handler = hs.get_groups_local_handler()
+
+    @defer.inlineCallbacks
+    def on_PUT(self, request, group_id, category_id, room_id):
+        requester =
yield self.auth.get_user_by_req(request) + user_id = requester.user.to_string() + + content = parse_json_object_from_request(request) + resp = yield self.groups_handler.update_group_summary_room( + group_id, user_id, + room_id=room_id, + category_id=category_id, + content=content, + ) + + defer.returnValue((200, resp)) + + @defer.inlineCallbacks + def on_DELETE(self, request, group_id, category_id, room_id): + requester = yield self.auth.get_user_by_req(request) + user_id = requester.user.to_string() + + resp = yield self.groups_handler.delete_group_summary_room( + group_id, user_id, + room_id=room_id, + category_id=category_id, + ) + + defer.returnValue((200, resp)) + + +class GroupCategoryServlet(RestServlet): + PATTERNS = client_v2_patterns( + "/groups/(?P[^/]*)/categories/(?P[^/]+)$" + ) + + def __init__(self, hs): + super(GroupCategoryServlet, self).__init__() + self.auth = hs.get_auth() + self.clock = hs.get_clock() + self.groups_handler = hs.get_groups_local_handler() + + @defer.inlineCallbacks + def on_GET(self, request, group_id, category_id): + requester = yield self.auth.get_user_by_req(request) + user_id = requester.user.to_string() + + category = yield self.groups_handler.get_group_category( + group_id, user_id, + category_id=category_id, + ) + + defer.returnValue((200, category)) + + @defer.inlineCallbacks + def on_PUT(self, request, group_id, category_id): + requester = yield self.auth.get_user_by_req(request) + user_id = requester.user.to_string() + + content = parse_json_object_from_request(request) + resp = yield self.groups_handler.update_group_category( + group_id, user_id, + category_id=category_id, + content=content, + ) + + defer.returnValue((200, resp)) + + @defer.inlineCallbacks + def on_DELETE(self, request, group_id, category_id): + requester = yield self.auth.get_user_by_req(request) + user_id = requester.user.to_string() + + resp = yield self.groups_handler.delete_group_category( + group_id, user_id, + category_id=category_id, + ) + + defer.returnValue((200, resp)) + + +class GroupCategoriesServlet(RestServlet): + PATTERNS = client_v2_patterns( + "/groups/(?P[^/]*)/categories/$" + ) + + def __init__(self, hs): + super(GroupCategoriesServlet, self).__init__() + self.auth = hs.get_auth() + self.clock = hs.get_clock() + self.groups_handler = hs.get_groups_local_handler() + + @defer.inlineCallbacks + def on_GET(self, request, group_id): + requester = yield self.auth.get_user_by_req(request) + user_id = requester.user.to_string() + + category = yield self.groups_handler.get_group_categories( + group_id, user_id, + ) + + defer.returnValue((200, category)) + + +class GroupRoleServlet(RestServlet): + PATTERNS = client_v2_patterns( + "/groups/(?P[^/]*)/roles/(?P[^/]+)$" + ) + + def __init__(self, hs): + super(GroupRoleServlet, self).__init__() + self.auth = hs.get_auth() + self.clock = hs.get_clock() + self.groups_handler = hs.get_groups_local_handler() + + @defer.inlineCallbacks + def on_GET(self, request, group_id, role_id): + requester = yield self.auth.get_user_by_req(request) + user_id = requester.user.to_string() + + category = yield self.groups_handler.get_group_role( + group_id, user_id, + role_id=role_id, + ) + + defer.returnValue((200, category)) + + @defer.inlineCallbacks + def on_PUT(self, request, group_id, role_id): + requester = yield self.auth.get_user_by_req(request) + user_id = requester.user.to_string() + + content = parse_json_object_from_request(request) + resp = yield self.groups_handler.update_group_role( + group_id, user_id, + role_id=role_id, + 
content=content, + ) + + defer.returnValue((200, resp)) + + @defer.inlineCallbacks + def on_DELETE(self, request, group_id, role_id): + requester = yield self.auth.get_user_by_req(request) + user_id = requester.user.to_string() + + resp = yield self.groups_handler.delete_group_role( + group_id, user_id, + role_id=role_id, + ) + + defer.returnValue((200, resp)) + + +class GroupRolesServlet(RestServlet): + PATTERNS = client_v2_patterns( + "/groups/(?P[^/]*)/roles/$" + ) + + def __init__(self, hs): + super(GroupRolesServlet, self).__init__() + self.auth = hs.get_auth() + self.clock = hs.get_clock() + self.groups_handler = hs.get_groups_local_handler() + + @defer.inlineCallbacks + def on_GET(self, request, group_id): + requester = yield self.auth.get_user_by_req(request) + user_id = requester.user.to_string() + + category = yield self.groups_handler.get_group_roles( + group_id, user_id, + ) + + defer.returnValue((200, category)) + + +class GroupSummaryUsersDefaultRoleServlet(RestServlet): + PATTERNS = client_v2_patterns( + "/groups/(?P[^/]*)/summary/users/(?P[^/]*)$" + ) + + def __init__(self, hs): + super(GroupSummaryUsersDefaultRoleServlet, self).__init__() + self.auth = hs.get_auth() + self.clock = hs.get_clock() + self.groups_handler = hs.get_groups_local_handler() + + @defer.inlineCallbacks + def on_PUT(self, request, group_id, user_id): + requester = yield self.auth.get_user_by_req(request) + requester_user_id = requester.user.to_string() + + content = parse_json_object_from_request(request) + resp = yield self.groups_handler.update_group_summary_user( + group_id, requester_user_id, + user_id=user_id, + role_id=None, + content=content, + ) + + defer.returnValue((200, resp)) + + @defer.inlineCallbacks + def on_DELETE(self, request, group_id, user_id): + requester = yield self.auth.get_user_by_req(request) + requester_user_id = requester.user.to_string() + + resp = yield self.groups_handler.delete_group_summary_user( + group_id, requester_user_id, + user_id=user_id, + role_id=None, + ) + + defer.returnValue((200, resp)) + + +class GroupSummaryUsersRoleServlet(RestServlet): + PATTERNS = client_v2_patterns( + "/groups/(?P[^/]*)/summary" + "/roles/(?P[^/]+)/users/(?P[^/]+)$" + ) + + def __init__(self, hs): + super(GroupSummaryUsersRoleServlet, self).__init__() + self.auth = hs.get_auth() + self.clock = hs.get_clock() + self.groups_handler = hs.get_groups_local_handler() + + @defer.inlineCallbacks + def on_PUT(self, request, group_id, role_id, user_id): + requester = yield self.auth.get_user_by_req(request) + requester_user_id = requester.user.to_string() + + content = parse_json_object_from_request(request) + resp = yield self.groups_handler.update_group_summary_user( + group_id, requester_user_id, + user_id=user_id, + role_id=role_id, + content=content, + ) + + defer.returnValue((200, resp)) + + @defer.inlineCallbacks + def on_DELETE(self, request, group_id, role_id, user_id): + requester = yield self.auth.get_user_by_req(request) + requester_user_id = requester.user.to_string() + + resp = yield self.groups_handler.delete_group_summary_user( + group_id, requester_user_id, + user_id=user_id, + role_id=role_id, + ) + + defer.returnValue((200, resp)) + + +class GroupRoomServlet(RestServlet): + PATTERNS = client_v2_patterns("/groups/(?P[^/]*)/rooms$") + + def __init__(self, hs): + super(GroupRoomServlet, self).__init__() + self.auth = hs.get_auth() + self.clock = hs.get_clock() + self.groups_handler = hs.get_groups_local_handler() + + @defer.inlineCallbacks + def on_GET(self, request, group_id): 
+ requester = yield self.auth.get_user_by_req(request) + user_id = requester.user.to_string() + + result = yield self.groups_handler.get_rooms_in_group(group_id, user_id) + + defer.returnValue((200, result)) + + +class GroupUsersServlet(RestServlet): + PATTERNS = client_v2_patterns("/groups/(?P[^/]*)/users$") + + def __init__(self, hs): + super(GroupUsersServlet, self).__init__() + self.auth = hs.get_auth() + self.clock = hs.get_clock() + self.groups_handler = hs.get_groups_local_handler() + + @defer.inlineCallbacks + def on_GET(self, request, group_id): + requester = yield self.auth.get_user_by_req(request) + user_id = requester.user.to_string() + + result = yield self.groups_handler.get_users_in_group(group_id, user_id) + + defer.returnValue((200, result)) + + +class GroupCreateServlet(RestServlet): + PATTERNS = client_v2_patterns("/create_group$") + + def __init__(self, hs): + super(GroupCreateServlet, self).__init__() + self.auth = hs.get_auth() + self.clock = hs.get_clock() + self.groups_handler = hs.get_groups_local_handler() + self.server_name = hs.hostname + + @defer.inlineCallbacks + def on_POST(self, request): + requester = yield self.auth.get_user_by_req(request) + user_id = requester.user.to_string() + + # TODO: Create group on remote server + content = parse_json_object_from_request(request) + localpart = content.pop("localpart") + group_id = GroupID.create(localpart, self.server_name).to_string() + + result = yield self.groups_handler.create_group(group_id, user_id, content) + + defer.returnValue((200, result)) + + +class GroupAdminRoomsServlet(RestServlet): + PATTERNS = client_v2_patterns( + "/groups/(?P[^/]*)/admin/rooms/(?P[^/]*)$" + ) + + def __init__(self, hs): + super(GroupAdminRoomsServlet, self).__init__() + self.auth = hs.get_auth() + self.clock = hs.get_clock() + self.groups_handler = hs.get_groups_local_handler() + + @defer.inlineCallbacks + def on_PUT(self, request, group_id, room_id): + requester = yield self.auth.get_user_by_req(request) + user_id = requester.user.to_string() + + content = parse_json_object_from_request(request) + result = yield self.groups_handler.add_room(group_id, user_id, room_id, content) + + defer.returnValue((200, result)) + + +class GroupAdminUsersInviteServlet(RestServlet): + PATTERNS = client_v2_patterns( + "/groups/(?P[^/]*)/admin/users/invite/(?P[^/]*)$" + ) + + def __init__(self, hs): + super(GroupAdminUsersInviteServlet, self).__init__() + self.auth = hs.get_auth() + self.clock = hs.get_clock() + self.groups_handler = hs.get_groups_local_handler() + self.store = hs.get_datastore() + self.is_mine_id = hs.is_mine_id + + @defer.inlineCallbacks + def on_PUT(self, request, group_id, user_id): + requester = yield self.auth.get_user_by_req(request) + requester_user_id = requester.user.to_string() + + content = parse_json_object_from_request(request) + config = content.get("config", {}) + result = yield self.groups_handler.invite( + group_id, user_id, requester_user_id, config, + ) + + defer.returnValue((200, result)) + + +class GroupAdminUsersKickServlet(RestServlet): + PATTERNS = client_v2_patterns( + "/groups/(?P[^/]*)/admin/users/remove/(?P[^/]*)$" + ) + + def __init__(self, hs): + super(GroupAdminUsersKickServlet, self).__init__() + self.auth = hs.get_auth() + self.clock = hs.get_clock() + self.groups_handler = hs.get_groups_local_handler() + + @defer.inlineCallbacks + def on_PUT(self, request, group_id, user_id): + requester = yield self.auth.get_user_by_req(request) + requester_user_id = requester.user.to_string() + + content = 
parse_json_object_from_request(request) + result = yield self.groups_handler.remove_user_from_group( + group_id, user_id, requester_user_id, content, + ) + + defer.returnValue((200, result)) + + +class GroupSelfLeaveServlet(RestServlet): + PATTERNS = client_v2_patterns( + "/groups/(?P[^/]*)/self/leave$" + ) + + def __init__(self, hs): + super(GroupSelfLeaveServlet, self).__init__() + self.auth = hs.get_auth() + self.clock = hs.get_clock() + self.groups_handler = hs.get_groups_local_handler() + + @defer.inlineCallbacks + def on_PUT(self, request, group_id): + requester = yield self.auth.get_user_by_req(request) + requester_user_id = requester.user.to_string() + + content = parse_json_object_from_request(request) + result = yield self.groups_handler.remove_user_from_group( + group_id, requester_user_id, requester_user_id, content, + ) + + defer.returnValue((200, result)) + + +class GroupSelfJoinServlet(RestServlet): + PATTERNS = client_v2_patterns( + "/groups/(?P[^/]*)/self/join$" + ) + + def __init__(self, hs): + super(GroupSelfJoinServlet, self).__init__() + self.auth = hs.get_auth() + self.clock = hs.get_clock() + self.groups_handler = hs.get_groups_local_handler() + + @defer.inlineCallbacks + def on_PUT(self, request, group_id): + requester = yield self.auth.get_user_by_req(request) + requester_user_id = requester.user.to_string() + + content = parse_json_object_from_request(request) + result = yield self.groups_handler.join_group( + group_id, requester_user_id, content, + ) + + defer.returnValue((200, result)) + + +class GroupSelfAcceptInviteServlet(RestServlet): + PATTERNS = client_v2_patterns( + "/groups/(?P[^/]*)/self/accept_invite$" + ) + + def __init__(self, hs): + super(GroupSelfAcceptInviteServlet, self).__init__() + self.auth = hs.get_auth() + self.clock = hs.get_clock() + self.groups_handler = hs.get_groups_local_handler() + + @defer.inlineCallbacks + def on_PUT(self, request, group_id): + requester = yield self.auth.get_user_by_req(request) + requester_user_id = requester.user.to_string() + + content = parse_json_object_from_request(request) + result = yield self.groups_handler.accept_invite( + group_id, requester_user_id, content, + ) + + defer.returnValue((200, result)) + + +class GroupsForUserServlet(RestServlet): + PATTERNS = client_v2_patterns( + "/joined_groups$" + ) + + def __init__(self, hs): + super(GroupsForUserServlet, self).__init__() + self.auth = hs.get_auth() + self.clock = hs.get_clock() + self.groups_handler = hs.get_groups_local_handler() + + @defer.inlineCallbacks + def on_GET(self, request): + requester = yield self.auth.get_user_by_req(request) + user_id = requester.user.to_string() + + result = yield self.groups_handler.get_joined_groups(user_id) + + defer.returnValue((200, result)) + + +def register_servlets(hs, http_server): + GroupServlet(hs).register(http_server) + GroupSummaryServlet(hs).register(http_server) + GroupUsersServlet(hs).register(http_server) + GroupRoomServlet(hs).register(http_server) + GroupCreateServlet(hs).register(http_server) + GroupAdminRoomsServlet(hs).register(http_server) + GroupAdminUsersInviteServlet(hs).register(http_server) + GroupAdminUsersKickServlet(hs).register(http_server) + GroupSelfLeaveServlet(hs).register(http_server) + GroupSelfJoinServlet(hs).register(http_server) + GroupSelfAcceptInviteServlet(hs).register(http_server) + GroupsForUserServlet(hs).register(http_server) + GroupSummaryRoomsDefaultCatServlet(hs).register(http_server) + GroupCategoryServlet(hs).register(http_server) + 
GroupCategoriesServlet(hs).register(http_server) + GroupSummaryRoomsCatServlet(hs).register(http_server) + GroupRoleServlet(hs).register(http_server) + GroupRolesServlet(hs).register(http_server) + GroupSummaryUsersDefaultRoleServlet(hs).register(http_server) + GroupSummaryUsersRoleServlet(hs).register(http_server) diff --git a/synapse/server.py b/synapse/server.py index d857cca848..d0a6272766 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -50,6 +50,7 @@ from synapse.handlers.initial_sync import InitialSyncHandler from synapse.handlers.receipts import ReceiptsHandler from synapse.handlers.read_marker import ReadMarkerHandler from synapse.handlers.user_directory import UserDirectoyHandler +from synapse.handlers.groups_local import GroupsLocalHandler from synapse.groups.groups_server import GroupsServerHandler from synapse.groups.attestations import GroupAttestionRenewer, GroupAttestationSigning from synapse.http.client import SimpleHttpClient, InsecureInterceptableContextFactory @@ -141,6 +142,7 @@ class HomeServer(object): 'read_marker_handler', 'action_generator', 'user_directory_handler', + 'groups_local_handler', 'groups_server_handler', 'groups_attestation_signing', 'groups_attestation_renewer', @@ -314,6 +316,9 @@ class HomeServer(object): def build_user_directory_handler(self): return UserDirectoyHandler(self) + def build_groups_local_handler(self): + return GroupsLocalHandler(self) + def build_groups_server_handler(self): return GroupsServerHandler(self) diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index fdee9f1ad5..594566eb38 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -136,6 +136,9 @@ class DataStore(RoomMemberStore, RoomStore, db_conn, "pushers", "id", extra_tables=[("deleted_pushers", "stream_id")], ) + self._group_updates_id_gen = StreamIdGenerator( + db_conn, "local_group_updates", "stream_id", + ) if isinstance(self.database_engine, PostgresEngine): self._cache_id_gen = StreamIdGenerator( @@ -236,6 +239,18 @@ class DataStore(RoomMemberStore, RoomStore, prefilled_cache=curr_state_delta_prefill, ) + _group_updates_prefill, min_group_updates_id = self._get_cache_dict( + db_conn, "local_group_updates", + entity_column="user_id", + stream_column="stream_id", + max_value=self._group_updates_id_gen.get_current_token(), + limit=1000, + ) + self._group_updates_stream_cache = StreamChangeCache( + "_group_updates_stream_cache", min_group_updates_id, + prefilled_cache=_group_updates_prefill, + ) + cur = LoggingTransaction( db_conn.cursor(), name="_find_stream_orderings_for_times_txn", diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index e8a799d8c7..036549d437 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -756,6 +756,103 @@ class GroupServerStore(SQLBaseStore): desc="add_room_to_group", ) + @defer.inlineCallbacks + def register_user_group_membership(self, group_id, user_id, membership, + is_admin=False, content={}, + local_attestation=None, + remote_attestation=None, + ): + def _register_user_group_membership_txn(txn, next_id): + # TODO: Upsert? 
+            self._simple_delete_txn(
+                txn,
+                table="local_group_membership",
+                keyvalues={
+                    "group_id": group_id,
+                    "user_id": user_id,
+                },
+            )
+            self._simple_insert_txn(
+                txn,
+                table="local_group_membership",
+                values={
+                    "group_id": group_id,
+                    "user_id": user_id,
+                    "is_admin": is_admin,
+                    "membership": membership,
+                    "content": json.dumps(content),
+                },
+            )
+            self._simple_delete_txn(
+                txn,
+                table="local_group_updates",
+                keyvalues={
+                    "group_id": group_id,
+                    "user_id": user_id,
+                    "type": "membership",
+                },
+            )
+            self._simple_insert_txn(
+                txn,
+                table="local_group_updates",
+                values={
+                    "stream_id": next_id,
+                    "group_id": group_id,
+                    "user_id": user_id,
+                    "type": "membership",
+                    "content": json.dumps({"membership": membership, "content": content}),
+                }
+            )
+            self._group_updates_stream_cache.entity_has_changed(user_id, next_id)
+
+            # TODO: Insert profile to ensure it comes down the stream if it's a join.
+
+            if membership == "join":
+                if local_attestation:
+                    self._simple_insert_txn(
+                        txn,
+                        table="group_attestations_renewals",
+                        values={
+                            "group_id": group_id,
+                            "user_id": user_id,
+                            "valid_until_ms": local_attestation["valid_until_ms"],
+                        }
+                    )
+                if remote_attestation:
+                    self._simple_insert_txn(
+                        txn,
+                        table="group_attestations_remote",
+                        values={
+                            "group_id": group_id,
+                            "user_id": user_id,
+                            "valid_until_ms": remote_attestation["valid_until_ms"],
+                            "attestation": json.dumps(remote_attestation),
+                        }
+                    )
+            else:
+                self._simple_delete_txn(
+                    txn,
+                    table="group_attestations_renewals",
+                    keyvalues={
+                        "group_id": group_id,
+                        "user_id": user_id,
+                    },
+                )
+                self._simple_delete_txn(
+                    txn,
+                    table="group_attestations_remote",
+                    keyvalues={
+                        "group_id": group_id,
+                        "user_id": user_id,
+                    },
+                )
+
+        with self._group_updates_id_gen.get_next() as next_id:
+            yield self.runInteraction(
+                "register_user_group_membership",
+                _register_user_group_membership_txn, next_id,
+            )
+
     @defer.inlineCallbacks
     def create_group(self, group_id, user_id, name, avatar_url, short_description, long_description,):
@@ -771,6 +868,61 @@ class GroupServerStore(SQLBaseStore):
             desc="create_group",
         )
 
+    def get_joined_groups(self, user_id):
+        return self._simple_select_onecol(
+            table="local_group_membership",
+            keyvalues={
+                "user_id": user_id,
+                "membership": "join",
+            },
+            retcol="group_id",
+            desc="get_joined_groups",
+        )
+
+    def get_all_groups_for_user(self, user_id, now_token):
+        def _get_all_groups_for_user_txn(txn):
+            sql = """
+                SELECT group_id, type, membership, u.content
+                FROM local_group_updates AS u
+                INNER JOIN local_group_membership USING (group_id, user_id)
+                WHERE user_id = ? AND membership != 'leave'
+                    AND stream_id <= ?
+            """
+            txn.execute(sql, (user_id, now_token,))
+            return self.cursor_to_dict(txn)
+        return self.runInteraction(
+            "get_all_groups_for_user", _get_all_groups_for_user_txn,
+        )
+
+    def get_groups_changes_for_user(self, user_id, from_token, to_token):
+        from_token = int(from_token)
+        has_changed = self._group_updates_stream_cache.has_entity_changed(
+            user_id, from_token,
+        )
+        if not has_changed:
+            return []
+
+        def _get_groups_changes_for_user_txn(txn):
+            sql = """
+                SELECT group_id, membership, type, u.content
+                FROM local_group_updates AS u
+                INNER JOIN local_group_membership USING (group_id, user_id)
+                WHERE user_id = ? AND ? < stream_id AND stream_id <= ?
+ """ + txn.execute(sql, (user_id, from_token, to_token,)) + return [{ + "group_id": group_id, + "membership": membership, + "type": gtype, + "content": json.loads(content_json), + } for group_id, membership, gtype, content_json in txn] + return self.runInteraction( + "get_groups_changes_for_user", _get_groups_changes_for_user_txn, + ) + + def get_group_stream_token(self): + return self._group_updates_id_gen.get_current_token() + def get_attestations_need_renewals(self, valid_until_ms): """Get all attestations that need to be renewed until givent time """ diff --git a/synapse/storage/schema/delta/43/group_server.sql b/synapse/storage/schema/delta/43/group_server.sql index 472aab0a78..e32db8b313 100644 --- a/synapse/storage/schema/delta/43/group_server.sql +++ b/synapse/storage/schema/delta/43/group_server.sql @@ -142,3 +142,31 @@ CREATE TABLE group_attestations_remote ( CREATE INDEX group_attestations_remote_g_idx ON group_attestations_remote(group_id, user_id); CREATE INDEX group_attestations_remote_u_idx ON group_attestations_remote(user_id); CREATE INDEX group_attestations_remote_v_idx ON group_attestations_remote(valid_until_ms); + + +CREATE TABLE local_group_membership ( + group_id TEXT NOT NULL, + user_id TEXT NOT NULL, + is_admin BOOLEAN NOT NULL, + membership TEXT NOT NULL, + content TEXT NOT NULL +); + +CREATE INDEX local_group_membership_u_idx ON local_group_membership(user_id, group_id); +CREATE INDEX local_group_membership_g_idx ON local_group_membership(group_id); + + +CREATE TABLE local_group_updates ( + stream_id BIGINT NOT NULL, + group_id TEXT NOT NULL, + user_id TEXT NOT NULL, + type TEXT NOT NULL, + content TEXT NOT NULL +); + + +CREATE TABLE local_group_profiles ( + group_id TEXT NOT NULL, + name TEXT, + avatar_url TEXT +); From e96ee95a7e84e7d75ac57395bb64e1c3428596e9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 18 Jul 2017 09:33:16 +0100 Subject: [PATCH 0044/1637] Remove sync stuff --- synapse/storage/__init__.py | 15 --------- synapse/storage/group_server.py | 55 --------------------------------- 2 files changed, 70 deletions(-) diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 594566eb38..fdee9f1ad5 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -136,9 +136,6 @@ class DataStore(RoomMemberStore, RoomStore, db_conn, "pushers", "id", extra_tables=[("deleted_pushers", "stream_id")], ) - self._group_updates_id_gen = StreamIdGenerator( - db_conn, "local_group_updates", "stream_id", - ) if isinstance(self.database_engine, PostgresEngine): self._cache_id_gen = StreamIdGenerator( @@ -239,18 +236,6 @@ class DataStore(RoomMemberStore, RoomStore, prefilled_cache=curr_state_delta_prefill, ) - _group_updates_prefill, min_group_updates_id = self._get_cache_dict( - db_conn, "local_group_updates", - entity_column="user_id", - stream_column="stream_id", - max_value=self._group_updates_id_gen.get_current_token(), - limit=1000, - ) - self._group_updates_stream_cache = StreamChangeCache( - "_group_updates_stream_cache", min_group_updates_id, - prefilled_cache=_group_updates_prefill, - ) - cur = LoggingTransaction( db_conn.cursor(), name="_find_stream_orderings_for_times_txn", diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index 036549d437..2dcdcbfdfc 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -868,61 +868,6 @@ class GroupServerStore(SQLBaseStore): desc="create_group", ) - def get_joined_groups(self, user_id): - return self._simple_select_onecol( 
- table="local_group_membership", - keyvalues={ - "user_id": user_id, - "membership": "join", - }, - retcol="group_id", - desc="get_joined_groups", - ) - - def get_all_groups_for_user(self, user_id, now_token): - def _get_all_groups_for_user_txn(txn): - sql = """ - SELECT group_id, type, membership, u.content - FROM local_group_updates AS u - INNER JOIN local_group_membership USING (group_id, user_id) - WHERE user_id = ? AND membership != 'leave' - AND stream_id <= ? - """ - txn.execute(sql, (user_id, now_token,)) - return self.cursor_to_dict(txn) - return self.runInteraction( - "get_all_groups_for_user", _get_all_groups_for_user_txn, - ) - - def get_groups_changes_for_user(self, user_id, from_token, to_token): - from_token = int(from_token) - has_changed = self._group_updates_stream_cache.has_entity_changed( - user_id, from_token, - ) - if not has_changed: - return [] - - def _get_groups_changes_for_user_txn(txn): - sql = """ - SELECT group_id, membership, type, u.content - FROM local_group_updates AS u - INNER JOIN local_group_membership USING (group_id, user_id) - WHERE user_id = ? AND ? < stream_id AND stream_id <= ? - """ - txn.execute(sql, (user_id, from_token, to_token,)) - return [{ - "group_id": group_id, - "membership": membership, - "type": gtype, - "content": json.loads(content_json), - } for group_id, membership, gtype, content_json in txn] - return self.runInteraction( - "get_groups_changes_for_user", _get_groups_changes_for_user_txn, - ) - - def get_group_stream_token(self): - return self._group_updates_id_gen.get_current_token() - def get_attestations_need_renewals(self, valid_until_ms): """Get all attestations that need to be renewed until givent time """ From 45407301111e55d04f46957a824d73eab69796de Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 18 Jul 2017 09:36:17 +0100 Subject: [PATCH 0045/1637] Remove unused tables --- synapse/storage/schema/delta/43/group_server.sql | 7 ------- 1 file changed, 7 deletions(-) diff --git a/synapse/storage/schema/delta/43/group_server.sql b/synapse/storage/schema/delta/43/group_server.sql index e32db8b313..f9e11c9146 100644 --- a/synapse/storage/schema/delta/43/group_server.sql +++ b/synapse/storage/schema/delta/43/group_server.sql @@ -163,10 +163,3 @@ CREATE TABLE local_group_updates ( type TEXT NOT NULL, content TEXT NOT NULL ); - - -CREATE TABLE local_group_profiles ( - group_id TEXT NOT NULL, - name TEXT, - avatar_url TEXT -); From 6e9f147faa528c7493dbaa4e12b64baef2379d83 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 18 Jul 2017 09:47:25 +0100 Subject: [PATCH 0046/1637] Add GroupID type --- synapse/types.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/synapse/types.py b/synapse/types.py index 111948540d..b32c0e360d 100644 --- a/synapse/types.py +++ b/synapse/types.py @@ -156,6 +156,11 @@ class EventID(DomainSpecificString): SIGIL = "$" +class GroupID(DomainSpecificString): + """Structure representing a group ID.""" + SIGIL = "+" + + class StreamToken( namedtuple("Token", ( "room_key", From 508460f24077da74d8a3d3ce891c0b55ebbce2e8 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 18 Jul 2017 09:55:46 +0100 Subject: [PATCH 0047/1637] Remove sync stuff --- synapse/storage/group_server.py | 20 ------------------- .../storage/schema/delta/43/group_server.sql | 10 +--------- 2 files changed, 1 insertion(+), 29 deletions(-) diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index 2dcdcbfdfc..bff2324cc7 100644 --- a/synapse/storage/group_server.py +++ 
b/synapse/storage/group_server.py @@ -783,26 +783,6 @@ class GroupServerStore(SQLBaseStore): "content": json.dumps(content), }, ) - self._simple_delete_txn( - txn, - table="local_group_updates", - keyvalues={ - "group_id": group_id, - "user_id": user_id, - "type": "membership", - }, - ) - self._simple_insert_txn( - txn, - table="local_group_updates", - values={ - "stream_id": next_id, - "group_id": group_id, - "user_id": user_id, - "type": "membership", - "content": json.dumps({"membership": membership, "content": content}), - } - ) self._group_updates_stream_cache.entity_has_changed(user_id, next_id) # TODO: Insert profile to ensuer it comes down stream if its a join. diff --git a/synapse/storage/schema/delta/43/group_server.sql b/synapse/storage/schema/delta/43/group_server.sql index f9e11c9146..e1fd47aa7f 100644 --- a/synapse/storage/schema/delta/43/group_server.sql +++ b/synapse/storage/schema/delta/43/group_server.sql @@ -144,6 +144,7 @@ CREATE INDEX group_attestations_remote_u_idx ON group_attestations_remote(user_i CREATE INDEX group_attestations_remote_v_idx ON group_attestations_remote(valid_until_ms); +-- The group membership for the HS's users CREATE TABLE local_group_membership ( group_id TEXT NOT NULL, user_id TEXT NOT NULL, @@ -154,12 +155,3 @@ CREATE TABLE local_group_membership ( CREATE INDEX local_group_membership_u_idx ON local_group_membership(user_id, group_id); CREATE INDEX local_group_membership_g_idx ON local_group_membership(group_id); - - -CREATE TABLE local_group_updates ( - stream_id BIGINT NOT NULL, - group_id TEXT NOT NULL, - user_id TEXT NOT NULL, - type TEXT NOT NULL, - content TEXT NOT NULL -); From 3e703eb04e1b30dc2bce03d3895ac79ac24a063d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 18 Jul 2017 10:17:25 +0100 Subject: [PATCH 0048/1637] Comment --- synapse/storage/group_server.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index bff2324cc7..3c6ee7df68 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -762,6 +762,20 @@ class GroupServerStore(SQLBaseStore): local_attestation=None, remote_attestation=None, ): + """Registers that a local user is a member of a (local or remote) group. + + Args: + group_id (str) + user_id (str) + membership (str) + is_admin (bool) + content (dict): Content of the membership, e.g. includes the inviter + if the user has been invited. + local_attestation (dict): If remote group then store the fact that we + have given out an attestation, else None. + remote_attestation (dict): If remote group then store the remote + attestation from the group, else None. + """ def _register_user_group_membership_txn(txn, next_id): # TODO: Upsert? 
self._simple_delete_txn( From 68f34e85cebcacef428d1f38942990c43c3cdd01 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 18 Jul 2017 10:29:57 +0100 Subject: [PATCH 0049/1637] Use transport client directly --- synapse/handlers/groups_local.py | 43 +++++++++++++++++++------------- 1 file changed, 25 insertions(+), 18 deletions(-) diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index 3df255b05a..e0f53120be 100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -32,15 +32,17 @@ logger = logging.getLogger(__name__) # TODO: Add group memebership /sync -def _create_rerouter(name): +def _create_rerouter(func_name): + """Returns a function that looks at the group id and calls the function + on federation or the local group server if the group is local + """ def f(self, group_id, *args, **kwargs): if self.is_mine_id(group_id): - return getattr(self.groups_server_handler, name)( + return getattr(self.groups_server_handler, func_name)( group_id, *args, **kwargs ) - repl_layer = self.hs.get_replication_layer() - return getattr(repl_layer, name)(group_id, *args, **kwargs) + return getattr(self.transport_client, func_name)(group_id, *args, **kwargs) return f @@ -50,6 +52,7 @@ class GroupsLocalHandler(object): self.store = hs.get_datastore() self.room_list_handler = hs.get_room_list_handler() self.groups_server_handler = hs.get_groups_server_handler() + self.transport_client = hs.get_federation_transport_client() self.auth = hs.get_auth() self.clock = hs.get_clock() self.keyring = hs.get_keyring() @@ -82,15 +85,19 @@ class GroupsLocalHandler(object): @defer.inlineCallbacks def get_group_summary(self, group_id, requester_user_id): + """Get the group summary for a group. + + If the group is remote we check that the users have valid attestations. + """ if self.is_mine_id(group_id): res = yield self.groups_server_handler.get_group_summary( group_id, requester_user_id ) defer.returnValue(res) - repl_layer = self.hs.get_replication_layer() - res = yield repl_layer.get_group_summary(group_id, requester_user_id) + res = yield self.transport_client.get_group_summary(group_id, requester_user_id) + # Loop through the users and validate the attestations. 
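Two things land in this patch: `get_group_summary` starts validating remote users' attestations (the loop this comment introduces continues directly below), and, more structurally, `_create_rerouter` reduces "local group server or federation?" dispatch to one line per API. A distilled, runnable sketch of that pattern follows; the handler and transport classes are toy stand-ins, and the explicit federation destination shown is the form this series settles on a few patches later (PATCH 0052):

```python
class LocalGroupsServer:
    def get_group_profile(self, group_id, requester_user_id):
        return {"name": "local group %s" % group_id}

class FederationTransport:
    def get_group_profile(self, destination, group_id, requester_user_id):
        return {"name": "remote group %s via %s" % (group_id, destination)}

def get_domain_from_id(string_id):
    # Matrix IDs look like "+localpart:domain"; everything after the
    # first colon is the server name.
    return string_id.split(":", 1)[1]

def _create_rerouter(func_name):
    def f(self, group_id, *args, **kwargs):
        if self.is_mine_id(group_id):
            # The group lives on this homeserver: call the local handler.
            return getattr(self.groups_server_handler, func_name)(
                group_id, *args, **kwargs
            )
        # Otherwise route the call over federation to the owning server.
        destination = get_domain_from_id(group_id)
        return getattr(self.transport_client, func_name)(
            destination, group_id, *args, **kwargs
        )
    return f

class GroupsLocalHandler:
    def __init__(self, server_name):
        self.server_name = server_name
        self.groups_server_handler = LocalGroupsServer()
        self.transport_client = FederationTransport()

    def is_mine_id(self, string_id):
        return get_domain_from_id(string_id) == self.server_name

    # Installing the closure as a class attribute makes it an ordinary
    # method, so each routed API stays a single declarative line.
    get_group_profile = _create_rerouter("get_group_profile")

handler = GroupsLocalHandler("example.com")
print(handler.get_group_profile("+g:example.com", "@alice:example.com"))
print(handler.get_group_profile("+g:other.org", "@alice:example.com"))
```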
chunk = res["users_section"]["users"] valid_users = [] for entry in chunk: @@ -121,8 +128,7 @@ class GroupsLocalHandler(object): group_id, user_id, content ) - repl_layer = self.hs.get_replication_layer() - return repl_layer.create_group(group_id, user_id, content) # TODO + return self.transport_client.create_group(group_id, user_id, content) # TODO def add_room(self, group_id, user_id, room_id, content): if self.is_mine_id(group_id): @@ -130,8 +136,9 @@ class GroupsLocalHandler(object): group_id, user_id, room_id, content ) - repl_layer = self.hs.get_replication_layer() - return repl_layer.add_room_to_group(group_id, user_id, room_id, content) # TODO + return self.transport_client.add_room_to_group( + group_id, user_id, room_id, content, + ) # TODO @defer.inlineCallbacks def get_users_in_group(self, group_id, requester_user_id): @@ -141,8 +148,9 @@ class GroupsLocalHandler(object): ) defer.returnValue(res) - repl_layer = self.hs.get_replication_layer() - res = yield repl_layer.get_users_in_group(group_id, requester_user_id) # TODO + res = yield self.transport_client.get_users_in_group( + group_id, requester_user_id, + ) # TODO chunk = res["chunk"] valid_entries = [] @@ -179,8 +187,9 @@ class GroupsLocalHandler(object): local_attestation = self.attestations.create_attestation(group_id, user_id) content["attestation"] = local_attestation - repl_layer = self.hs.get_replication_layer() - res = yield repl_layer.accept_group_invite(group_id, user_id, content) + res = yield self.transport_client.accept_group_invite( + group_id, user_id, content, + ) remote_attestation = res["attestation"] @@ -211,8 +220,7 @@ class GroupsLocalHandler(object): group_id, user_id, requester_user_id, content, ) else: - repl_layer = self.hs.get_replication_layer() - res = yield repl_layer.invite_to_group( + res = yield self.transport_client.invite_to_group( group_id, user_id, content, ) @@ -257,8 +265,7 @@ class GroupsLocalHandler(object): ) else: content["requester_user_id"] = requester_user_id - repl_layer = self.hs.get_replication_layer() - res = yield repl_layer.remove_user_from_group( + res = yield self.transport_client.remove_user_from_group( group_id, user_id, content ) # TODO From cccfcfa7b9e2af28bb56d6f970754fe5aa07ad56 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 18 Jul 2017 10:31:59 +0100 Subject: [PATCH 0050/1637] Comments --- synapse/federation/transport/server.py | 34 ++++++++++++++------------ synapse/handlers/groups_local.py | 3 +++ 2 files changed, 22 insertions(+), 15 deletions(-) diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 0f08334f33..d68e90d2f7 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -715,21 +715,6 @@ class FederationGroupsInviteServlet(BaseFederationServlet): defer.returnValue((200, new_content)) -class FederationGroupsLocalInviteServlet(BaseFederationServlet): - PATH = "/groups/local/(?P[^/]*)/users/(?P[^/]*)/invite$" - - @defer.inlineCallbacks - def on_POST(self, origin, content, query, group_id, user_id): - if get_domain_from_id(group_id) != origin: - raise SynapseError(403, "group_id doesn't match origin") - - new_content = yield self.handler.on_invite( - group_id, user_id, content, - ) - - defer.returnValue((200, new_content)) - - class FederationGroupsAcceptInviteServlet(BaseFederationServlet): """Accept an invitation from the group server """ @@ -765,7 +750,26 @@ class FederationGroupsRemoveUserServlet(BaseFederationServlet): defer.returnValue((200, new_content)) 
+class FederationGroupsLocalInviteServlet(BaseFederationServlet): + """A group server has invited a local user + """ + PATH = "/groups/local/(?P[^/]*)/users/(?P[^/]*)/invite$" + + @defer.inlineCallbacks + def on_POST(self, origin, content, query, group_id, user_id): + if get_domain_from_id(group_id) != origin: + raise SynapseError(403, "group_id doesn't match origin") + + new_content = yield self.handler.on_invite( + group_id, user_id, content, + ) + + defer.returnValue((200, new_content)) + + class FederationGroupsRemoveLocalUserServlet(BaseFederationServlet): + """A group server has removed a local user + """ PATH = "/groups/local/(?P[^/]*)/users/(?P[^/]*)/remove$" @defer.inlineCallbacks diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index e0f53120be..0857b14c7a 100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -64,6 +64,9 @@ class GroupsLocalHandler(object): # Ensure attestations get renewed hs.get_groups_attestation_renewer() + # The following functions merely route the query to the local groups server + # or federation depending on if the group is local or remote + get_group_profile = _create_rerouter("get_group_profile") get_rooms_in_group = _create_rerouter("get_rooms_in_group") From e5ea6dd021ea71f3b5bc9a37fb896c351ee550b1 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 18 Jul 2017 14:37:06 +0100 Subject: [PATCH 0051/1637] Add client apis --- synapse/federation/transport/client.py | 196 +++++++++++++++++++++++-- synapse/handlers/groups_local.py | 2 +- 2 files changed, 188 insertions(+), 10 deletions(-) diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index ea340e345c..500f3622a2 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -476,10 +476,10 @@ class TransportLayerClient(object): def get_group_profile(self, destination, group_id, requester_user_id): path = PREFIX + "/groups/%s/profile" % (group_id,) - return self.client.post_json( + return self.client.get_json( destination=destination, path=path, - data={"requester_user_id": requester_user_id}, + args={"requester_user_id": requester_user_id}, ignore_backoff=True, ) @@ -487,10 +487,10 @@ class TransportLayerClient(object): def get_group_summary(self, destination, group_id, requester_user_id): path = PREFIX + "/groups/%s/summary" % (group_id,) - return self.client.post_json( + return self.client.get_json( destination=destination, path=path, - data={"requester_user_id": requester_user_id}, + args={"requester_user_id": requester_user_id}, ignore_backoff=True, ) @@ -498,10 +498,22 @@ class TransportLayerClient(object): def get_group_rooms(self, destination, group_id, requester_user_id): path = PREFIX + "/groups/%s/rooms" % (group_id,) + return self.client.get_json( + destination=destination, + path=path, + args={"requester_user_id": requester_user_id}, + ignore_backoff=True, + ) + + def add_room_to_group(self, destination, group_id, requester_user_id, room_id, + content): + path = PREFIX + "/groups/%s/room/%s" % (group_id, room_id,) + return self.client.post_json( destination=destination, path=path, - data={"requester_user_id": requester_user_id}, + args={"requester_user_id": requester_user_id}, + data=content, ignore_backoff=True, ) @@ -509,10 +521,10 @@ class TransportLayerClient(object): def get_group_users(self, destination, group_id, requester_user_id): path = PREFIX + "/groups/%s/users" % (group_id,) - return self.client.post_json( + return 
self.client.get_json( destination=destination, path=path, - data={"requester_user_id": requester_user_id}, + args={"requester_user_id": requester_user_id}, ignore_backoff=True, ) @@ -528,12 +540,13 @@ class TransportLayerClient(object): ) @log_function - def invite_to_group(self, destination, group_id, user_id, content): + def invite_to_group(self, destination, group_id, user_id, requester_user_id, content): path = PREFIX + "/groups/%s/users/%s/invite" % (group_id, user_id) return self.client.post_json( destination=destination, path=path, + args=requester_user_id, data=content, ignore_backoff=True, ) @@ -554,12 +567,14 @@ class TransportLayerClient(object): ) @log_function - def remove_user_from_group(self, destination, group_id, user_id, content): + def remove_user_from_group(self, destination, group_id, requester_user_id, + user_id, content): path = PREFIX + "/groups/%s/users/%s/remove" % (group_id, user_id) return self.client.post_json( destination=destination, path=path, + args={"requester_user_id": requester_user_id}, data=content, ignore_backoff=True, ) @@ -594,3 +609,166 @@ class TransportLayerClient(object): data=content, ignore_backoff=True, ) + + @log_function + def update_group_summary_room(self, destination, group_id, user_id, room_id, + category_id, content): + if category_id: + path = PREFIX + "/groups/%s/summary/categories/%s/rooms/%s" % ( + group_id, category_id, room_id, + ) + else: + path = PREFIX + "/groups/%s/summary/rooms/%s" % (group_id, room_id,) + + return self.client.post_json( + destination=destination, + path=path, + args={"requester_user_id": user_id}, + data=content, + ignore_backoff=True, + ) + + @log_function + def delete_group_summary_room(self, destination, group_id, user_id, room_id, + category_id): + if category_id: + path = PREFIX + "/groups/%s/summary/categories/%s/rooms/%s" % ( + group_id, category_id, room_id, + ) + else: + path = PREFIX + "/groups/%s/summary/rooms/%s" % (group_id, room_id,) + + return self.client.delete_json( + destination=destination, + path=path, + args={"requester_user_id": user_id}, + ignore_backoff=True, + ) + + @log_function + def get_group_categories(self, destination, group_id, requester_user_id): + path = PREFIX + "/groups/%s/categories" % (group_id,) + + return self.client.get_json( + destination=destination, + path=path, + args={"requester_user_id": requester_user_id}, + ignore_backoff=True, + ) + + @log_function + def get_group_category(self, destination, group_id, requester_user_id, category_id): + path = PREFIX + "/groups/%s/categories/%s" % (group_id, category_id,) + + return self.client.get_json( + destination=destination, + path=path, + args={"requester_user_id": requester_user_id}, + ignore_backoff=True, + ) + + @log_function + def update_group_category(self, destination, group_id, requester_user_id, category_id, + content): + path = PREFIX + "/groups/%s/categories/%s" % (group_id, category_id,) + + return self.client.post_json( + destination=destination, + path=path, + args={"requester_user_id": requester_user_id}, + data=content, + ignore_backoff=True, + ) + + @log_function + def delete_group_category(self, destination, group_id, requester_user_id, + category_id): + path = PREFIX + "/groups/%s/categories/%s" % (group_id, category_id,) + + return self.client.delete_json( + destination=destination, + path=path, + args={"requester_user_id": requester_user_id}, + ignore_backoff=True, + ) + + @log_function + def get_group_roles(self, destination, group_id, requester_user_id): + path = PREFIX + "/groups/%s/roles" % 
(group_id,) + + return self.client.get_json( + destination=destination, + path=path, + args={"requester_user_id": requester_user_id}, + ignore_backoff=True, + ) + + @log_function + def get_group_role(self, destination, group_id, requester_user_id, role_id): + path = PREFIX + "/groups/%s/roles/%s" % (group_id, role_id,) + + return self.client.get_json( + destination=destination, + path=path, + args={"requester_user_id": requester_user_id}, + ignore_backoff=True, + ) + + @log_function + def update_group_role(self, destination, group_id, requester_user_id, role_id, + content): + path = PREFIX + "/groups/%s/roles/%s" % (group_id, role_id,) + + return self.client.post_json( + destination=destination, + path=path, + args={"requester_user_id": requester_user_id}, + data=content, + ignore_backoff=True, + ) + + @log_function + def delete_group_role(self, destination, group_id, requester_user_id, role_id): + path = PREFIX + "/groups/%s/roles/%s" % (group_id, role_id,) + + return self.client.delete_json( + destination=destination, + path=path, + args={"requester_user_id": requester_user_id}, + ignore_backoff=True, + ) + + @log_function + def update_group_summary_user(self, destination, group_id, requester_user_id, + user_id, role_id, content): + if role_id: + path = PREFIX + "/groups/%s/summary/roles/%s/users/%s" % ( + group_id, role_id, user_id, + ) + else: + path = PREFIX + "/groups/%s/summary/users/%s" % (group_id, user_id,) + + return self.client.post_json( + destination=destination, + path=path, + args={"requester_user_id": requester_user_id}, + data=content, + ignore_backoff=True, + ) + + @log_function + def delete_group_summary_user(self, destination, group_id, requester_user_id, + user_id, role_id): + if role_id: + path = PREFIX + "/groups/%s/summary/roles/%s/users/%s" % ( + group_id, role_id, user_id, + ) + else: + path = PREFIX + "/groups/%s/summary/users/%s" % (group_id, user_id,) + + return self.client.delete_json( + destination=destination, + path=path, + args={"requester_user_id": requester_user_id}, + ignore_backoff=True, + ) diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index 0857b14c7a..6962210526 100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -224,7 +224,7 @@ class GroupsLocalHandler(object): ) else: res = yield self.transport_client.invite_to_group( - group_id, user_id, content, + group_id, user_id, requester_user_id, content, ) defer.returnValue(res) From 332839f6ea4f4ae6e89c383bfb334b6ddecd3e53 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 18 Jul 2017 14:45:37 +0100 Subject: [PATCH 0052/1637] Update federation client pokes --- synapse/handlers/groups_local.py | 35 ++++++++++++++++++++------------ 1 file changed, 22 insertions(+), 13 deletions(-) diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index 6962210526..7d7fc5d976 100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -16,6 +16,7 @@ from twisted.internet import defer from synapse.api.errors import SynapseError +from synapse.types import get_domain_from_id import logging @@ -41,8 +42,11 @@ def _create_rerouter(func_name): return getattr(self.groups_server_handler, func_name)( group_id, *args, **kwargs ) - - return getattr(self.transport_client, func_name)(group_id, *args, **kwargs) + else: + destination = get_domain_from_id(group_id) + return getattr(self.transport_client, func_name)( + destination, group_id, *args, **kwargs + ) return f @@ -98,7 +102,9 @@ class 
GroupsLocalHandler(object): ) defer.returnValue(res) - res = yield self.transport_client.get_group_summary(group_id, requester_user_id) + res = yield self.transport_client.get_group_summary( + get_domain_from_id(group_id), group_id, requester_user_id, + ) # Loop through the users and validate the attestations. chunk = res["users_section"]["users"] @@ -131,7 +137,9 @@ class GroupsLocalHandler(object): group_id, user_id, content ) - return self.transport_client.create_group(group_id, user_id, content) # TODO + return self.transport_client.create_group( + get_domain_from_id(group_id), group_id, user_id, content, + ) # TODO def add_room(self, group_id, user_id, room_id, content): if self.is_mine_id(group_id): @@ -140,8 +148,8 @@ class GroupsLocalHandler(object): ) return self.transport_client.add_room_to_group( - group_id, user_id, room_id, content, - ) # TODO + get_domain_from_id(group_id), group_id, user_id, room_id, content, + ) @defer.inlineCallbacks def get_users_in_group(self, group_id, requester_user_id): @@ -151,9 +159,9 @@ class GroupsLocalHandler(object): ) defer.returnValue(res) - res = yield self.transport_client.get_users_in_group( - group_id, requester_user_id, - ) # TODO + res = yield self.transport_client.get_group_users( + get_domain_from_id(group_id), group_id, requester_user_id, + ) chunk = res["chunk"] valid_entries = [] @@ -191,7 +199,7 @@ class GroupsLocalHandler(object): content["attestation"] = local_attestation res = yield self.transport_client.accept_group_invite( - group_id, user_id, content, + get_domain_from_id(group_id), group_id, user_id, content, ) remote_attestation = res["attestation"] @@ -224,7 +232,8 @@ class GroupsLocalHandler(object): ) else: res = yield self.transport_client.invite_to_group( - group_id, user_id, requester_user_id, content, + get_domain_from_id(group_id), group_id, user_id, requester_user_id, + content, ) defer.returnValue(res) @@ -269,8 +278,8 @@ class GroupsLocalHandler(object): else: content["requester_user_id"] = requester_user_id res = yield self.transport_client.remove_user_from_group( - group_id, user_id, content - ) # TODO + get_domain_from_id(group_id), group_id, user_id, content + ) defer.returnValue(res) From 12ed4ee48e37ea51d4addddc1b4e6e9a194199ba Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 18 Jul 2017 15:33:09 +0100 Subject: [PATCH 0053/1637] Correctly parse query params --- synapse/federation/transport/server.py | 38 +++++++++++++------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index d68e90d2f7..29e966ac29 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -616,7 +616,7 @@ class FederationGroupsProfileServlet(BaseFederationServlet): @defer.inlineCallbacks def on_GET(self, origin, content, query, group_id): - requester_user_id = query["requester_user_id"] + requester_user_id = parse_string_from_args(query, "requester_user_id") if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") @@ -632,7 +632,7 @@ class FederationGroupsSummaryServlet(BaseFederationServlet): @defer.inlineCallbacks def on_GET(self, origin, content, query, group_id): - requester_user_id = query["requester_user_id"] + requester_user_id = parse_string_from_args(query, "requester_user_id") if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") @@ -650,7 +650,7 @@ class 
FederationGroupsRoomsServlet(BaseFederationServlet): @defer.inlineCallbacks def on_GET(self, origin, content, query, group_id): - requester_user_id = query["requester_user_id"] + requester_user_id = parse_string_from_args(query, "requester_user_id") if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") @@ -668,7 +668,7 @@ class FederationGroupsAddRoomsServlet(BaseFederationServlet): @defer.inlineCallbacks def on_POST(self, origin, content, query, group_id, room_id): - requester_user_id = query["requester_user_id"] + requester_user_id = parse_string_from_args(query, "requester_user_id") if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") @@ -686,7 +686,7 @@ class FederationGroupsUsersServlet(BaseFederationServlet): @defer.inlineCallbacks def on_GET(self, origin, content, query, group_id): - requester_user_id = query["requester_user_id"] + requester_user_id = parse_string_from_args(query, "requester_user_id") if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") @@ -704,7 +704,7 @@ class FederationGroupsInviteServlet(BaseFederationServlet): @defer.inlineCallbacks def on_POST(self, origin, content, query, group_id, user_id): - requester_user_id = query["requester_user_id"] + requester_user_id = parse_string_from_args(query, "requester_user_id") if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") @@ -739,7 +739,7 @@ class FederationGroupsRemoveUserServlet(BaseFederationServlet): @defer.inlineCallbacks def on_POST(self, origin, content, query, group_id, user_id): - requester_user_id = query["requester_user_id"] + requester_user_id = parse_string_from_args(query, "requester_user_id") if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") @@ -815,7 +815,7 @@ class FederationGroupsSummaryRoomsServlet(BaseFederationServlet): @defer.inlineCallbacks def on_POST(self, origin, content, query, group_id, category_id, room_id): - requester_user_id = query["requester_user_id"] + requester_user_id = parse_string_from_args(query, "requester_user_id") if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") @@ -833,7 +833,7 @@ class FederationGroupsSummaryRoomsServlet(BaseFederationServlet): @defer.inlineCallbacks def on_DELETE(self, origin, content, query, group_id, category_id, room_id): - requester_user_id = query["requester_user_id"] + requester_user_id = parse_string_from_args(query, "requester_user_id") if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") @@ -858,7 +858,7 @@ class FederationGroupsCategoriesServlet(BaseFederationServlet): @defer.inlineCallbacks def on_GET(self, origin, content, query, group_id): - requester_user_id = query["requester_user_id"] + requester_user_id = parse_string_from_args(query, "requester_user_id") if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") @@ -878,7 +878,7 @@ class FederationGroupsCategoryServlet(BaseFederationServlet): @defer.inlineCallbacks def on_GET(self, origin, content, query, group_id, category_id): - requester_user_id = query["requester_user_id"] + requester_user_id = parse_string_from_args(query, "requester_user_id") if 
get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") @@ -890,7 +890,7 @@ class FederationGroupsCategoryServlet(BaseFederationServlet): @defer.inlineCallbacks def on_POST(self, origin, content, query, group_id, category_id): - requester_user_id = query["requester_user_id"] + requester_user_id = parse_string_from_args(query, "requester_user_id") if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") @@ -905,7 +905,7 @@ class FederationGroupsCategoryServlet(BaseFederationServlet): @defer.inlineCallbacks def on_DELETE(self, origin, content, query, group_id, category_id): - requester_user_id = query["requester_user_id"] + requester_user_id = parse_string_from_args(query, "requester_user_id") if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") @@ -928,7 +928,7 @@ class FederationGroupsRolesServlet(BaseFederationServlet): @defer.inlineCallbacks def on_GET(self, origin, content, query, group_id): - requester_user_id = query["requester_user_id"] + requester_user_id = parse_string_from_args(query, "requester_user_id") if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") @@ -948,7 +948,7 @@ class FederationGroupsRoleServlet(BaseFederationServlet): @defer.inlineCallbacks def on_GET(self, origin, content, query, group_id, role_id): - requester_user_id = query["requester_user_id"] + requester_user_id = parse_string_from_args(query, "requester_user_id") if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") @@ -960,7 +960,7 @@ class FederationGroupsRoleServlet(BaseFederationServlet): @defer.inlineCallbacks def on_POST(self, origin, content, query, group_id, role_id): - requester_user_id = query["requester_user_id"] + requester_user_id = parse_string_from_args(query, "requester_user_id") if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") @@ -975,7 +975,7 @@ class FederationGroupsRoleServlet(BaseFederationServlet): @defer.inlineCallbacks def on_DELETE(self, origin, content, query, group_id, role_id): - requester_user_id = query["requester_user_id"] + requester_user_id = parse_string_from_args(query, "requester_user_id") if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") @@ -1004,7 +1004,7 @@ class FederationGroupsSummaryUsersServlet(BaseFederationServlet): @defer.inlineCallbacks def on_POST(self, origin, content, query, group_id, role_id, user_id): - requester_user_id = query["requester_user_id"] + requester_user_id = parse_string_from_args(query, "requester_user_id") if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") @@ -1022,7 +1022,7 @@ class FederationGroupsSummaryUsersServlet(BaseFederationServlet): @defer.inlineCallbacks def on_DELETE(self, origin, content, query, group_id, role_id, user_id): - requester_user_id = query["requester_user_id"] + requester_user_id = parse_string_from_args(query, "requester_user_id") if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") From 94ecd871a047707da5998f83440c039d064de8aa Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 18 Jul 2017 16:38:54 +0100 Subject: [PATCH 0054/1637] Fix typos 
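The change in PATCH 0053 above is mechanical but load-bearing: twisted hands a servlet its query string as a dict mapping each parameter name to a list of values, so the old `query["requester_user_id"]` produced a one-element list (or raised KeyError when the parameter was absent) where a plain string was expected. A simplified stand-in for the helper; the real `parse_string_from_args` in `synapse.http.servlet` also handles required parameters, allowed values and encoding, so treat this as an approximation:

```python
def parse_string_from_args(args, name, default=None):
    # args mirrors twisted's request.args: {name: [value, ...]}
    if name in args:
        return args[name][0]
    return default

query = {"requester_user_id": ["@alice:example.com"]}

assert parse_string_from_args(query, "requester_user_id") == "@alice:example.com"
assert parse_string_from_args(query, "missing") is None
```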
--- synapse/federation/transport/client.py | 4 ++-- synapse/handlers/groups_local.py | 5 +++-- synapse/storage/group_server.py | 25 +++++++++++++++++-------- 3 files changed, 22 insertions(+), 12 deletions(-) diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 500f3622a2..e4d84c06c1 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -495,7 +495,7 @@ class TransportLayerClient(object): ) @log_function - def get_group_rooms(self, destination, group_id, requester_user_id): + def get_rooms_in_group(self, destination, group_id, requester_user_id): path = PREFIX + "/groups/%s/rooms" % (group_id,) return self.client.get_json( @@ -518,7 +518,7 @@ class TransportLayerClient(object): ) @log_function - def get_group_users(self, destination, group_id, requester_user_id): + def get_users_in_group(self, destination, group_id, requester_user_id): path = PREFIX + "/groups/%s/users" % (group_id,) return self.client.get_json( diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index 7d7fc5d976..50f7fce885 100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -159,7 +159,7 @@ class GroupsLocalHandler(object): ) defer.returnValue(res) - res = yield self.transport_client.get_group_users( + res = yield self.transport_client.get_users_in_group( get_domain_from_id(group_id), group_id, requester_user_id, ) @@ -278,7 +278,8 @@ class GroupsLocalHandler(object): else: content["requester_user_id"] = requester_user_id res = yield self.transport_client.remove_user_from_group( - get_domain_from_id(group_id), group_id, user_id, content + get_domain_from_id(group_id), group_id, requester_user_id, + user_id, content, ) defer.returnValue(res) diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index 3c6ee7df68..0a69e0f501 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -776,7 +776,7 @@ class GroupServerStore(SQLBaseStore): remote_attestation (dict): If remote group then store the remote attestation from the group, else None. """ - def _register_user_group_membership_txn(txn, next_id): + def _register_user_group_membership_txn(txn): # TODO: Upsert? self._simple_delete_txn( txn, @@ -797,7 +797,6 @@ class GroupServerStore(SQLBaseStore): "content": json.dumps(content), }, ) - self._group_updates_stream_cache.entity_has_changed(user_id, next_id) # TODO: Insert profile to ensuer it comes down stream if its a join. 
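The hunk below corrects the column name used when persisting a remote attestation: the signed attestation is stored verbatim as JSON in `attestation_json`, with its expiry duplicated into `valid_until_ms` so that `get_attestations_need_renewals` can filter on the indexed integer column instead of parsing JSON. A sqlite3 sketch of the round-trip; the table shape is inferred from the queries and schema in this series:

```python
import json
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE group_attestations_remote ("
    " group_id TEXT NOT NULL, user_id TEXT NOT NULL,"
    " valid_until_ms BIGINT NOT NULL, attestation_json TEXT NOT NULL)"
)

attestation = {
    "group_id": "+g:remote.example.org",
    "user_id": "@alice:example.com",
    "valid_until_ms": 1500000000000,
}

with conn:
    conn.execute(
        "INSERT INTO group_attestations_remote VALUES (?, ?, ?, ?)",
        (
            attestation["group_id"],
            attestation["user_id"],
            attestation["valid_until_ms"],  # denormalised for the renewal query
            json.dumps(attestation),        # full signed blob, stored verbatim
        ),
    )

row = conn.execute(
    "SELECT attestation_json FROM group_attestations_remote"
    " WHERE group_id = ? AND user_id = ?",
    (attestation["group_id"], attestation["user_id"]),
).fetchone()
assert json.loads(row[0]) == attestation
```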
@@ -820,7 +819,7 @@ class GroupServerStore(SQLBaseStore): "group_id": group_id, "user_id": user_id, "valid_until_ms": remote_attestation["valid_until_ms"], - "attestation": json.dumps(remote_attestation), + "attestation_json": json.dumps(remote_attestation), } ) else: @@ -841,11 +840,10 @@ class GroupServerStore(SQLBaseStore): }, ) - with self._group_updates_id_gen.get_next() as next_id: - yield self.runInteraction( - "register_user_group_membership", - _register_user_group_membership_txn, next_id, - ) + yield self.runInteraction( + "register_user_group_membership", + _register_user_group_membership_txn, + ) @defer.inlineCallbacks def create_group(self, group_id, user_id, name, avatar_url, short_description, @@ -928,3 +926,14 @@ class GroupServerStore(SQLBaseStore): defer.returnValue(json.loads(row["attestation_json"])) defer.returnValue(None) + + def get_joined_groups(self, user_id): + return self._simple_select_onecol( + table="local_group_membership", + keyvalues={ + "user_id": user_id, + "membership": "join", + }, + retcol="group_id", + desc="get_joined_groups", + ) From 05c13f6c221c1c034f30a76c41dbee14f2620520 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 18 Jul 2017 16:40:21 +0100 Subject: [PATCH 0055/1637] Add 'args' param to post_json --- synapse/http/matrixfederationclient.py | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 747a791f83..f58bf41d5a 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -347,7 +347,7 @@ class MatrixFederationHttpClient(object): @defer.inlineCallbacks def post_json(self, destination, path, data={}, long_retries=False, - timeout=None, ignore_backoff=False): + timeout=None, ignore_backoff=False, args={}): """ Sends the specifed json data using POST Args: @@ -383,6 +383,7 @@ class MatrixFederationHttpClient(object): destination, "POST", path, + query_bytes=encode_query_args(args), body_callback=body_callback, headers_dict={"Content-Type": ["application/json"]}, long_retries=long_retries, @@ -427,13 +428,6 @@ class MatrixFederationHttpClient(object): """ logger.debug("get_json args: %s", args) - encoded_args = {} - for k, vs in args.items(): - if isinstance(vs, basestring): - vs = [vs] - encoded_args[k] = [v.encode("UTF-8") for v in vs] - - query_bytes = urllib.urlencode(encoded_args, True) logger.debug("Query bytes: %s Retry DNS: %s", args, retry_on_dns_fail) def body_callback(method, url_bytes, headers_dict): @@ -444,7 +438,7 @@ class MatrixFederationHttpClient(object): destination, "GET", path, - query_bytes=query_bytes, + query_bytes=encode_query_args(args), body_callback=body_callback, retry_on_dns_fail=retry_on_dns_fail, timeout=timeout, @@ -610,3 +604,15 @@ def check_content_type_is_json(headers): raise RuntimeError( "Content-Type not application/json: was '%s'" % c_type ) + + +def encode_query_args(args): + encoded_args = {} + for k, vs in args.items(): + if isinstance(vs, basestring): + vs = [vs] + encoded_args[k] = [v.encode("UTF-8") for v in vs] + + query_bytes = urllib.urlencode(encoded_args, True) + + return query_bytes From e884ff31d8af31cc29f8a85d9bea03b806891e8b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 18 Jul 2017 16:41:44 +0100 Subject: [PATCH 0056/1637] Add DELETE --- synapse/http/matrixfederationclient.py | 46 ++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/synapse/http/matrixfederationclient.py 
b/synapse/http/matrixfederationclient.py index f58bf41d5a..8b94e6f29f 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -454,6 +454,52 @@ class MatrixFederationHttpClient(object): defer.returnValue(json.loads(body)) + @defer.inlineCallbacks + def delete_json(self, destination, path, long_retries=False, + timeout=None, ignore_backoff=False, args={}): + """Send a DELETE request to the remote expecting some json response + + Args: + destination (str): The remote server to send the HTTP request + to. + path (str): The HTTP path. + long_retries (bool): A boolean that indicates whether we should + retry for a short or long time. + timeout(int): How long to try (in ms) the destination for before + giving up. None indicates no timeout. + ignore_backoff (bool): true to ignore the historical backoff data and + try the request anyway. + Returns: + Deferred: Succeeds when we get a 2xx HTTP response. The result + will be the decoded JSON body. + + Fails with ``HTTPRequestException`` if we get an HTTP response + code >= 300. + + Fails with ``NotRetryingDestination`` if we are not yet ready + to retry this server. + """ + + response = yield self._request( + destination, + "DELETE", + path, + query_bytes=encode_query_args(args), + headers_dict={"Content-Type": ["application/json"]}, + long_retries=long_retries, + timeout=timeout, + ignore_backoff=ignore_backoff, + ) + + if 200 <= response.code < 300: + # We need to update the transactions table to say it was sent? + check_content_type_is_json(response.headers) + + with logcontext.PreserveLoggingContext(): + body = yield readBody(response) + + defer.returnValue(json.loads(body)) + @defer.inlineCallbacks def get_file(self, destination, path, output_stream, args={}, retry_on_dns_fail=True, max_size=None, From 6027b1992f507cb1a11836797827d9e69e2a6020 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 18 Jul 2017 16:51:25 +0100 Subject: [PATCH 0057/1637] Fix permissions --- synapse/groups/groups_server.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index c8559577f7..b9ad9507f4 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -293,7 +293,9 @@ class GroupsServerHandler(object): content): """Add/update a users entry in the group summary """ - yield self.check_group_is_ours(group_id, and_exists=True, and_is_admin=user_id) + yield self.check_group_is_ours( + group_id, and_exists=True, and_is_admin=requester_user_id, + ) order = content.get("order", None) @@ -313,7 +315,9 @@ class GroupsServerHandler(object): def delete_group_summary_user(self, group_id, requester_user_id, user_id, role_id): """Remove a user from the group summary """ - yield self.check_group_is_ours(group_id, and_exists=True, and_is_admin=user_id) + yield self.check_group_is_ours( + group_id, and_exists=True, and_is_admin=requester_user_id, + ) yield self.store.remove_user_from_summary( group_id=group_id, From 3431ec55dc00f9b2b58ce0cc6645d6aed8bd5c87 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 18 Jul 2017 17:19:39 +0100 Subject: [PATCH 0058/1637] Comments --- synapse/federation/transport/client.py | 40 +++++++ synapse/rest/client/v2_alpha/groups.py | 151 ++++++++----------------- 2 files changed, 88 insertions(+), 103 deletions(-) diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index e4d84c06c1..073d3abb2a 100644 --- a/synapse/federation/transport/client.py 
+++ b/synapse/federation/transport/client.py @@ -474,6 +474,8 @@ class TransportLayerClient(object): @log_function def get_group_profile(self, destination, group_id, requester_user_id): + """Get a group profile + """ path = PREFIX + "/groups/%s/profile" % (group_id,) return self.client.get_json( @@ -485,6 +487,8 @@ class TransportLayerClient(object): @log_function def get_group_summary(self, destination, group_id, requester_user_id): + """Get a group summary + """ path = PREFIX + "/groups/%s/summary" % (group_id,) return self.client.get_json( @@ -496,6 +500,8 @@ class TransportLayerClient(object): @log_function def get_rooms_in_group(self, destination, group_id, requester_user_id): + """Get all rooms in a group + """ path = PREFIX + "/groups/%s/rooms" % (group_id,) return self.client.get_json( @@ -507,6 +513,8 @@ class TransportLayerClient(object): def add_room_to_group(self, destination, group_id, requester_user_id, room_id, content): + """Add a room to a group + """ path = PREFIX + "/groups/%s/room/%s" % (group_id, room_id,) return self.client.post_json( @@ -519,6 +527,8 @@ class TransportLayerClient(object): @log_function def get_users_in_group(self, destination, group_id, requester_user_id): + """Get users in a group + """ path = PREFIX + "/groups/%s/users" % (group_id,) return self.client.get_json( @@ -530,6 +540,8 @@ class TransportLayerClient(object): @log_function def accept_group_invite(self, destination, group_id, user_id, content): + """Accept a group invite + """ path = PREFIX + "/groups/%s/users/%s/accept_invite" % (group_id, user_id) return self.client.post_json( @@ -541,6 +553,8 @@ class TransportLayerClient(object): @log_function def invite_to_group(self, destination, group_id, user_id, requester_user_id, content): + """Invite a user to a group + """ path = PREFIX + "/groups/%s/users/%s/invite" % (group_id, user_id) return self.client.post_json( @@ -569,6 +583,8 @@ class TransportLayerClient(object): @log_function def remove_user_from_group(self, destination, group_id, requester_user_id, user_id, content): + """Remove a user fron a group + """ path = PREFIX + "/groups/%s/users/%s/remove" % (group_id, user_id) return self.client.post_json( @@ -613,6 +629,8 @@ class TransportLayerClient(object): @log_function def update_group_summary_room(self, destination, group_id, user_id, room_id, category_id, content): + """Update a room entry in a group summary + """ if category_id: path = PREFIX + "/groups/%s/summary/categories/%s/rooms/%s" % ( group_id, category_id, room_id, @@ -631,6 +649,8 @@ class TransportLayerClient(object): @log_function def delete_group_summary_room(self, destination, group_id, user_id, room_id, category_id): + """Delete a room entry in a group summary + """ if category_id: path = PREFIX + "/groups/%s/summary/categories/%s/rooms/%s" % ( group_id, category_id, room_id, @@ -647,6 +667,8 @@ class TransportLayerClient(object): @log_function def get_group_categories(self, destination, group_id, requester_user_id): + """Get all categories in a group + """ path = PREFIX + "/groups/%s/categories" % (group_id,) return self.client.get_json( @@ -658,6 +680,8 @@ class TransportLayerClient(object): @log_function def get_group_category(self, destination, group_id, requester_user_id, category_id): + """Get category info in a group + """ path = PREFIX + "/groups/%s/categories/%s" % (group_id, category_id,) return self.client.get_json( @@ -670,6 +694,8 @@ class TransportLayerClient(object): @log_function def update_group_category(self, destination, group_id, requester_user_id, 
category_id, content): + """Update a category in a group + """ path = PREFIX + "/groups/%s/categories/%s" % (group_id, category_id,) return self.client.post_json( @@ -683,6 +709,8 @@ class TransportLayerClient(object): @log_function def delete_group_category(self, destination, group_id, requester_user_id, category_id): + """Delete a category in a group + """ path = PREFIX + "/groups/%s/categories/%s" % (group_id, category_id,) return self.client.delete_json( @@ -694,6 +722,8 @@ class TransportLayerClient(object): @log_function def get_group_roles(self, destination, group_id, requester_user_id): + """Get all roles in a group + """ path = PREFIX + "/groups/%s/roles" % (group_id,) return self.client.get_json( @@ -705,6 +735,8 @@ class TransportLayerClient(object): @log_function def get_group_role(self, destination, group_id, requester_user_id, role_id): + """Get a roles info + """ path = PREFIX + "/groups/%s/roles/%s" % (group_id, role_id,) return self.client.get_json( @@ -717,6 +749,8 @@ class TransportLayerClient(object): @log_function def update_group_role(self, destination, group_id, requester_user_id, role_id, content): + """Update a role in a group + """ path = PREFIX + "/groups/%s/roles/%s" % (group_id, role_id,) return self.client.post_json( @@ -729,6 +763,8 @@ class TransportLayerClient(object): @log_function def delete_group_role(self, destination, group_id, requester_user_id, role_id): + """Delete a role in a group + """ path = PREFIX + "/groups/%s/roles/%s" % (group_id, role_id,) return self.client.delete_json( @@ -741,6 +777,8 @@ class TransportLayerClient(object): @log_function def update_group_summary_user(self, destination, group_id, requester_user_id, user_id, role_id, content): + """Update a users entry in a group + """ if role_id: path = PREFIX + "/groups/%s/summary/roles/%s/users/%s" % ( group_id, role_id, user_id, @@ -759,6 +797,8 @@ class TransportLayerClient(object): @log_function def delete_group_summary_user(self, destination, group_id, requester_user_id, user_id, role_id): + """Delete a users entry in a group + """ if role_id: path = PREFIX + "/groups/%s/summary/roles/%s/users/%s" % ( group_id, role_id, user_id, diff --git a/synapse/rest/client/v2_alpha/groups.py b/synapse/rest/client/v2_alpha/groups.py index 255552c365..787967c3a2 100644 --- a/synapse/rest/client/v2_alpha/groups.py +++ b/synapse/rest/client/v2_alpha/groups.py @@ -26,6 +26,8 @@ logger = logging.getLogger(__name__) class GroupServlet(RestServlet): + """Get the group profile + """ PATTERNS = client_v2_patterns("/groups/(?P[^/]*)/profile$") def __init__(self, hs): @@ -45,6 +47,8 @@ class GroupServlet(RestServlet): class GroupSummaryServlet(RestServlet): + """Get the full group summary + """ PATTERNS = client_v2_patterns("/groups/(?P[^/]*)/summary$") def __init__(self, hs): @@ -63,69 +67,17 @@ class GroupSummaryServlet(RestServlet): defer.returnValue((200, get_group_summary)) -class GroupSummaryRoomsServlet(RestServlet): - PATTERNS = client_v2_patterns("/groups/(?P[^/]*)/summary/rooms$") - - def __init__(self, hs): - super(GroupSummaryServlet, self).__init__() - self.auth = hs.get_auth() - self.clock = hs.get_clock() - self.groups_handler = hs.get_groups_local_handler() - - @defer.inlineCallbacks - def on_GET(self, request, group_id): - requester = yield self.auth.get_user_by_req(request) - user_id = requester.user.to_string() - - get_group_summary = yield self.groups_handler.get_group_summary(group_id, user_id) - - defer.returnValue((200, get_group_summary)) - - -class 
GroupSummaryRoomsDefaultCatServlet(RestServlet): - PATTERNS = client_v2_patterns( - "/groups/(?P[^/]*)/summary/rooms/(?P[^/]*)$" - ) - - def __init__(self, hs): - super(GroupSummaryRoomsDefaultCatServlet, self).__init__() - self.auth = hs.get_auth() - self.clock = hs.get_clock() - self.groups_handler = hs.get_groups_local_handler() - - @defer.inlineCallbacks - def on_PUT(self, request, group_id, room_id): - requester = yield self.auth.get_user_by_req(request) - user_id = requester.user.to_string() - - content = parse_json_object_from_request(request) - resp = yield self.groups_handler.update_group_summary_room( - group_id, user_id, - room_id=room_id, - category_id=None, - content=content, - ) - - defer.returnValue((200, resp)) - - @defer.inlineCallbacks - def on_DELETE(self, request, group_id, room_id): - requester = yield self.auth.get_user_by_req(request) - user_id = requester.user.to_string() - - resp = yield self.groups_handler.delete_group_summary_room( - group_id, user_id, - room_id=room_id, - category_id=None, - ) - - defer.returnValue((200, resp)) - - class GroupSummaryRoomsCatServlet(RestServlet): + """Update/delete a rooms entry in the summary. + + Matches both: + - /groups/:group/summary/rooms/:room_id + - /groups/:group/summary/categories/:category/rooms/:room_id + """ PATTERNS = client_v2_patterns( "/groups/(?P[^/]*)/summary" - "/categories/(?P[^/]+)/rooms/(?P[^/]+)$" + "(/categories/(?P[^/]+))?" + "/rooms/(?P[^/]*)$" ) def __init__(self, hs): @@ -164,6 +116,8 @@ class GroupSummaryRoomsCatServlet(RestServlet): class GroupCategoryServlet(RestServlet): + """Get/add/update/delete a group category + """ PATTERNS = client_v2_patterns( "/groups/(?P[^/]*)/categories/(?P[^/]+)$" ) @@ -214,6 +168,8 @@ class GroupCategoryServlet(RestServlet): class GroupCategoriesServlet(RestServlet): + """Get all group categories + """ PATTERNS = client_v2_patterns( "/groups/(?P[^/]*)/categories/$" ) @@ -237,6 +193,8 @@ class GroupCategoriesServlet(RestServlet): class GroupRoleServlet(RestServlet): + """Get/add/update/delete a group role + """ PATTERNS = client_v2_patterns( "/groups/(?P[^/]*)/roles/(?P[^/]+)$" ) @@ -287,6 +245,8 @@ class GroupRoleServlet(RestServlet): class GroupRolesServlet(RestServlet): + """Get all group roles + """ PATTERNS = client_v2_patterns( "/groups/(?P[^/]*)/roles/$" ) @@ -309,50 +269,17 @@ class GroupRolesServlet(RestServlet): defer.returnValue((200, category)) -class GroupSummaryUsersDefaultRoleServlet(RestServlet): - PATTERNS = client_v2_patterns( - "/groups/(?P[^/]*)/summary/users/(?P[^/]*)$" - ) - - def __init__(self, hs): - super(GroupSummaryUsersDefaultRoleServlet, self).__init__() - self.auth = hs.get_auth() - self.clock = hs.get_clock() - self.groups_handler = hs.get_groups_local_handler() - - @defer.inlineCallbacks - def on_PUT(self, request, group_id, user_id): - requester = yield self.auth.get_user_by_req(request) - requester_user_id = requester.user.to_string() - - content = parse_json_object_from_request(request) - resp = yield self.groups_handler.update_group_summary_user( - group_id, requester_user_id, - user_id=user_id, - role_id=None, - content=content, - ) - - defer.returnValue((200, resp)) - - @defer.inlineCallbacks - def on_DELETE(self, request, group_id, user_id): - requester = yield self.auth.get_user_by_req(request) - requester_user_id = requester.user.to_string() - - resp = yield self.groups_handler.delete_group_summary_user( - group_id, requester_user_id, - user_id=user_id, - role_id=None, - ) - - defer.returnValue((200, resp)) - - class 
GroupSummaryUsersRoleServlet(RestServlet): + """Update/delete a user's entry in the summary. + + Matches both: + - /groups/:group/summary/users/:room_id + - /groups/:group/summary/roles/:role/users/:user_id + """ PATTERNS = client_v2_patterns( "/groups/(?P[^/]*)/summary" - "/roles/(?P[^/]+)/users/(?P[^/]+)$" + "(/roles/(?P[^/]+))?" + "/users/(?P[^/]*)$" ) def __init__(self, hs): @@ -391,6 +318,8 @@ class GroupSummaryUsersRoleServlet(RestServlet): class GroupRoomServlet(RestServlet): + """Get all rooms in a group + """ PATTERNS = client_v2_patterns("/groups/(?P[^/]*)/rooms$") def __init__(self, hs): @@ -410,6 +339,8 @@ class GroupRoomServlet(RestServlet): class GroupUsersServlet(RestServlet): + """Get all users in a group + """ PATTERNS = client_v2_patterns("/groups/(?P[^/]*)/users$") def __init__(self, hs): @@ -429,6 +360,8 @@ class GroupUsersServlet(RestServlet): class GroupCreateServlet(RestServlet): + """Create a group + """ PATTERNS = client_v2_patterns("/create_group$") def __init__(self, hs): @@ -454,6 +387,8 @@ class GroupCreateServlet(RestServlet): class GroupAdminRoomsServlet(RestServlet): + """Add a room to the group + """ PATTERNS = client_v2_patterns( "/groups/(?P[^/]*)/admin/rooms/(?P[^/]*)$" ) @@ -476,6 +411,8 @@ class GroupAdminRoomsServlet(RestServlet): class GroupAdminUsersInviteServlet(RestServlet): + """Invite a user to the group + """ PATTERNS = client_v2_patterns( "/groups/(?P[^/]*)/admin/users/invite/(?P[^/]*)$" ) @@ -503,6 +440,8 @@ class GroupAdminUsersInviteServlet(RestServlet): class GroupAdminUsersKickServlet(RestServlet): + """Kick a user from the group + """ PATTERNS = client_v2_patterns( "/groups/(?P[^/]*)/admin/users/remove/(?P[^/]*)$" ) @@ -527,6 +466,8 @@ class GroupAdminUsersKickServlet(RestServlet): class GroupSelfLeaveServlet(RestServlet): + """Leave a joined group + """ PATTERNS = client_v2_patterns( "/groups/(?P[^/]*)/self/leave$" ) @@ -551,6 +492,8 @@ class GroupSelfLeaveServlet(RestServlet): class GroupSelfJoinServlet(RestServlet): + """Attempt to join a group, or knock + """ PATTERNS = client_v2_patterns( "/groups/(?P[^/]*)/self/join$" ) @@ -575,6 +518,8 @@ class GroupSelfJoinServlet(RestServlet): class GroupSelfAcceptInviteServlet(RestServlet): + """Accept a group invite + """ PATTERNS = client_v2_patterns( "/groups/(?P[^/]*)/self/accept_invite$" ) @@ -599,6 +544,8 @@ class GroupSelfAcceptInviteServlet(RestServlet): class GroupsForUserServlet(RestServlet): + """Get all groups the logged in user is joined to + """ PATTERNS = client_v2_patterns( "/joined_groups$" ) @@ -632,11 +579,9 @@ def register_servlets(hs, http_server): GroupSelfJoinServlet(hs).register(http_server) GroupSelfAcceptInviteServlet(hs).register(http_server) GroupsForUserServlet(hs).register(http_server) - GroupSummaryRoomsDefaultCatServlet(hs).register(http_server) GroupCategoryServlet(hs).register(http_server) GroupCategoriesServlet(hs).register(http_server) GroupSummaryRoomsCatServlet(hs).register(http_server) GroupRoleServlet(hs).register(http_server) GroupRolesServlet(hs).register(http_server) - GroupSummaryUsersDefaultRoleServlet(hs).register(http_server) GroupSummaryUsersRoleServlet(hs).register(http_server) From 14a34f12d755e7516dc81348d811d47dc51f026d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 18 Jul 2017 17:28:42 +0100 Subject: [PATCH 0059/1637] Comments --- synapse/federation/transport/server.py | 2 +- synapse/groups/groups_server.py | 2 +- synapse/handlers/groups_local.py | 29 +++++++++++++++++--------- synapse/rest/client/v2_alpha/groups.py | 4 +++- 4 files 
changed, 24 insertions(+), 13 deletions(-) diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 29e966ac29..1332b49f35 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -672,7 +672,7 @@ class FederationGroupsAddRoomsServlet(BaseFederationServlet): if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") - new_content = yield self.handler.add_room( + new_content = yield self.handler.add_room_to_group( group_id, requester_user_id, room_id, content ) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index b9ad9507f4..1b6e354ca3 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -430,7 +430,7 @@ class GroupsServerHandler(object): }) @defer.inlineCallbacks - def add_room(self, group_id, requester_user_id, room_id, content): + def add_room_to_group(self, group_id, requester_user_id, room_id, content): """Add room to group """ yield self.check_group_is_ours( diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index 50f7fce885..0b80348c82 100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -74,6 +74,8 @@ class GroupsLocalHandler(object): get_group_profile = _create_rerouter("get_group_profile") get_rooms_in_group = _create_rerouter("get_rooms_in_group") + add_room_to_group = _create_rerouter("add_room_to_group") + update_group_summary_room = _create_rerouter("update_group_summary_room") delete_group_summary_room = _create_rerouter("delete_group_summary_room") @@ -130,6 +132,9 @@ class GroupsLocalHandler(object): defer.returnValue(res) def create_group(self, group_id, user_id, content): + """Create a group + """ + logger.info("Asking to create group with ID: %r", group_id) if self.is_mine_id(group_id): @@ -141,18 +146,10 @@ class GroupsLocalHandler(object): get_domain_from_id(group_id), group_id, user_id, content, ) # TODO - def add_room(self, group_id, user_id, room_id, content): - if self.is_mine_id(group_id): - return self.groups_server_handler.add_room( - group_id, user_id, room_id, content - ) - - return self.transport_client.add_room_to_group( - get_domain_from_id(group_id), group_id, user_id, room_id, content, - ) - @defer.inlineCallbacks def get_users_in_group(self, group_id, requester_user_id): + """Get users in a group + """ if self.is_mine_id(group_id): res = yield self.groups_server_handler.get_users_in_group( group_id, requester_user_id @@ -184,10 +181,14 @@ class GroupsLocalHandler(object): @defer.inlineCallbacks def join_group(self, group_id, user_id, content): + """Request to join a group + """ raise NotImplementedError() # TODO @defer.inlineCallbacks def accept_invite(self, group_id, user_id, content): + """Accept an invite to a group + """ if self.is_mine_id(group_id): yield self.groups_server_handler.accept_invite( group_id, user_id, content @@ -222,6 +223,8 @@ class GroupsLocalHandler(object): @defer.inlineCallbacks def invite(self, group_id, user_id, requester_user_id, config): + """Invite a user to a group + """ content = { "requester_user_id": requester_user_id, "config": config, @@ -240,6 +243,8 @@ class GroupsLocalHandler(object): @defer.inlineCallbacks def on_invite(self, group_id, user_id, content): + """One of our users were invited to a group + """ # TODO: Support auto join and rejection if not self.is_mine_id(user_id): @@ -262,6 +267,8 @@ class GroupsLocalHandler(object): 
@defer.inlineCallbacks def remove_user_from_group(self, group_id, user_id, requester_user_id, content): + """Remove a user from a group + """ if user_id == requester_user_id: yield self.store.register_user_group_membership( group_id, user_id, @@ -286,6 +293,8 @@ class GroupsLocalHandler(object): @defer.inlineCallbacks def user_removed_from_group(self, group_id, user_id, content): + """One of our users was removed/kicked from a group + """ # TODO: Check if user in group yield self.store.register_user_group_membership( group_id, user_id, diff --git a/synapse/rest/client/v2_alpha/groups.py b/synapse/rest/client/v2_alpha/groups.py index 787967c3a2..f937d856fd 100644 --- a/synapse/rest/client/v2_alpha/groups.py +++ b/synapse/rest/client/v2_alpha/groups.py @@ -405,7 +405,9 @@ class GroupAdminRoomsServlet(RestServlet): user_id = requester.user.to_string() content = parse_json_object_from_request(request) - result = yield self.groups_handler.add_room(group_id, user_id, room_id, content) + result = yield self.groups_handler.add_room_to_group( + group_id, user_id, room_id, content, + ) defer.returnValue((200, result)) From 6f443a74cf6ab0bfe452289f9888580725987765 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 20 Jul 2017 09:46:33 +0100 Subject: [PATCH 0060/1637] Add update group profile API --- synapse/federation/transport/server.py | 12 ++++++++++++ synapse/groups/groups_server.py | 16 ++++++++++++++++ synapse/handlers/groups_local.py | 1 + synapse/rest/client/v2_alpha/groups.py | 12 ++++++++++++ synapse/storage/group_server.py | 11 +++++++++++ 5 files changed, 52 insertions(+) diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 1332b49f35..e04750fd2a 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -642,6 +642,18 @@ class FederationGroupsSummaryServlet(BaseFederationServlet): defer.returnValue((200, new_content)) + @defer.inlineCallbacks + def on_POST(self, origin, content, query, group_id): + requester_user_id = parse_string_from_args(query, "requester_user_id") + if get_domain_from_id(requester_user_id) != origin: + raise SynapseError(403, "requester_user_id doesn't match origin") + + new_content = yield self.handler.update_group_profile( + group_id, requester_user_id, content + ) + + defer.returnValue((200, new_content)) + class FederationGroupsRoomsServlet(BaseFederationServlet): """Get the rooms in a group on behalf of a user diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 1b6e354ca3..322aad2a6f 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -341,6 +341,22 @@ class GroupsServerHandler(object): else: raise SynapseError(404, "Unknown group") + @defer.inlineCallbacks + def update_group_profile(self, group_id, requester_user_id, content): + """Update the group profile + """ + yield self.check_group_is_ours( + group_id, and_exists=True, and_is_admin=requester_user_id, + ) + + profile = {} + for keyname in ("name", "avatar_url", "short_description", + "long_description"): + if keyname in content: + profile[keyname] = content[keyname] + + yield self.store.update_group_profile(group_id, profile) + @defer.inlineCallbacks def get_users_in_group(self, group_id, requester_user_id): """Get the users in group as seen by requester_user_id. 
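`update_group_profile`, added just above, copies only a fixed whitelist of keys out of the client-supplied dict, so unexpected fields can never reach the UPDATE statement; PATCH 0065 later in this series tightens the same loop by rejecting non-string values. A sketch combining both steps, with `SynapseError` simplified and `str` standing in for the py2-era `basestring`:

```python
GROUP_PROFILE_FIELDS = (
    "name", "avatar_url", "short_description", "long_description",
)

class SynapseError(Exception):
    def __init__(self, code, msg):
        super().__init__("%d: %s" % (code, msg))

def extract_group_profile(content):
    profile = {}
    for keyname in GROUP_PROFILE_FIELDS:
        if keyname in content:
            value = content[keyname]
            # Reject anything that is not a string, mirroring the
            # isinstance(value, basestring) check added in PATCH 0065.
            if not isinstance(value, str):
                raise SynapseError(400, "%r value is not a string" % (keyname,))
            profile[keyname] = value
    return profile

print(extract_group_profile({"name": "My group", "topic": "ignored"}))
# -> {'name': 'My group'}  (unknown keys are dropped, not an error)
```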
diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index 0b80348c82..b2c920da38 100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -72,6 +72,7 @@ class GroupsLocalHandler(object): # or federation depending on if the group is local or remote get_group_profile = _create_rerouter("get_group_profile") + update_group_profile = _create_rerouter("update_group_profile") get_rooms_in_group = _create_rerouter("get_rooms_in_group") add_room_to_group = _create_rerouter("add_room_to_group") diff --git a/synapse/rest/client/v2_alpha/groups.py b/synapse/rest/client/v2_alpha/groups.py index f937d856fd..64d803d489 100644 --- a/synapse/rest/client/v2_alpha/groups.py +++ b/synapse/rest/client/v2_alpha/groups.py @@ -45,6 +45,18 @@ class GroupServlet(RestServlet): defer.returnValue((200, group_description)) + @defer.inlineCallbacks + def on_POST(self, request, group_id, content): + requester = yield self.auth.get_user_by_req(request) + user_id = requester.user.to_string() + + content = parse_json_object_from_request(request) + yield self.groups_handler.update_group_profile( + group_id, user_id, content, + ) + + defer.returnValue((200, {})) + class GroupSummaryServlet(RestServlet): """Get the full group summary diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index 0a69e0f501..4197d22d88 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -860,6 +860,17 @@ class GroupServerStore(SQLBaseStore): desc="create_group", ) + @defer.inlineCallbacks + def update_group_profile(self, group_id, profile,): + yield self._simple_update_one( + table="groups", + keyvalues={ + "group_id": group_id, + }, + updatevalues=profile, + desc="create_group", + ) + def get_attestations_need_renewals(self, valid_until_ms): """Get all attestations that need to be renewed until givent time """ From d7d24750be64913a10335603f7a48dbba10e51b0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 20 Jul 2017 10:47:01 +0100 Subject: [PATCH 0061/1637] Fix port script for user directory tables --- scripts/synapse_port_db | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db index 7d158a46a4..8da8a3b1d2 100755 --- a/scripts/synapse_port_db +++ b/scripts/synapse_port_db @@ -252,6 +252,24 @@ class Porter(object): ) return + if table in ( + "user_directory", "user_directory_search", "users_who_share_rooms", + "users_in_pubic_room", + ): + # We don't port these tables, as they're a faff and we can regenreate + # them anyway. + self.progress.update(table, table_size) # Mark table as done + return + + if table == "user_directory_stream_pos": + # We need to make sure there is a single row, `(X, null)` + yield self.postgres_store._simple_insert( + table=table, + values={"stream_id": None}, + ) + self.progress.update(table, table_size) # Mark table as done + return + forward_select = ( "SELECT rowid, * FROM %s WHERE rowid >= ? ORDER BY rowid LIMIT ?" 
% (table,) From 57826d645bd62ab534dbcab8d66a98daec145459 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 20 Jul 2017 13:15:22 +0100 Subject: [PATCH 0062/1637] Fix typo --- synapse/storage/group_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index 0a69e0f501..a2e7aa47d8 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -798,7 +798,7 @@ class GroupServerStore(SQLBaseStore): }, ) - # TODO: Insert profile to ensuer it comes down stream if its a join. + # TODO: Insert profile to ensure it comes down stream if its a join. if membership == "join": if local_attestation: From 60a9a49f83f7ea7dc8f76ffaec17c9b42c3b19f7 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 20 Jul 2017 16:16:29 +0100 Subject: [PATCH 0063/1637] Extend comment --- scripts/synapse_port_db | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db index 8da8a3b1d2..bc167b59af 100755 --- a/scripts/synapse_port_db +++ b/scripts/synapse_port_db @@ -262,7 +262,8 @@ class Porter(object): return if table == "user_directory_stream_pos": - # We need to make sure there is a single row, `(X, null)` + # We need to make sure there is a single row, `(X, null), as that is + # what synapse expects to be there. yield self.postgres_store._simple_insert( table=table, values={"stream_id": None}, From 8209b5f033417ab018fdd1114170b89fb0b18aa9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 20 Jul 2017 16:22:22 +0100 Subject: [PATCH 0064/1637] Fix a storage desc --- synapse/storage/group_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index 4197d22d88..2331ec79bd 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -868,7 +868,7 @@ class GroupServerStore(SQLBaseStore): "group_id": group_id, }, updatevalues=profile, - desc="create_group", + desc="update_group_profile", ) def get_attestations_need_renewals(self, valid_until_ms): From 0ab153d2014d871c13b02dbd1c6bf7c0cc0bcedc Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 20 Jul 2017 16:24:18 +0100 Subject: [PATCH 0065/1637] Check values are strings --- synapse/groups/groups_server.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 322aad2a6f..b1ee43ef90 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -353,7 +353,10 @@ class GroupsServerHandler(object): for keyname in ("name", "avatar_url", "short_description", "long_description"): if keyname in content: - profile[keyname] = content[keyname] + value = content[keyname] + if not isinstance(value, basestring): + raise SynapseError(400, "%r value is not a string" % (keyname,)) + profile[keyname] = value yield self.store.update_group_profile(group_id, profile) From c544188ee3644c85a97a3c4e09e63ad4e3c6f0cc Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 10 Jul 2017 14:53:19 +0100 Subject: [PATCH 0066/1637] Add groups to sync stream --- synapse/handlers/sync.py | 64 ++++++++++++++++- synapse/rest/client/v2_alpha/sync.py | 5 ++ synapse/storage/__init__.py | 15 ++++ synapse/storage/group_server.py | 68 +++++++++++++++++-- .../storage/schema/delta/43/group_server.sql | 9 +++ synapse/streams/events.py | 2 + synapse/types.py | 2 + tests/rest/client/v1/test_rooms.py | 4 +- 8 files 
changed, 161 insertions(+), 8 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 91c6c6be3c..c01fcd3d59 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -108,6 +108,17 @@ class InvitedSyncResult(collections.namedtuple("InvitedSyncResult", [ return True +class GroupsSyncResult(collections.namedtuple("GroupsSyncResult", [ + "join", + "invite", + "leave", +])): + __slots__ = [] + + def __nonzero__(self): + return self.join or self.invite or self.leave + + class SyncResult(collections.namedtuple("SyncResult", [ "next_batch", # Token for the next sync "presence", # List of presence events for the user. @@ -119,6 +130,7 @@ class SyncResult(collections.namedtuple("SyncResult", [ "device_lists", # List of user_ids whose devices have chanegd "device_one_time_keys_count", # Dict of algorithm to count for one time keys # for this device + "groups", ])): __slots__ = [] @@ -134,7 +146,8 @@ class SyncResult(collections.namedtuple("SyncResult", [ self.archived or self.account_data or self.to_device or - self.device_lists + self.device_lists or + self.groups ) @@ -560,6 +573,8 @@ class SyncHandler(object): user_id, device_id ) + yield self._generate_sync_entry_for_groups(sync_result_builder) + defer.returnValue(SyncResult( presence=sync_result_builder.presence, account_data=sync_result_builder.account_data, @@ -568,10 +583,56 @@ class SyncHandler(object): archived=sync_result_builder.archived, to_device=sync_result_builder.to_device, device_lists=device_lists, + groups=sync_result_builder.groups, device_one_time_keys_count=one_time_key_counts, next_batch=sync_result_builder.now_token, )) + @measure_func("_generate_sync_entry_for_groups") + @defer.inlineCallbacks + def _generate_sync_entry_for_groups(self, sync_result_builder): + user_id = sync_result_builder.sync_config.user.to_string() + since_token = sync_result_builder.since_token + now_token = sync_result_builder.now_token + + if since_token and since_token.groups_key: + results = yield self.store.get_groups_changes_for_user( + user_id, since_token.groups_key, now_token.groups_key, + ) + else: + results = yield self.store.get_all_groups_for_user( + user_id, now_token.groups_key, + ) + + invited = {} + joined = {} + left = {} + for result in results: + membership = result["membership"] + group_id = result["group_id"] + gtype = result["type"] + content = result["content"] + + if membership == "join": + if gtype == "membership": + content.pop("membership", None) + invited[group_id] = content["content"] + else: + joined.setdefault(group_id, {})[gtype] = content + elif membership == "invite": + if gtype == "membership": + content.pop("membership", None) + invited[group_id] = content["content"] + else: + if gtype == "membership": + left[group_id] = content["content"] + + sync_result_builder.groups = GroupsSyncResult( + join=joined, + invite=invited, + leave=left, + ) + @measure_func("_generate_sync_entry_for_device_list") @defer.inlineCallbacks def _generate_sync_entry_for_device_list(self, sync_result_builder): @@ -1260,6 +1321,7 @@ class SyncResultBuilder(object): self.invited = [] self.archived = [] self.device = [] + self.groups = None class RoomSyncResultBuilder(object): diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py index 6dcc407451..5f208a4c1c 100644 --- a/synapse/rest/client/v2_alpha/sync.py +++ b/synapse/rest/client/v2_alpha/sync.py @@ -199,6 +199,11 @@ class SyncRestServlet(RestServlet): "invite": invited, "leave": archived, }, + "groups": { + "join": 
sync_result.groups.join, + "invite": sync_result.groups.invite, + "leave": sync_result.groups.leave, + }, "device_one_time_keys_count": sync_result.device_one_time_keys_count, "next_batch": sync_result.next_batch.to_string(), } diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index fdee9f1ad5..594566eb38 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -136,6 +136,9 @@ class DataStore(RoomMemberStore, RoomStore, db_conn, "pushers", "id", extra_tables=[("deleted_pushers", "stream_id")], ) + self._group_updates_id_gen = StreamIdGenerator( + db_conn, "local_group_updates", "stream_id", + ) if isinstance(self.database_engine, PostgresEngine): self._cache_id_gen = StreamIdGenerator( @@ -236,6 +239,18 @@ class DataStore(RoomMemberStore, RoomStore, prefilled_cache=curr_state_delta_prefill, ) + _group_updates_prefill, min_group_updates_id = self._get_cache_dict( + db_conn, "local_group_updates", + entity_column="user_id", + stream_column="stream_id", + max_value=self._group_updates_id_gen.get_current_token(), + limit=1000, + ) + self._group_updates_stream_cache = StreamChangeCache( + "_group_updates_stream_cache", min_group_updates_id, + prefilled_cache=_group_updates_prefill, + ) + cur = LoggingTransaction( db_conn.cursor(), name="_find_stream_orderings_for_times_txn", diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index a2e7aa47d8..45f0a4c599 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -776,7 +776,7 @@ class GroupServerStore(SQLBaseStore): remote_attestation (dict): If remote group then store the remote attestation from the group, else None. """ - def _register_user_group_membership_txn(txn): + def _register_user_group_membership_txn(txn, next_id): # TODO: Upsert? self._simple_delete_txn( txn, @@ -798,6 +798,19 @@ class GroupServerStore(SQLBaseStore): }, ) + self._simple_insert_txn( + txn, + table="local_group_updates", + values={ + "stream_id": next_id, + "group_id": group_id, + "user_id": user_id, + "type": "membership", + "content": json.dumps({"membership": membership, "content": content}), + } + ) + self._group_updates_stream_cache.entity_has_changed(user_id, next_id) + # TODO: Insert profile to ensure it comes down stream if its a join. if membership == "join": @@ -840,10 +853,11 @@ class GroupServerStore(SQLBaseStore): }, ) - yield self.runInteraction( - "register_user_group_membership", - _register_user_group_membership_txn, - ) + with self._group_updates_id_gen.get_next() as next_id: + yield self.runInteraction( + "register_user_group_membership", + _register_user_group_membership_txn, next_id, + ) @defer.inlineCallbacks def create_group(self, group_id, user_id, name, avatar_url, short_description, @@ -937,3 +951,47 @@ class GroupServerStore(SQLBaseStore): retcol="group_id", desc="get_joined_groups", ) + + def get_all_groups_for_user(self, user_id, now_token): + def _get_all_groups_for_user_txn(txn): + sql = """ + SELECT group_id, type, membership, u.content + FROM local_group_updates AS u + INNER JOIN local_group_membership USING (group_id, user_id) + WHERE user_id = ? AND membership != 'leave' + AND stream_id <= ? 
+ """ + txn.execute(sql, (user_id, now_token,)) + return self.cursor_to_dict(txn) + return self.runInteraction( + "get_all_groups_for_user", _get_all_groups_for_user_txn, + ) + + def get_groups_changes_for_user(self, user_id, from_token, to_token): + from_token = int(from_token) + has_changed = self._group_updates_stream_cache.has_entity_changed( + user_id, from_token, + ) + if not has_changed: + return [] + + def _get_groups_changes_for_user_txn(txn): + sql = """ + SELECT group_id, membership, type, u.content + FROM local_group_updates AS u + INNER JOIN local_group_membership USING (group_id, user_id) + WHERE user_id = ? AND ? < stream_id AND stream_id <= ? + """ + txn.execute(sql, (user_id, from_token, to_token,)) + return [{ + "group_id": group_id, + "membership": membership, + "type": gtype, + "content": json.loads(content_json), + } for group_id, membership, gtype, content_json in txn] + return self.runInteraction( + "get_groups_changes_for_user", _get_groups_changes_for_user_txn, + ) + + def get_group_stream_token(self): + return self._group_updates_id_gen.get_current_token() diff --git a/synapse/storage/schema/delta/43/group_server.sql b/synapse/storage/schema/delta/43/group_server.sql index e1fd47aa7f..92f3339c94 100644 --- a/synapse/storage/schema/delta/43/group_server.sql +++ b/synapse/storage/schema/delta/43/group_server.sql @@ -155,3 +155,12 @@ CREATE TABLE local_group_membership ( CREATE INDEX local_group_membership_u_idx ON local_group_membership(user_id, group_id); CREATE INDEX local_group_membership_g_idx ON local_group_membership(group_id); + + +CREATE TABLE local_group_updates ( + stream_id BIGINT NOT NULL, + group_id TEXT NOT NULL, + user_id TEXT NOT NULL, + type TEXT NOT NULL, + content TEXT NOT NULL +); diff --git a/synapse/streams/events.py b/synapse/streams/events.py index 91a59b0bae..e2be500815 100644 --- a/synapse/streams/events.py +++ b/synapse/streams/events.py @@ -45,6 +45,7 @@ class EventSources(object): push_rules_key, _ = self.store.get_push_rules_stream_token() to_device_key = self.store.get_to_device_stream_token() device_list_key = self.store.get_device_stream_token() + groups_key = self.store.get_group_stream_token() token = StreamToken( room_key=( @@ -65,6 +66,7 @@ class EventSources(object): push_rules_key=push_rules_key, to_device_key=to_device_key, device_list_key=device_list_key, + groups_key=groups_key, ) defer.returnValue(token) diff --git a/synapse/types.py b/synapse/types.py index b32c0e360d..37d5fa7f9f 100644 --- a/synapse/types.py +++ b/synapse/types.py @@ -171,6 +171,7 @@ class StreamToken( "push_rules_key", "to_device_key", "device_list_key", + "groups_key", )) ): _SEPARATOR = "_" @@ -209,6 +210,7 @@ class StreamToken( or (int(other.push_rules_key) < int(self.push_rules_key)) or (int(other.to_device_key) < int(self.to_device_key)) or (int(other.device_list_key) < int(self.device_list_key)) + or (int(other.groups_key) < int(self.groups_key)) ) def copy_and_advance(self, key, new_value): diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py index d746ea8568..de376fb514 100644 --- a/tests/rest/client/v1/test_rooms.py +++ b/tests/rest/client/v1/test_rooms.py @@ -1032,7 +1032,7 @@ class RoomMessageListTestCase(RestTestCase): @defer.inlineCallbacks def test_topo_token_is_accepted(self): - token = "t1-0_0_0_0_0_0_0_0" + token = "t1-0_0_0_0_0_0_0_0_0" (code, response) = yield self.mock_resource.trigger_get( "/rooms/%s/messages?access_token=x&from=%s" % (self.room_id, token)) @@ -1044,7 +1044,7 @@ class 
RoomMessageListTestCase(RestTestCase): @defer.inlineCallbacks def test_stream_token_is_accepted_for_fwd_pagianation(self): - token = "s0_0_0_0_0_0_0_0" + token = "s0_0_0_0_0_0_0_0_0" (code, response) = yield self.mock_resource.trigger_get( "/rooms/%s/messages?access_token=x&from=%s" % (self.room_id, token)) From 4d793626ffae05e3d9ac2d770bea7224a3318a56 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 20 Jul 2017 16:42:44 +0100 Subject: [PATCH 0067/1637] Fix bug in generating current token --- synapse/streams/events.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/synapse/streams/events.py b/synapse/streams/events.py index e2be500815..f03ad99118 100644 --- a/synapse/streams/events.py +++ b/synapse/streams/events.py @@ -75,6 +75,7 @@ class EventSources(object): push_rules_key, _ = self.store.get_push_rules_stream_token() to_device_key = self.store.get_to_device_stream_token() device_list_key = self.store.get_device_stream_token() + groups_key = self.store.get_group_stream_token() token = StreamToken( room_key=( @@ -95,5 +96,6 @@ class EventSources(object): push_rules_key=push_rules_key, to_device_key=to_device_key, device_list_key=device_list_key, + groups_key=groups_key, ) defer.returnValue(token) From 139fe30f47aabd3ca8cce0f8d8c961348188b90b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 20 Jul 2017 16:47:35 +0100 Subject: [PATCH 0068/1637] Remember to cast to bool --- synapse/handlers/sync.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index c01fcd3d59..600d0589fd 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -116,7 +116,7 @@ class GroupsSyncResult(collections.namedtuple("GroupsSyncResult", [ __slots__ = [] def __nonzero__(self): - return self.join or self.invite or self.leave + return bool(self.join or self.invite or self.leave) class SyncResult(collections.namedtuple("SyncResult", [ From 2cc998fed879357376edb35d5088d88a078dd576 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 20 Jul 2017 17:13:18 +0100 Subject: [PATCH 0069/1637] Fix replication. 
And notify --- synapse/app/synchrotron.py | 6 +++ synapse/handlers/groups_local.py | 20 ++++++-- synapse/replication/slave/storage/groups.py | 54 +++++++++++++++++++++ synapse/replication/tcp/streams.py | 20 ++++++++ synapse/storage/group_server.py | 23 +++++++++ 5 files changed, 119 insertions(+), 4 deletions(-) create mode 100644 synapse/replication/slave/storage/groups.py diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index 4bdd99a966..d06a05acd9 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -41,6 +41,7 @@ from synapse.replication.slave.storage.presence import SlavedPresenceStore from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore from synapse.replication.slave.storage.devices import SlavedDeviceStore from synapse.replication.slave.storage.room import RoomStore +from synapse.replication.slave.storage.groups import SlavedGroupServerStore from synapse.replication.tcp.client import ReplicationClientHandler from synapse.server import HomeServer from synapse.storage.engines import create_engine @@ -75,6 +76,7 @@ class SynchrotronSlavedStore( SlavedRegistrationStore, SlavedFilteringStore, SlavedPresenceStore, + SlavedGroupServerStore, SlavedDeviceInboxStore, SlavedDeviceStore, SlavedClientIpStore, @@ -409,6 +411,10 @@ class SyncReplicationHandler(ReplicationClientHandler): ) elif stream_name == "presence": yield self.presence_handler.process_replication_rows(token, rows) + elif stream_name == "receipts": + self.notifier.on_new_event( + "groups_key", token, users=[row.user_id for row in rows], + ) def start(config_options): diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index 0b80348c82..4182ea5afa 100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -211,13 +211,16 @@ class GroupsLocalHandler(object): user_id=user_id, ) - yield self.store.register_user_group_membership( + token = yield self.store.register_user_group_membership( group_id, user_id, membership="join", is_admin=False, local_attestation=local_attestation, remote_attestation=remote_attestation, ) + self.notifier.on_new_event( + "groups_key", token, users=[user_id], + ) defer.returnValue({}) @@ -257,11 +260,14 @@ class GroupsLocalHandler(object): if "avatar_url" in content["profile"]: local_profile["avatar_url"] = content["profile"]["avatar_url"] - yield self.store.register_user_group_membership( + token = yield self.store.register_user_group_membership( group_id, user_id, membership="invite", content={"profile": local_profile, "inviter": content["inviter"]}, ) + self.notifier.on_new_event( + "groups_key", token, users=[user_id], + ) defer.returnValue({"state": "invite"}) @@ -270,10 +276,13 @@ class GroupsLocalHandler(object): """Remove a user from a group """ if user_id == requester_user_id: - yield self.store.register_user_group_membership( + token = yield self.store.register_user_group_membership( group_id, user_id, membership="leave", ) + self.notifier.on_new_event( + "groups_key", token, users=[user_id], + ) # TODO: Should probably remember that we tried to leave so that we can # retry if the group server is currently down. 
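
These handler hunks all share one shape: persist the membership change, let the storage layer hand back the stream id it allocated, then poke the notifier with that token so only the affected user's /sync requests wake up. Stripped of the group specifics (a sketch; the store and notifier arguments stand in for the real hs-provided objects):

    from twisted.internet import defer

    @defer.inlineCallbacks
    def change_membership(store, notifier, group_id, user_id, membership):
        # Persist the change; the store allocates and returns a stream id.
        token = yield store.register_user_group_membership(
            group_id, user_id, membership=membership,
        )
        # Wake only this user's sync streams at the new position.
        notifier.on_new_event("groups_key", token, users=[user_id])

Returning the token from the storage method, instead of notifying inside the transaction, keeps the database interaction free of side effects and leaves the caller in charge of when to notify.
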
@@ -296,10 +305,13 @@ class GroupsLocalHandler(object): """One of our users was removed/kicked from a group """ # TODO: Check if user in group - yield self.store.register_user_group_membership( + token = yield self.store.register_user_group_membership( group_id, user_id, membership="leave", ) + self.notifier.on_new_event( + "groups_key", token, users=[user_id], + ) @defer.inlineCallbacks def get_joined_groups(self, user_id): diff --git a/synapse/replication/slave/storage/groups.py b/synapse/replication/slave/storage/groups.py new file mode 100644 index 0000000000..0bc4bce5b0 --- /dev/null +++ b/synapse/replication/slave/storage/groups.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from ._base import BaseSlavedStore +from ._slaved_id_tracker import SlavedIdTracker +from synapse.storage import DataStore +from synapse.util.caches.stream_change_cache import StreamChangeCache + + +class SlavedGroupServerStore(BaseSlavedStore): + def __init__(self, db_conn, hs): + super(SlavedGroupServerStore, self).__init__(db_conn, hs) + + self.hs = hs + + self._group_updates_id_gen = SlavedIdTracker( + db_conn, "local_group_updates", "stream_id", + ) + self._group_updates_stream_cache = StreamChangeCache( + "_group_updates_stream_cache", self._group_updates_id_gen.get_current_token(), + ) + + get_groups_changes_for_user = DataStore.get_groups_changes_for_user.__func__ + get_group_stream_token = DataStore.get_group_stream_token.__func__ + get_all_groups_for_user = DataStore.get_all_groups_for_user.__func__ + + def stream_positions(self): + result = super(SlavedGroupServerStore, self).stream_positions() + result["groups"] = self._group_updates_id_gen.get_current_token() + return result + + def process_replication_rows(self, stream_name, token, rows): + if stream_name == "groups": + self._group_updates_id_gen.advance(token) + for row in rows: + self._group_updates_stream_cache.entity_has_changed( + row.user_id, token + ) + + return super(SlavedGroupServerStore, self).process_replication_rows( + stream_name, token, rows + ) diff --git a/synapse/replication/tcp/streams.py b/synapse/replication/tcp/streams.py index fbafe12cc2..4c60bf79f9 100644 --- a/synapse/replication/tcp/streams.py +++ b/synapse/replication/tcp/streams.py @@ -118,6 +118,12 @@ CurrentStateDeltaStreamRow = namedtuple("CurrentStateDeltaStream", ( "state_key", # str "event_id", # str, optional )) +GroupsStreamRow = namedtuple("GroupsStreamRow", ( + "group_id", # str + "user_id", # str + "type", # str + "content", # dict +)) class Stream(object): @@ -464,6 +470,19 @@ class CurrentStateDeltaStream(Stream): super(CurrentStateDeltaStream, self).__init__(hs) +class GroupServerStream(Stream): + NAME = "groups" + ROW_TYPE = GroupsStreamRow + + def __init__(self, hs): + store = hs.get_datastore() + + self.current_token = store.get_group_stream_token + self.update_function = store.get_all_groups_changes + + super(GroupServerStream, self).__init__(hs) + + STREAMS_MAP = { 
stream.NAME: stream for stream in ( @@ -482,5 +501,6 @@ STREAMS_MAP = { TagAccountDataStream, AccountDataStream, CurrentStateDeltaStream, + GroupServerStream, ) } diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index 45f0a4c599..5006ac863f 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -853,6 +853,8 @@ class GroupServerStore(SQLBaseStore): }, ) + return next_id + with self._group_updates_id_gen.get_next() as next_id: yield self.runInteraction( "register_user_group_membership", @@ -993,5 +995,26 @@ class GroupServerStore(SQLBaseStore): "get_groups_changes_for_user", _get_groups_changes_for_user_txn, ) + def get_all_groups_changes(self, from_token, to_token, limit): + from_token = int(from_token) + has_changed = self._group_updates_stream_cache.has_any_entity_changed( + from_token, + ) + if not has_changed: + return [] + + def _get_all_groups_changes_txn(txn): + sql = """ + SELECT stream_id, group_id, user_id, type, content + FROM local_group_updates + WHERE ? < stream_id AND stream_id <= ? + LIMIT ? + """ + txn.execute(sql, (from_token, to_token, limit,)) + return txn.fetchall() + return self.runInteraction( + "get_all_groups_changes", _get_all_groups_changes_txn, + ) + def get_group_stream_token(self): return self._group_updates_id_gen.get_current_token() From 960dae3340221168e1e019f250cbc2dd430a1aee Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 20 Jul 2017 17:14:44 +0100 Subject: [PATCH 0070/1637] Add notifier --- synapse/handlers/groups_local.py | 1 + 1 file changed, 1 insertion(+) diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index 4182ea5afa..0c329e633d 100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -63,6 +63,7 @@ class GroupsLocalHandler(object): self.is_mine_id = hs.is_mine_id self.signing_key = hs.config.signing_key[0] self.server_name = hs.hostname + self.notifier = hs.get_notifier() self.attestations = hs.get_groups_attestation_signing() # Ensure attestations get renewed From b238cf7f6bea80eae076bd34c50d470211a78c72 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 20 Jul 2017 17:49:55 +0100 Subject: [PATCH 0071/1637] Remove spurious content param --- synapse/rest/client/v2_alpha/groups.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/rest/client/v2_alpha/groups.py b/synapse/rest/client/v2_alpha/groups.py index 64d803d489..009cd70737 100644 --- a/synapse/rest/client/v2_alpha/groups.py +++ b/synapse/rest/client/v2_alpha/groups.py @@ -46,7 +46,7 @@ class GroupServlet(RestServlet): defer.returnValue((200, group_description)) @defer.inlineCallbacks - def on_POST(self, request, group_id, content): + def on_POST(self, request, group_id): requester = yield self.auth.get_user_by_req(request) user_id = requester.user.to_string() From d5e32c843fb51f2b7c0247ca821fd38e5f2f25d3 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 24 Jul 2017 13:31:26 +0100 Subject: [PATCH 0072/1637] Correctly add joins to correct segment --- synapse/handlers/sync.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 600d0589fd..d7b90a35ea 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -616,7 +616,7 @@ class SyncHandler(object): if membership == "join": if gtype == "membership": content.pop("membership", None) - invited[group_id] = content["content"] + joined[group_id] = content["content"] else: joined.setdefault(group_id, 
{})[gtype] = content elif membership == "invite": From 851aeae7c796b127dddf7ca6df882df6104ee5e7 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 24 Jul 2017 13:40:56 +0100 Subject: [PATCH 0073/1637] Check users/rooms are in group before adding to summary --- synapse/storage/group_server.py | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index d42e215b26..258c3168aa 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -152,6 +152,18 @@ class GroupServerStore(SQLBaseStore): an order of 1 will put the room first. Otherwise, the room gets added to the end. """ + room_in_group = self._simple_select_one_onecol_txn( + txn, + table="group_rooms", + keyvalues={ + "group_id": group_id, + "room_id": room_id, + }, + retcol="room_id", + allow_none=True, + ) + if not room_in_group: + raise SynapseError(400, "room not in group") if category_id is None: category_id = _DEFAULT_CATEGORY_ID @@ -426,6 +438,19 @@ class GroupServerStore(SQLBaseStore): an order of 1 will put the user first. Otherwise, the user gets added to the end. """ + user_in_group = self._simple_select_one_onecol_txn( + txn, + table="group_users", + keyvalues={ + "group_id": group_id, + "user_id": user_id, + }, + retcol="user_id", + allow_none=True, + ) + if not user_in_group: + raise SynapseError(400, "user not in group") + if role_id is None: role_id = _DEFAULT_ROLE_ID else: From b76ef6ccb8e00610c791a78b940d396da82fb1ce Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 24 Jul 2017 13:55:39 +0100 Subject: [PATCH 0074/1637] Include users membership in group in summary API --- synapse/groups/groups_server.py | 5 +++ synapse/storage/group_server.py | 55 +++++++++++++++++++++++++++++++++ 2 files changed, 60 insertions(+) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index b1ee43ef90..f25f327eb9 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -130,6 +130,10 @@ class GroupsServerHandler(object): users.sort(key=lambda e: e.get("order", 0)) + membership_info = yield self.store.get_users_membership_info_in_group( + group_id, requester_user_id, + ) + defer.returnValue({ "profile": profile, "users_section": { @@ -142,6 +146,7 @@ class GroupsServerHandler(object): "categories": categories, "total_room_count_estimate": 0, # TODO }, + "user": membership_info, }) @defer.inlineCallbacks diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index 258c3168aa..989a10eea6 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -672,6 +672,61 @@ class GroupServerStore(SQLBaseStore): allow_none=True, ) + @defer.inlineCallbacks + def get_users_membership_info_in_group(self, group_id, user_id): + """Get a dict describing the memebrship of a user in a group. 
+ + Example if joined: + + { + "memebrship": "joined", + "is_public": True, + "is_privileged": False, + } + + Returns an empty dict if the user is not joined/invited/etc + """ + def _get_users_membership_in_group_txn(txn): + row = self._simple_select_one_txn( + table="group_users", + keyvalues={ + "group_id": group_id, + "user_id": user_id, + }, + retcols=("is_admin", "is_public"), + allow_none=True, + desc="is_user_adim_in_group", + ) + + if row: + return { + "memebrship": "joined", + "is_public": row["is_public"], + "is_privileged": row["is_admin"], + } + + row = self._simple_select_one_onecol_txn( + table="group_invites", + keyvalues={ + "group_id": group_id, + "user_id": user_id, + }, + retcol="user_id", + desc="is_user_invited_to_local_group", + allow_none=True, + ) + + if row: + return { + "memebrship": "invited", + } + + return {} + + return self.runInteraction( + "get_users_membership_info_in_group", _get_users_membership_in_group_txn, + ) + def add_user_to_group(self, group_id, user_id, is_admin=False, is_public=True, local_attestation=None, remote_attestation=None): """Add a user to the group server. From ed666d396985c1fa2b9acb1c69199dd55670a88f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 24 Jul 2017 14:05:09 +0100 Subject: [PATCH 0075/1637] Fix all the typos --- synapse/storage/group_server.py | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index 989a10eea6..357111e305 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -643,7 +643,7 @@ class GroupServerStore(SQLBaseStore): }, retcol="is_admin", allow_none=True, - desc="is_user_adim_in_group", + desc="is_user_admin_in_group", ) def add_group_invite(self, group_id, user_id): @@ -672,14 +672,13 @@ class GroupServerStore(SQLBaseStore): allow_none=True, ) - @defer.inlineCallbacks def get_users_membership_info_in_group(self, group_id, user_id): - """Get a dict describing the memebrship of a user in a group. + """Get a dict describing the membership of a user in a group. Example if joined: { - "memebrship": "joined", + "membership": "joined", "is_public": True, "is_privileged": False, } @@ -688,6 +687,7 @@ class GroupServerStore(SQLBaseStore): """ def _get_users_membership_in_group_txn(txn): row = self._simple_select_one_txn( + txn, table="group_users", keyvalues={ "group_id": group_id, @@ -695,30 +695,29 @@ class GroupServerStore(SQLBaseStore): }, retcols=("is_admin", "is_public"), allow_none=True, - desc="is_user_adim_in_group", ) if row: return { - "memebrship": "joined", + "membership": "joined", "is_public": row["is_public"], "is_privileged": row["is_admin"], } row = self._simple_select_one_onecol_txn( + txn, table="group_invites", keyvalues={ "group_id": group_id, "user_id": user_id, }, retcol="user_id", - desc="is_user_invited_to_local_group", allow_none=True, ) if row: return { - "memebrship": "invited", + "membership": "invited", } return {} From 629cdfb124c013c07b50116386d05162e40871aa Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 24 Jul 2017 14:54:05 +0100 Subject: [PATCH 0076/1637] Use join rather than joined, etc. 
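
The storage hunks in the preceding patches lean heavily on SQLBaseStore's _simple_select_* helpers. For readers following along, they are thin wrappers around parameterised single-row SELECTs; roughly the following, keeping the keyvalues/retcols/allow_none conventions (a standalone approximation, not the real implementation):

    def simple_select_one(conn, table, keyvalues, retcols, allow_none=False):
        """Fetch one row as a dict keyed by retcols, or None if allow_none."""
        where = " AND ".join("%s = ?" % k for k in keyvalues)
        sql = "SELECT %s FROM %s WHERE %s" % (", ".join(retcols), table, where)
        row = conn.execute(sql, tuple(keyvalues.values())).fetchone()
        if row is None:
            if allow_none:
                return None
            raise RuntimeError("no row found in %s" % table)
        return dict(zip(retcols, row))

    # e.g. against an sqlite3 connection:
    #   simple_select_one(conn, "group_users",
    #                     {"group_id": group_id, "user_id": user_id},
    #                     ("is_admin", "is_public"), allow_none=True)

The _txn variants seen above take an already-open transaction instead of acquiring one, and the _onecol variants return a single column value rather than a dict.
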
--- synapse/storage/group_server.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index 357111e305..9c55e10e77 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -699,7 +699,7 @@ class GroupServerStore(SQLBaseStore): if row: return { - "membership": "joined", + "membership": "join", "is_public": row["is_public"], "is_privileged": row["is_admin"], } @@ -717,7 +717,7 @@ class GroupServerStore(SQLBaseStore): if row: return { - "membership": "invited", + "membership": "invite", } return {} From 966a70f1fa74192866ff5b0dbae67ee8f490d97d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 24 Jul 2017 17:49:39 +0100 Subject: [PATCH 0077/1637] Update comment --- synapse/storage/group_server.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index 9c55e10e77..f44e80b514 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -678,12 +678,12 @@ class GroupServerStore(SQLBaseStore): Example if joined: { - "membership": "joined", + "membership": "join", "is_public": True, "is_privileged": False, } - Returns an empty dict if the user is not joined/invited/etc + Returns an empty dict if the user is not join/invite/etc """ def _get_users_membership_in_group_txn(txn): row = self._simple_select_one_txn( From f18373dc5d6c5431bbf79760818b6ebc3467c7ba Mon Sep 17 00:00:00 2001 From: Kenny Keslar Date: Wed, 26 Jul 2017 22:44:19 -0500 Subject: [PATCH 0078/1637] Fix iteration of requests_missing_keys; list doesn't have .values() Signed-off-by: Kenny Keslar --- synapse/crypto/keyring.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 1bb27edc0f..c900f4d6df 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -305,7 +305,7 @@ class Keyring(object): if not missing_keys: break - for verify_request in requests_missing_keys.values(): + for verify_request in requests_missing_keys: verify_request.deferred.errback(SynapseError( 401, "No key for %s with id %s" % ( From 09552f9d9c82a30808cdbb8cd8a33c9fdea580bf Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 2 Aug 2017 17:29:51 +0100 Subject: [PATCH 0079/1637] Reduce spammy log line in synchrotrons --- synapse/rest/client/v2_alpha/sync.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py index 6dcc407451..2939896f44 100644 --- a/synapse/rest/client/v2_alpha/sync.py +++ b/synapse/rest/client/v2_alpha/sync.py @@ -110,7 +110,7 @@ class SyncRestServlet(RestServlet): filter_id = parse_string(request, "filter", default=None) full_state = parse_boolean(request, "full_state", default=False) - logger.info( + logger.debug( "/sync: user=%r, timeout=%r, since=%r," " set_presence=%r, filter_id=%r, device_id=%r" % ( user, timeout, since, set_presence, filter_id, device_id From a1e67bcb974e098cbbe2fbe6072bc7d7658936f9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 4 Aug 2017 10:07:10 +0100 Subject: [PATCH 0080/1637] Remove stale TODO comments --- synapse/handlers/groups_local.py | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index d0ed988224..b8b1e754c7 100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -23,16 +23,6 @@ import 
logging logger = logging.getLogger(__name__) -# TODO: Validate attestations -# TODO: Allow users to "knock" or simpkly join depending on rules -# TODO: is_priveged flag to users and is_public to users and rooms -# TODO: Roles -# TODO: Audit log for admins (profile updates, membership changes, users who tried -# to join but were rejected, etc) -# TODO: Flairs -# TODO: Add group memebership /sync - - def _create_rerouter(func_name): """Returns a function that looks at the group id and calls the function on federation or the local group server if the group is local From 5699b050722ae56953e1ec033023f7e3f7c2b15a Mon Sep 17 00:00:00 2001 From: hera Date: Fri, 4 Aug 2017 22:44:11 +0000 Subject: [PATCH 0081/1637] typo --- synapse/rest/client/v1/admin.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/rest/client/v1/admin.py b/synapse/rest/client/v1/admin.py index 7d786e8de3..7b1cd8fdac 100644 --- a/synapse/rest/client/v1/admin.py +++ b/synapse/rest/client/v1/admin.py @@ -168,7 +168,7 @@ class ShutdownRoomRestServlet(ClientV1RestServlet): DEFAULT_MESSAGE = ( "Sharing illegal content on this server is not permitted and rooms in" - " violatation will be blocked." + " violation will be blocked." ) def __init__(self, hs): From eae04f1952275b98079bc7e4fb3058ef9e134d14 Mon Sep 17 00:00:00 2001 From: hera Date: Fri, 4 Aug 2017 22:56:12 +0000 Subject: [PATCH 0082/1637] fix english --- synapse/rest/client/v1/admin.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/synapse/rest/client/v1/admin.py b/synapse/rest/client/v1/admin.py index 7b1cd8fdac..465b25033d 100644 --- a/synapse/rest/client/v1/admin.py +++ b/synapse/rest/client/v1/admin.py @@ -296,7 +296,7 @@ class QuarantineMediaInRoom(ClientV1RestServlet): class ResetPasswordRestServlet(ClientV1RestServlet): """Post request to allow an administrator reset password for a user. - This need a user have a administrator access in Synapse. + This needs user to have administrator access in Synapse. Example: http://localhost:8008/_matrix/client/api/v1/admin/reset_password/ @user:to_reset_password?access_token=admin_access_token @@ -319,7 +319,7 @@ class ResetPasswordRestServlet(ClientV1RestServlet): @defer.inlineCallbacks def on_POST(self, request, target_user_id): """Post request to allow an administrator reset password for a user. - This need a user have a administrator access in Synapse. + This needs user to have administrator access in Synapse. """ UserID.from_string(target_user_id) requester = yield self.auth.get_user_by_req(request) @@ -343,7 +343,7 @@ class ResetPasswordRestServlet(ClientV1RestServlet): class GetUsersPaginatedRestServlet(ClientV1RestServlet): """Get request to get specific number of users from Synapse. - This need a user have a administrator access in Synapse. + This needs user to have administrator access in Synapse. Example: http://localhost:8008/_matrix/client/api/v1/admin/users_paginate/ @admin:user?access_token=admin_access_token&start=0&limit=10 @@ -362,7 +362,7 @@ class GetUsersPaginatedRestServlet(ClientV1RestServlet): @defer.inlineCallbacks def on_GET(self, request, target_user_id): """Get request to get specific number of users from Synapse. - This need a user have a administrator access in Synapse. + This needs user to have administrator access in Synapse. 
""" target_user = UserID.from_string(target_user_id) requester = yield self.auth.get_user_by_req(request) @@ -395,7 +395,7 @@ class GetUsersPaginatedRestServlet(ClientV1RestServlet): @defer.inlineCallbacks def on_POST(self, request, target_user_id): """Post request to get specific number of users from Synapse.. - This need a user have a administrator access in Synapse. + This needs user to have administrator access in Synapse. Example: http://localhost:8008/_matrix/client/api/v1/admin/users_paginate/ @admin:user?access_token=admin_access_token @@ -433,7 +433,7 @@ class GetUsersPaginatedRestServlet(ClientV1RestServlet): class SearchUsersRestServlet(ClientV1RestServlet): """Get request to search user table for specific users according to search term. - This need a user have a administrator access in Synapse. + This needs user to have administrator access in Synapse. Example: http://localhost:8008/_matrix/client/api/v1/admin/search_users/ @admin:user?access_token=admin_access_token&term=alice @@ -453,7 +453,7 @@ class SearchUsersRestServlet(ClientV1RestServlet): def on_GET(self, request, target_user_id): """Get request to search user table for specific users according to search term. - This need a user have a administrator access in Synapse. + This needs user to have a administrator access in Synapse. """ target_user = UserID.from_string(target_user_id) requester = yield self.auth.get_user_by_req(request) From 05e21285aae4a0411a9ec1151ce006297fa3ca91 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 8 Aug 2017 11:50:09 +0100 Subject: [PATCH 0083/1637] Store whether the user wants to publicise their membership of a group --- synapse/handlers/groups_local.py | 4 ++++ synapse/storage/group_server.py | 2 ++ synapse/storage/schema/delta/43/group_server.sql | 1 + 3 files changed, 7 insertions(+) diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index b8b1e754c7..3a738ef36f 100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -203,12 +203,16 @@ class GroupsLocalHandler(object): user_id=user_id, ) + # TODO: Check that the group is public and we're being added publically + is_publicised = content.get("publicise", False) + token = yield self.store.register_user_group_membership( group_id, user_id, membership="join", is_admin=False, local_attestation=local_attestation, remote_attestation=remote_attestation, + is_publicised=is_publicised, ) self.notifier.on_new_event( "groups_key", token, users=[user_id], diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index f44e80b514..31514f3cdb 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -840,6 +840,7 @@ class GroupServerStore(SQLBaseStore): is_admin=False, content={}, local_attestation=None, remote_attestation=None, + is_publicised=False, ): """Registers that a local user is a member of a (local or remote) group. 
@@ -873,6 +874,7 @@ class GroupServerStore(SQLBaseStore): "user_id": user_id, "is_admin": is_admin, "membership": membership, + "is_publicised": is_publicised, "content": json.dumps(content), }, ) diff --git a/synapse/storage/schema/delta/43/group_server.sql b/synapse/storage/schema/delta/43/group_server.sql index 92f3339c94..01ac0edc35 100644 --- a/synapse/storage/schema/delta/43/group_server.sql +++ b/synapse/storage/schema/delta/43/group_server.sql @@ -150,6 +150,7 @@ CREATE TABLE local_group_membership ( user_id TEXT NOT NULL, is_admin BOOLEAN NOT NULL, membership TEXT NOT NULL, + is_publicised TEXT NOT NULL, -- if the user is publicising their membership content TEXT NOT NULL ); From b880ff190a82d4f337b94115fc017d703e53878d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 8 Aug 2017 14:19:07 +0100 Subject: [PATCH 0084/1637] Allow update group publicity --- synapse/rest/client/v2_alpha/groups.py | 28 ++++++++++++++++++++++++++ synapse/storage/group_server.py | 15 ++++++++++++++ 2 files changed, 43 insertions(+) diff --git a/synapse/rest/client/v2_alpha/groups.py b/synapse/rest/client/v2_alpha/groups.py index 009cd70737..9b1116acee 100644 --- a/synapse/rest/client/v2_alpha/groups.py +++ b/synapse/rest/client/v2_alpha/groups.py @@ -557,6 +557,33 @@ class GroupSelfAcceptInviteServlet(RestServlet): defer.returnValue((200, result)) +class GroupSelfUpdatePublicityServlet(RestServlet): + """Update whether we publicise a users membership of a group + """ + PATTERNS = client_v2_patterns( + "/groups/(?P[^/]*)/self/update_publicity$" + ) + + def __init__(self, hs): + super(GroupSelfUpdatePublicityServlet, self).__init__() + self.auth = hs.get_auth() + self.clock = hs.get_clock() + self.store = hs.get_datastore() + + @defer.inlineCallbacks + def on_PUT(self, request, group_id): + requester = yield self.auth.get_user_by_req(request) + requester_user_id = requester.user.to_string() + + content = parse_json_object_from_request(request) + publicise = content["publicise"] + yield self.store.update_group_publicity( + group_id, requester_user_id, publicise, + ) + + defer.returnValue((200, {})) + + class GroupsForUserServlet(RestServlet): """Get all groups the logged in user is joined to """ @@ -598,4 +625,5 @@ def register_servlets(hs, http_server): GroupSummaryRoomsCatServlet(hs).register(http_server) GroupRoleServlet(hs).register(http_server) GroupRolesServlet(hs).register(http_server) + GroupSelfUpdatePublicityServlet(hs).register(http_server) GroupSummaryUsersRoleServlet(hs).register(http_server) diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index 31514f3cdb..10e757e975 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -835,6 +835,21 @@ class GroupServerStore(SQLBaseStore): desc="add_room_to_group", ) + def update_group_publicity(self, group_id, user_id, publicise): + """Update whether the user is publicising their membership of the group + """ + return self._simple_update_one( + table="local_group_membership", + keyvalues={ + "group_id": group_id, + "user_id": user_id, + }, + updatevalues={ + "is_publicised": publicise, + }, + desc="update_group_publicity" + ) + @defer.inlineCallbacks def register_user_group_membership(self, group_id, user_id, membership, is_admin=False, content={}, From ef8e5786770ff285ebdf1fce420b5aa86437673c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 9 Aug 2017 13:36:22 +0100 Subject: [PATCH 0085/1637] Add bulk group publicised lookup API --- synapse/federation/transport/client.py | 15 
+++++++ synapse/federation/transport/server.py | 17 ++++++++ synapse/handlers/groups_local.py | 42 ++++++++++++++++++++ synapse/rest/client/v2_alpha/groups.py | 54 ++++++++++++++++++++++++++ synapse/storage/group_server.py | 14 +++++++ 5 files changed, 142 insertions(+) diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 073d3abb2a..ce68cc4937 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -812,3 +812,18 @@ class TransportLayerClient(object): args={"requester_user_id": requester_user_id}, ignore_backoff=True, ) + + def bulk_get_publicised_groups(self, destination, user_ids): + """Get the groups a list of users are publicising + """ + + path = PREFIX + "/get_groups_publicised" + + content = {"user_ids": user_ids} + + return self.client.post_json( + destination=destination, + path=path, + data=content, + ignore_backoff=True, + ) diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index e04750fd2a..b5f07c50bf 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -1050,6 +1050,22 @@ class FederationGroupsSummaryUsersServlet(BaseFederationServlet): defer.returnValue((200, resp)) +class FederationGroupsBulkPublicisedServlet(BaseFederationServlet): + """Get roles in a group + """ + PATH = ( + "/get_groups_publicised$" + ) + + @defer.inlineCallbacks + def on_POST(self, origin, content, query): + resp = yield self.handler.bulk_get_publicised_groups( + content["user_ids"], proxy=False, + ) + + defer.returnValue((200, resp)) + + FEDERATION_SERVLET_CLASSES = ( FederationSendServlet, FederationPullServlet, @@ -1102,6 +1118,7 @@ GROUP_SERVER_SERVLET_CLASSES = ( GROUP_LOCAL_SERVLET_CLASSES = ( FederationGroupsLocalInviteServlet, FederationGroupsRemoveLocalUserServlet, + FederationGroupsBulkPublicisedServlet, ) diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index 3a738ef36f..c980623bbc 100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -313,3 +313,45 @@ class GroupsLocalHandler(object): def get_joined_groups(self, user_id): group_ids = yield self.store.get_joined_groups(user_id) defer.returnValue({"groups": group_ids}) + + @defer.inlineCallbacks + def get_publicised_groups_for_user(self, user_id): + if self.hs.is_mine_id(user_id): + result = yield self.store.get_publicised_groups_for_user(user_id) + defer.returnValue({"groups": result}) + else: + result = yield self.transport_client.get_publicised_groups_for_user( + get_domain_from_id(user_id), user_id + ) + # TODO: Verify attestations + defer.returnValue(result) + + @defer.inlineCallbacks + def bulk_get_publicised_groups(self, user_ids, proxy=True): + destinations = {} + locals = [] + + for user_id in user_ids: + if self.hs.is_mine_id(user_id): + locals.append(user_id) + else: + destinations.setdefault( + get_domain_from_id(user_id), [] + ).append(user_id) + + if not proxy and destinations: + raise SynapseError(400, "Some user_ids are not local") + + results = {} + for destination, dest_user_ids in destinations.iteritems(): + r = yield self.transport_client.bulk_get_publicised_groups( + destination, dest_user_ids, + ) + results.update(r) + + for uid in locals: + results[uid] = yield self.store.get_publicised_groups_for_user( + uid + ) + + defer.returnValue({"users": results}) diff --git a/synapse/rest/client/v2_alpha/groups.py b/synapse/rest/client/v2_alpha/groups.py index 9b1116acee..97d7948bb9 
100644 --- a/synapse/rest/client/v2_alpha/groups.py +++ b/synapse/rest/client/v2_alpha/groups.py @@ -584,6 +584,59 @@ class GroupSelfUpdatePublicityServlet(RestServlet): defer.returnValue((200, {})) +class PublicisedGroupsForUserServlet(RestServlet): + """Get the list of groups a user is advertising + """ + PATTERNS = client_v2_patterns( + "/publicised_groups/(?P[^/]*)$" + ) + + def __init__(self, hs): + super(PublicisedGroupsForUserServlet, self).__init__() + self.auth = hs.get_auth() + self.clock = hs.get_clock() + self.store = hs.get_datastore() + self.groups_handler = hs.get_groups_local_handler() + + @defer.inlineCallbacks + def on_GET(self, request, user_id): + yield self.auth.get_user_by_req(request) + + result = yield self.groups_handler.get_publicised_groups_for_user( + user_id + ) + + defer.returnValue((200, result)) + + +class PublicisedGroupsForUsersServlet(RestServlet): + """Get the list of groups a user is advertising + """ + PATTERNS = client_v2_patterns( + "/publicised_groups$" + ) + + def __init__(self, hs): + super(PublicisedGroupsForUsersServlet, self).__init__() + self.auth = hs.get_auth() + self.clock = hs.get_clock() + self.store = hs.get_datastore() + self.groups_handler = hs.get_groups_local_handler() + + @defer.inlineCallbacks + def on_POST(self, request): + yield self.auth.get_user_by_req(request) + + content = parse_json_object_from_request(request) + user_ids = content["user_ids"] + + result = yield self.groups_handler.bulk_get_publicised_groups( + user_ids + ) + + defer.returnValue((200, result)) + + class GroupsForUserServlet(RestServlet): """Get all groups the logged in user is joined to """ @@ -627,3 +680,4 @@ def register_servlets(hs, http_server): GroupRolesServlet(hs).register(http_server) GroupSelfUpdatePublicityServlet(hs).register(http_server) GroupSummaryUsersRoleServlet(hs).register(http_server) + PublicisedGroupsForUserServlet(hs).register(http_server) diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index 10e757e975..0c35b03d2a 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -835,6 +835,20 @@ class GroupServerStore(SQLBaseStore): desc="add_room_to_group", ) + def get_publicised_groups_for_user(self, user_id): + """Get all groups a user is publicising + """ + return self._simple_select_onecol( + table="local_group_membership", + keyvalues={ + "user_id": user_id, + "membership": "join", + "is_publicised": True, + }, + retcol="group_id", + desc="get_publicised_groups_for_user", + ) + def update_group_publicity(self, group_id, user_id, publicise): """Update whether the user is publicising their membership of the group """ From ba3ff7918b54ae431aaaedb3d12650c93d366c04 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 11 Aug 2017 13:42:42 +0100 Subject: [PATCH 0086/1637] Fixup --- synapse/handlers/groups_local.py | 22 +++++++++++++--------- synapse/rest/client/v2_alpha/groups.py | 1 + 2 files changed, 14 insertions(+), 9 deletions(-) diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index c980623bbc..274fed9278 100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -329,27 +329,31 @@ class GroupsLocalHandler(object): @defer.inlineCallbacks def bulk_get_publicised_groups(self, user_ids, proxy=True): destinations = {} - locals = [] + local_users = set() for user_id in user_ids: if self.hs.is_mine_id(user_id): - locals.append(user_id) + local_users.add(user_id) else: destinations.setdefault( - get_domain_from_id(user_id), 
[] - ).append(user_id) + get_domain_from_id(user_id), set() + ).add(user_id) if not proxy and destinations: raise SynapseError(400, "Some user_ids are not local") results = {} + failed_results = [] for destination, dest_user_ids in destinations.iteritems(): - r = yield self.transport_client.bulk_get_publicised_groups( - destination, dest_user_ids, - ) - results.update(r) + try: + r = yield self.transport_client.bulk_get_publicised_groups( + destination, list(dest_user_ids), + ) + results.update(r["users"]) + except Exception: + failed_results.extend(dest_user_ids) - for uid in locals: + for uid in local_users: results[uid] = yield self.store.get_publicised_groups_for_user( uid ) diff --git a/synapse/rest/client/v2_alpha/groups.py b/synapse/rest/client/v2_alpha/groups.py index 97d7948bb9..b469058e9d 100644 --- a/synapse/rest/client/v2_alpha/groups.py +++ b/synapse/rest/client/v2_alpha/groups.py @@ -681,3 +681,4 @@ def register_servlets(hs, http_server): GroupSelfUpdatePublicityServlet(hs).register(http_server) GroupSummaryUsersRoleServlet(hs).register(http_server) PublicisedGroupsForUserServlet(hs).register(http_server) + PublicisedGroupsForUsersServlet(hs).register(http_server) From 09703609fc2432ecf086b8fe45d6b324c5a923c9 Mon Sep 17 00:00:00 2001 From: Tom Lant Date: Mon, 14 Aug 2017 14:35:25 +0100 Subject: [PATCH 0087/1637] Create ISSUE_TEMPLATE.md A new issue template proposed to try and steer people towards #matrix:matrix.org for support queries relating to running their own homeserver. --- ISSUE_TEMPLATE.md | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) create mode 100644 ISSUE_TEMPLATE.md diff --git a/ISSUE_TEMPLATE.md b/ISSUE_TEMPLATE.md new file mode 100644 index 0000000000..61d8b59eb0 --- /dev/null +++ b/ISSUE_TEMPLATE.md @@ -0,0 +1,36 @@ + + +### Description + +Describe here the problem that you are experiencing, or the feature you are requesting. + +### Steps to reproduce + +- For bugs, list the steps +- that reproduce the bug +- using hyphens as bullet points + +Describe how what happens differs from what you expected. + +### Version information + + + +- **Homeserver**: was this issue identified on matrix.org or another homeserver? +- **Platform**: tell us about the environment in which your homeserver is operating + - distro, hardware, if it's running in a vm/container, etc. From b524dd4c355700e449ff5806514527e7b040ac5c Mon Sep 17 00:00:00 2001 From: Tom Lant Date: Mon, 14 Aug 2017 14:36:49 +0100 Subject: [PATCH 0088/1637] Update ISSUE_TEMPLATE.md Oops capital L. --- ISSUE_TEMPLATE.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ISSUE_TEMPLATE.md b/ISSUE_TEMPLATE.md index 61d8b59eb0..e5415a202b 100644 --- a/ISSUE_TEMPLATE.md +++ b/ISSUE_TEMPLATE.md @@ -1,7 +1,7 @@ ### Description From 543c794a76a0e1c97883cf58981c0dcbfc83c6f8 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 15 Aug 2017 15:57:46 +0100 Subject: [PATCH 0090/1637] Factor out common application start We have 10 copies of this code, and I don't really want to update each one separately. 
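
After this refactor each worker's start() collapses to its own setup plus one call into the shared helper; schematically (a hypothetical worker, with the config attribute names taken from the new _base.py below):

    from synapse.app import _base

    def start(config):
        # ... build the worker HomeServer, run ps.setup() and
        # ps.start_listening(config.worker_listeners) as before ...
        _base.start_worker_reactor("synapse-demo-worker", config)

start_worker_reactor reads soft_file_limit, gc_thresholds, worker_pid_file and worker_daemonize off the config and hands them to start_reactor, so the daemonize/logcontext/rlimit boilerplate now lives in exactly one place.
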
--- synapse/app/_base.py | 92 +++++++++++++++++++++++++ synapse/app/appservice.py | 50 +++----------- synapse/app/client_reader.py | 53 +++------------ synapse/app/federation_reader.py | 53 +++------------ synapse/app/federation_sender.py | 57 ++++------------ synapse/app/frontend_proxy.py | 76 ++++++--------------- synapse/app/homeserver.py | 113 +++++++++++-------------------- synapse/app/media_repository.py | 53 +++------------ synapse/app/pusher.py | 57 ++++------------ synapse/app/synchrotron.py | 69 ++++++------------- synapse/app/user_dir.py | 57 ++++------------ 11 files changed, 257 insertions(+), 473 deletions(-) create mode 100644 synapse/app/_base.py diff --git a/synapse/app/_base.py b/synapse/app/_base.py new file mode 100644 index 0000000000..3889c35946 --- /dev/null +++ b/synapse/app/_base.py @@ -0,0 +1,92 @@ +# -*- coding: utf-8 -*- +# Copyright 2017 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import gc +import logging + +from daemonize import Daemonize +from synapse.util import PreserveLoggingContext +from synapse.util.rlimit import change_resource_limit +from twisted.internet import reactor + + +def start_worker_reactor(appname, config): + """ Run the reactor in the main process + + Daemonizes if necessary, and then configures some resources, before starting + the reactor. Pulls configuration from the 'worker' settings in 'config'. + + Args: + appname (str): application name which will be sent to syslog + config (synapse.config.Config): config object + """ + + logger = logging.getLogger(config.worker_app) + + start_reactor( + appname, + config.soft_file_limit, + config.gc_thresholds, + config.worker_pid_file, + config.worker_daemonize, + logger + ) + + +def start_reactor( + appname, + soft_file_limit, + gc_thresholds, + pid_file, + daemonize, + logger, +): + """ Run the reactor in the main process + + Daemonizes if necessary, and then configures some resources, before starting + the reactor + + Args: + appname (str): application name which will be sent to syslog + soft_file_limit (int): + gc_thresholds: + pid_file (str): name of pid file to write to if daemonize is True + daemonize (bool): true to run the reactor in a background process + logger (logging.Logger): logger instance to pass to Daemonize + """ + + def run(): + # make sure that we run the reactor with the sentinel log context, + # otherwise other PreserveLoggingContext instances will get confused + # and complain when they see the logcontext arbitrarily swapping + # between the sentinel and `run` logcontexts. 
+ with PreserveLoggingContext(): + logger.info("Running") + change_resource_limit(soft_file_limit) + if gc_thresholds: + gc.set_threshold(*gc_thresholds) + reactor.run() + + if daemonize: + daemon = Daemonize( + app=appname, + pid=pid_file, + action=run, + auto_close_fds=False, + verbose=True, + logger=logger, + ) + daemon.start() + else: + run() diff --git a/synapse/app/appservice.py b/synapse/app/appservice.py index 9a476efa63..ba2657bbad 100644 --- a/synapse/app/appservice.py +++ b/synapse/app/appservice.py @@ -13,38 +13,31 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import logging +import sys import synapse - -from synapse.server import HomeServer +from synapse import events +from synapse.app import _base from synapse.config._base import ConfigError -from synapse.config.logger import setup_logging from synapse.config.homeserver import HomeServerConfig +from synapse.config.logger import setup_logging from synapse.http.site import SynapseSite -from synapse.metrics.resource import MetricsResource, METRICS_PREFIX +from synapse.metrics.resource import METRICS_PREFIX, MetricsResource +from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore from synapse.replication.slave.storage.directory import DirectoryStore from synapse.replication.slave.storage.events import SlavedEventStore -from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore from synapse.replication.slave.storage.registration import SlavedRegistrationStore from synapse.replication.tcp.client import ReplicationClientHandler +from synapse.server import HomeServer from synapse.storage.engines import create_engine from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.logcontext import LoggingContext, PreserveLoggingContext, preserve_fn +from synapse.util.logcontext import LoggingContext, preserve_fn from synapse.util.manhole import manhole -from synapse.util.rlimit import change_resource_limit from synapse.util.versionstring import get_version_string - -from synapse import events - from twisted.internet import reactor from twisted.web.resource import Resource -from daemonize import Daemonize - -import sys -import logging -import gc - logger = logging.getLogger("synapse.app.appservice") @@ -181,36 +174,13 @@ def start(config_options): ps.setup() ps.start_listening(config.worker_listeners) - def run(): - # make sure that we run the reactor with the sentinel log context, - # otherwise other PreserveLoggingContext instances will get confused - # and complain when they see the logcontext arbitrarily swapping - # between the sentinel and `run` logcontexts. 
- with PreserveLoggingContext(): - logger.info("Running") - change_resource_limit(config.soft_file_limit) - if config.gc_thresholds: - gc.set_threshold(*config.gc_thresholds) - reactor.run() - def start(): ps.get_datastore().start_profiling() ps.get_state_handler().start_caching() reactor.callWhenRunning(start) - if config.worker_daemonize: - daemon = Daemonize( - app="synapse-appservice", - pid=config.worker_pid_file, - action=run, - auto_close_fds=False, - verbose=True, - logger=logger, - ) - daemon.start() - else: - run() + _base.start_worker_reactor("synapse-appservice", config) if __name__ == '__main__': diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py index 09bc1935f1..129cfa901f 100644 --- a/synapse/app/client_reader.py +++ b/synapse/app/client_reader.py @@ -13,47 +13,39 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import logging +import sys import synapse - +from synapse import events +from synapse.app import _base from synapse.config._base import ConfigError from synapse.config.homeserver import HomeServerConfig from synapse.config.logger import setup_logging -from synapse.http.site import SynapseSite +from synapse.crypto import context_factory from synapse.http.server import JsonResource -from synapse.metrics.resource import MetricsResource, METRICS_PREFIX +from synapse.http.site import SynapseSite +from synapse.metrics.resource import METRICS_PREFIX, MetricsResource from synapse.replication.slave.storage._base import BaseSlavedStore from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore from synapse.replication.slave.storage.client_ips import SlavedClientIpStore +from synapse.replication.slave.storage.directory import DirectoryStore from synapse.replication.slave.storage.events import SlavedEventStore from synapse.replication.slave.storage.keys import SlavedKeyStore -from synapse.replication.slave.storage.room import RoomStore -from synapse.replication.slave.storage.directory import DirectoryStore from synapse.replication.slave.storage.registration import SlavedRegistrationStore +from synapse.replication.slave.storage.room import RoomStore from synapse.replication.slave.storage.transactions import TransactionStore from synapse.replication.tcp.client import ReplicationClientHandler from synapse.rest.client.v1.room import PublicRoomListRestServlet from synapse.server import HomeServer from synapse.storage.engines import create_engine from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.logcontext import LoggingContext, PreserveLoggingContext +from synapse.util.logcontext import LoggingContext from synapse.util.manhole import manhole -from synapse.util.rlimit import change_resource_limit from synapse.util.versionstring import get_version_string -from synapse.crypto import context_factory - -from synapse import events - - from twisted.internet import reactor from twisted.web.resource import Resource -from daemonize import Daemonize - -import sys -import logging -import gc - logger = logging.getLogger("synapse.app.client_reader") @@ -183,36 +175,13 @@ def start(config_options): ss.get_handlers() ss.start_listening(config.worker_listeners) - def run(): - # make sure that we run the reactor with the sentinel log context, - # otherwise other PreserveLoggingContext instances will get confused - # and complain when they see the logcontext arbitrarily swapping - # between 
the sentinel and `run` logcontexts. - with PreserveLoggingContext(): - logger.info("Running") - change_resource_limit(config.soft_file_limit) - if config.gc_thresholds: - gc.set_threshold(*config.gc_thresholds) - reactor.run() - def start(): ss.get_state_handler().start_caching() ss.get_datastore().start_profiling() reactor.callWhenRunning(start) - if config.worker_daemonize: - daemon = Daemonize( - app="synapse-client-reader", - pid=config.worker_pid_file, - action=run, - auto_close_fds=False, - verbose=True, - logger=logger, - ) - daemon.start() - else: - run() + _base.start_worker_reactor("synapse-client-reader", config) if __name__ == '__main__': diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py index eb392e1c9d..40cebe6f4a 100644 --- a/synapse/app/federation_reader.py +++ b/synapse/app/federation_reader.py @@ -13,44 +13,36 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import logging +import sys import synapse - +from synapse import events +from synapse.api.urls import FEDERATION_PREFIX +from synapse.app import _base from synapse.config._base import ConfigError from synapse.config.homeserver import HomeServerConfig from synapse.config.logger import setup_logging +from synapse.crypto import context_factory +from synapse.federation.transport.server import TransportLayerServer from synapse.http.site import SynapseSite -from synapse.metrics.resource import MetricsResource, METRICS_PREFIX +from synapse.metrics.resource import METRICS_PREFIX, MetricsResource from synapse.replication.slave.storage._base import BaseSlavedStore +from synapse.replication.slave.storage.directory import DirectoryStore from synapse.replication.slave.storage.events import SlavedEventStore from synapse.replication.slave.storage.keys import SlavedKeyStore from synapse.replication.slave.storage.room import RoomStore from synapse.replication.slave.storage.transactions import TransactionStore -from synapse.replication.slave.storage.directory import DirectoryStore from synapse.replication.tcp.client import ReplicationClientHandler from synapse.server import HomeServer from synapse.storage.engines import create_engine from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.logcontext import LoggingContext, PreserveLoggingContext +from synapse.util.logcontext import LoggingContext from synapse.util.manhole import manhole -from synapse.util.rlimit import change_resource_limit from synapse.util.versionstring import get_version_string -from synapse.api.urls import FEDERATION_PREFIX -from synapse.federation.transport.server import TransportLayerServer -from synapse.crypto import context_factory - -from synapse import events - - from twisted.internet import reactor from twisted.web.resource import Resource -from daemonize import Daemonize - -import sys -import logging -import gc - logger = logging.getLogger("synapse.app.federation_reader") @@ -172,36 +164,13 @@ def start(config_options): ss.get_handlers() ss.start_listening(config.worker_listeners) - def run(): - # make sure that we run the reactor with the sentinel log context, - # otherwise other PreserveLoggingContext instances will get confused - # and complain when they see the logcontext arbitrarily swapping - # between the sentinel and `run` logcontexts. 
- with PreserveLoggingContext(): - logger.info("Running") - change_resource_limit(config.soft_file_limit) - if config.gc_thresholds: - gc.set_threshold(*config.gc_thresholds) - reactor.run() - def start(): ss.get_state_handler().start_caching() ss.get_datastore().start_profiling() reactor.callWhenRunning(start) - if config.worker_daemonize: - daemon = Daemonize( - app="synapse-federation-reader", - pid=config.worker_pid_file, - action=run, - auto_close_fds=False, - verbose=True, - logger=logger, - ) - daemon.start() - else: - run() + _base.start_worker_reactor("synapse-federation-reader", config) if __name__ == '__main__': diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py index 03327dc47a..389e3909d1 100644 --- a/synapse/app/federation_sender.py +++ b/synapse/app/federation_sender.py @@ -13,44 +13,37 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import logging +import sys import synapse - -from synapse.server import HomeServer +from synapse import events +from synapse.app import _base from synapse.config._base import ConfigError -from synapse.config.logger import setup_logging from synapse.config.homeserver import HomeServerConfig +from synapse.config.logger import setup_logging from synapse.crypto import context_factory -from synapse.http.site import SynapseSite from synapse.federation import send_queue -from synapse.metrics.resource import MetricsResource, METRICS_PREFIX +from synapse.http.site import SynapseSite +from synapse.metrics.resource import METRICS_PREFIX, MetricsResource from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore +from synapse.replication.slave.storage.devices import SlavedDeviceStore from synapse.replication.slave.storage.events import SlavedEventStore +from synapse.replication.slave.storage.presence import SlavedPresenceStore from synapse.replication.slave.storage.receipts import SlavedReceiptsStore from synapse.replication.slave.storage.registration import SlavedRegistrationStore -from synapse.replication.slave.storage.presence import SlavedPresenceStore from synapse.replication.slave.storage.transactions import TransactionStore -from synapse.replication.slave.storage.devices import SlavedDeviceStore from synapse.replication.tcp.client import ReplicationClientHandler +from synapse.server import HomeServer from synapse.storage.engines import create_engine from synapse.util.async import Linearizer from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.logcontext import LoggingContext, PreserveLoggingContext, preserve_fn +from synapse.util.logcontext import LoggingContext, preserve_fn from synapse.util.manhole import manhole -from synapse.util.rlimit import change_resource_limit from synapse.util.versionstring import get_version_string - -from synapse import events - -from twisted.internet import reactor, defer +from twisted.internet import defer, reactor from twisted.web.resource import Resource -from daemonize import Daemonize - -import sys -import logging -import gc - logger = logging.getLogger("synapse.app.federation_sender") @@ -213,36 +206,12 @@ def start(config_options): ps.setup() ps.start_listening(config.worker_listeners) - def run(): - # make sure that we run the reactor with the sentinel log context, - # otherwise other PreserveLoggingContext instances will get confused - # and complain when they see the logcontext arbitrarily swapping - 
# between the sentinel and `run` logcontexts. - with PreserveLoggingContext(): - logger.info("Running") - change_resource_limit(config.soft_file_limit) - if config.gc_thresholds: - gc.set_threshold(*config.gc_thresholds) - reactor.run() - def start(): ps.get_datastore().start_profiling() ps.get_state_handler().start_caching() reactor.callWhenRunning(start) - - if config.worker_daemonize: - daemon = Daemonize( - app="synapse-federation-sender", - pid=config.worker_pid_file, - action=run, - auto_close_fds=False, - verbose=True, - logger=logger, - ) - daemon.start() - else: - run() + _base.start_worker_reactor("synapse-federation-sender", config) class FederationSenderHandler(object): diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py index 132f18a979..bee4c47498 100644 --- a/synapse/app/frontend_proxy.py +++ b/synapse/app/frontend_proxy.py @@ -13,48 +13,39 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import logging +import sys import synapse - +from synapse import events +from synapse.api.errors import SynapseError +from synapse.app import _base from synapse.config._base import ConfigError from synapse.config.homeserver import HomeServerConfig from synapse.config.logger import setup_logging -from synapse.http.site import SynapseSite -from synapse.http.server import JsonResource -from synapse.metrics.resource import MetricsResource, METRICS_PREFIX -from synapse.replication.slave.storage._base import BaseSlavedStore -from synapse.replication.slave.storage.client_ips import SlavedClientIpStore -from synapse.replication.slave.storage.devices import SlavedDeviceStore -from synapse.replication.slave.storage.registration import SlavedRegistrationStore -from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore -from synapse.replication.tcp.client import ReplicationClientHandler -from synapse.server import HomeServer -from synapse.storage.engines import create_engine -from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.logcontext import LoggingContext, PreserveLoggingContext -from synapse.util.manhole import manhole -from synapse.util.rlimit import change_resource_limit -from synapse.util.versionstring import get_version_string from synapse.crypto import context_factory -from synapse.api.errors import SynapseError +from synapse.http.server import JsonResource from synapse.http.servlet import ( RestServlet, parse_json_object_from_request, ) +from synapse.http.site import SynapseSite +from synapse.metrics.resource import METRICS_PREFIX, MetricsResource +from synapse.replication.slave.storage._base import BaseSlavedStore +from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore +from synapse.replication.slave.storage.client_ips import SlavedClientIpStore +from synapse.replication.slave.storage.devices import SlavedDeviceStore +from synapse.replication.slave.storage.registration import SlavedRegistrationStore +from synapse.replication.tcp.client import ReplicationClientHandler from synapse.rest.client.v2_alpha._base import client_v2_patterns - -from synapse import events - - -from twisted.internet import reactor, defer +from synapse.server import HomeServer +from synapse.storage.engines import create_engine +from synapse.util.httpresourcetree import create_resource_tree +from synapse.util.logcontext import LoggingContext +from synapse.util.manhole import 
manhole +from synapse.util.versionstring import get_version_string +from twisted.internet import defer, reactor from twisted.web.resource import Resource -from daemonize import Daemonize - -import sys -import logging -import gc - - logger = logging.getLogger("synapse.app.frontend_proxy") @@ -234,36 +225,13 @@ def start(config_options): ss.get_handlers() ss.start_listening(config.worker_listeners) - def run(): - # make sure that we run the reactor with the sentinel log context, - # otherwise other PreserveLoggingContext instances will get confused - # and complain when they see the logcontext arbitrarily swapping - # between the sentinel and `run` logcontexts. - with PreserveLoggingContext(): - logger.info("Running") - change_resource_limit(config.soft_file_limit) - if config.gc_thresholds: - gc.set_threshold(*config.gc_thresholds) - reactor.run() - def start(): ss.get_state_handler().start_caching() ss.get_datastore().start_profiling() reactor.callWhenRunning(start) - if config.worker_daemonize: - daemon = Daemonize( - app="synapse-frontend-proxy", - pid=config.worker_pid_file, - action=run, - auto_close_fds=False, - verbose=True, - logger=logger, - ) - daemon.start() - else: - run() + _base.start_worker_reactor("synapse-frontend-proxy", config) if __name__ == '__main__': diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 081e7cce59..83b6c3212b 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -13,61 +13,48 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - -import synapse - import gc import logging import os import sys +import synapse import synapse.config.logger +from synapse import events +from synapse.api.urls import CONTENT_REPO_PREFIX, FEDERATION_PREFIX, \ + LEGACY_MEDIA_PREFIX, MEDIA_PREFIX, SERVER_KEY_PREFIX, SERVER_KEY_V2_PREFIX, \ + STATIC_PREFIX, WEB_CLIENT_PREFIX +from synapse.app import _base from synapse.config._base import ConfigError - -from synapse.python_dependencies import ( - check_requirements, CONDITIONAL_REQUIREMENTS -) - -from synapse.rest import ClientRestResource -from synapse.storage.engines import create_engine, IncorrectDatabaseSetup -from synapse.storage import are_all_users_on_domain -from synapse.storage.prepare_database import UpgradeDatabaseException, prepare_database - -from synapse.server import HomeServer - -from twisted.internet import reactor, defer -from twisted.application import service -from twisted.web.resource import Resource, EncodingResourceWrapper -from twisted.web.static import File -from twisted.web.server import GzipEncoderFactory -from synapse.http.server import RootRedirect -from synapse.rest.media.v0.content_repository import ContentRepoResource -from synapse.rest.media.v1.media_repository import MediaRepositoryResource -from synapse.rest.key.v1.server_key_resource import LocalKey -from synapse.rest.key.v2 import KeyApiV2Resource -from synapse.api.urls import ( - FEDERATION_PREFIX, WEB_CLIENT_PREFIX, CONTENT_REPO_PREFIX, - SERVER_KEY_PREFIX, LEGACY_MEDIA_PREFIX, MEDIA_PREFIX, STATIC_PREFIX, - SERVER_KEY_V2_PREFIX, -) from synapse.config.homeserver import HomeServerConfig from synapse.crypto import context_factory -from synapse.util.logcontext import LoggingContext, PreserveLoggingContext -from synapse.metrics import register_memory_metrics -from synapse.metrics.resource import MetricsResource, METRICS_PREFIX -from synapse.replication.tcp.resource import 
ReplicationStreamProtocolFactory from synapse.federation.transport.server import TransportLayerServer - +from synapse.http.server import RootRedirect +from synapse.http.site import SynapseSite +from synapse.metrics import register_memory_metrics +from synapse.metrics.resource import METRICS_PREFIX, MetricsResource +from synapse.python_dependencies import CONDITIONAL_REQUIREMENTS, \ + check_requirements +from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory +from synapse.rest import ClientRestResource +from synapse.rest.key.v1.server_key_resource import LocalKey +from synapse.rest.key.v2 import KeyApiV2Resource +from synapse.rest.media.v0.content_repository import ContentRepoResource +from synapse.rest.media.v1.media_repository import MediaRepositoryResource +from synapse.server import HomeServer +from synapse.storage import are_all_users_on_domain +from synapse.storage.engines import IncorrectDatabaseSetup, create_engine +from synapse.storage.prepare_database import UpgradeDatabaseException, prepare_database +from synapse.util.httpresourcetree import create_resource_tree +from synapse.util.logcontext import LoggingContext +from synapse.util.manhole import manhole from synapse.util.rlimit import change_resource_limit from synapse.util.versionstring import get_version_string -from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.manhole import manhole - -from synapse.http.site import SynapseSite - -from synapse import events - -from daemonize import Daemonize +from twisted.application import service +from twisted.internet import defer, reactor +from twisted.web.resource import EncodingResourceWrapper, Resource +from twisted.web.server import GzipEncoderFactory +from twisted.web.static import File logger = logging.getLogger("synapse.app.homeserver") @@ -446,37 +433,17 @@ def run(hs): # be quite busy the first few minutes clock.call_later(5 * 60, phone_stats_home) - def in_thread(): - # Uncomment to enable tracing of log context changes. - # sys.settrace(logcontext_tracer) + if hs.config.daemonize and hs.config.print_pidfile: + print (hs.config.pid_file) - # make sure that we run the reactor with the sentinel log context, - # otherwise other PreserveLoggingContext instances will get confused - # and complain when they see the logcontext arbitrarily swapping - # between the sentinel and `run` logcontexts. - with PreserveLoggingContext(): - change_resource_limit(hs.config.soft_file_limit) - if hs.config.gc_thresholds: - gc.set_threshold(*hs.config.gc_thresholds) - reactor.run() - - if hs.config.daemonize: - - if hs.config.print_pidfile: - print (hs.config.pid_file) - - daemon = Daemonize( - app="synapse-homeserver", - pid=hs.config.pid_file, - action=lambda: in_thread(), - auto_close_fds=False, - verbose=True, - logger=logger, - ) - - daemon.start() - else: - in_thread() + _base.start_reactor( + "synapse-homeserver", + hs.config.soft_file_limit, + hs.config.gc_thresholds, + hs.config.pid_file, + hs.config.daemonize, + logger, + ) def main(): diff --git a/synapse/app/media_repository.py b/synapse/app/media_repository.py index f57ec784fe..36c18bdbcb 100644 --- a/synapse/app/media_repository.py +++ b/synapse/app/media_repository.py @@ -13,14 +13,21 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+import logging +import sys import synapse - +from synapse import events +from synapse.api.urls import ( + CONTENT_REPO_PREFIX, LEGACY_MEDIA_PREFIX, MEDIA_PREFIX +) +from synapse.app import _base from synapse.config._base import ConfigError from synapse.config.homeserver import HomeServerConfig from synapse.config.logger import setup_logging +from synapse.crypto import context_factory from synapse.http.site import SynapseSite -from synapse.metrics.resource import MetricsResource, METRICS_PREFIX +from synapse.metrics.resource import METRICS_PREFIX, MetricsResource from synapse.replication.slave.storage._base import BaseSlavedStore from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore from synapse.replication.slave.storage.client_ips import SlavedClientIpStore @@ -33,27 +40,12 @@ from synapse.server import HomeServer from synapse.storage.engines import create_engine from synapse.storage.media_repository import MediaRepositoryStore from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.logcontext import LoggingContext, PreserveLoggingContext +from synapse.util.logcontext import LoggingContext from synapse.util.manhole import manhole -from synapse.util.rlimit import change_resource_limit from synapse.util.versionstring import get_version_string -from synapse.api.urls import ( - CONTENT_REPO_PREFIX, LEGACY_MEDIA_PREFIX, MEDIA_PREFIX -) -from synapse.crypto import context_factory - -from synapse import events - - from twisted.internet import reactor from twisted.web.resource import Resource -from daemonize import Daemonize - -import sys -import logging -import gc - logger = logging.getLogger("synapse.app.media_repository") @@ -180,36 +172,13 @@ def start(config_options): ss.get_handlers() ss.start_listening(config.worker_listeners) - def run(): - # make sure that we run the reactor with the sentinel log context, - # otherwise other PreserveLoggingContext instances will get confused - # and complain when they see the logcontext arbitrarily swapping - # between the sentinel and `run` logcontexts. - with PreserveLoggingContext(): - logger.info("Running") - change_resource_limit(config.soft_file_limit) - if config.gc_thresholds: - gc.set_threshold(*config.gc_thresholds) - reactor.run() - def start(): ss.get_state_handler().start_caching() ss.get_datastore().start_profiling() reactor.callWhenRunning(start) - if config.worker_daemonize: - daemon = Daemonize( - app="synapse-media-repository", - pid=config.worker_pid_file, - action=run, - auto_close_fds=False, - verbose=True, - logger=logger, - ) - daemon.start() - else: - run() + _base.start_worker_reactor("synapse-media-repository", config) if __name__ == '__main__': diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py index f9114acfcb..db9a4d16f4 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py @@ -13,41 +13,33 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+import logging +import sys import synapse - -from synapse.server import HomeServer +from synapse import events +from synapse.app import _base from synapse.config._base import ConfigError -from synapse.config.logger import setup_logging from synapse.config.homeserver import HomeServerConfig +from synapse.config.logger import setup_logging from synapse.http.site import SynapseSite -from synapse.metrics.resource import MetricsResource, METRICS_PREFIX -from synapse.storage.roommember import RoomMemberStore +from synapse.metrics.resource import METRICS_PREFIX, MetricsResource +from synapse.replication.slave.storage.account_data import SlavedAccountDataStore from synapse.replication.slave.storage.events import SlavedEventStore from synapse.replication.slave.storage.pushers import SlavedPusherStore from synapse.replication.slave.storage.receipts import SlavedReceiptsStore -from synapse.replication.slave.storage.account_data import SlavedAccountDataStore from synapse.replication.tcp.client import ReplicationClientHandler -from synapse.storage.engines import create_engine +from synapse.server import HomeServer from synapse.storage import DataStore +from synapse.storage.engines import create_engine +from synapse.storage.roommember import RoomMemberStore from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.logcontext import LoggingContext, preserve_fn, \ - PreserveLoggingContext +from synapse.util.logcontext import LoggingContext, preserve_fn from synapse.util.manhole import manhole -from synapse.util.rlimit import change_resource_limit from synapse.util.versionstring import get_version_string - -from synapse import events - -from twisted.internet import reactor, defer +from twisted.internet import defer, reactor from twisted.web.resource import Resource -from daemonize import Daemonize - -import sys -import logging -import gc - logger = logging.getLogger("synapse.app.pusher") @@ -244,18 +236,6 @@ def start(config_options): ps.setup() ps.start_listening(config.worker_listeners) - def run(): - # make sure that we run the reactor with the sentinel log context, - # otherwise other PreserveLoggingContext instances will get confused - # and complain when they see the logcontext arbitrarily swapping - # between the sentinel and `run` logcontexts. - with PreserveLoggingContext(): - logger.info("Running") - change_resource_limit(config.soft_file_limit) - if config.gc_thresholds: - gc.set_threshold(*config.gc_thresholds) - reactor.run() - def start(): ps.get_pusherpool().start() ps.get_datastore().start_profiling() @@ -263,18 +243,7 @@ def start(config_options): reactor.callWhenRunning(start) - if config.worker_daemonize: - daemon = Daemonize( - app="synapse-pusher", - pid=config.worker_pid_file, - action=run, - auto_close_fds=False, - verbose=True, - logger=logger, - ) - daemon.start() - else: - run() + _base.start_worker_reactor("synapse-pusher", config) if __name__ == '__main__': diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index 4bdd99a966..80e4ba5336 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -13,56 +13,50 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+import contextlib +import logging +import sys import synapse - from synapse.api.constants import EventTypes +from synapse.app import _base from synapse.config._base import ConfigError from synapse.config.homeserver import HomeServerConfig from synapse.config.logger import setup_logging from synapse.handlers.presence import PresenceHandler, get_interested_parties -from synapse.http.site import SynapseSite from synapse.http.server import JsonResource -from synapse.metrics.resource import MetricsResource, METRICS_PREFIX -from synapse.rest.client.v2_alpha import sync -from synapse.rest.client.v1 import events -from synapse.rest.client.v1.room import RoomInitialSyncRestServlet -from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet +from synapse.http.site import SynapseSite +from synapse.metrics.resource import METRICS_PREFIX, MetricsResource from synapse.replication.slave.storage._base import BaseSlavedStore -from synapse.replication.slave.storage.client_ips import SlavedClientIpStore -from synapse.replication.slave.storage.events import SlavedEventStore -from synapse.replication.slave.storage.receipts import SlavedReceiptsStore from synapse.replication.slave.storage.account_data import SlavedAccountDataStore from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore -from synapse.replication.slave.storage.registration import SlavedRegistrationStore -from synapse.replication.slave.storage.filtering import SlavedFilteringStore -from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore -from synapse.replication.slave.storage.presence import SlavedPresenceStore +from synapse.replication.slave.storage.client_ips import SlavedClientIpStore from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore from synapse.replication.slave.storage.devices import SlavedDeviceStore +from synapse.replication.slave.storage.events import SlavedEventStore +from synapse.replication.slave.storage.filtering import SlavedFilteringStore +from synapse.replication.slave.storage.presence import SlavedPresenceStore +from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore +from synapse.replication.slave.storage.receipts import SlavedReceiptsStore +from synapse.replication.slave.storage.registration import SlavedRegistrationStore from synapse.replication.slave.storage.room import RoomStore from synapse.replication.tcp.client import ReplicationClientHandler +from synapse.rest.client.v1 import events +from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet +from synapse.rest.client.v1.room import RoomInitialSyncRestServlet +from synapse.rest.client.v2_alpha import sync from synapse.server import HomeServer from synapse.storage.engines import create_engine from synapse.storage.presence import UserPresenceState from synapse.storage.roommember import RoomMemberStore from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.logcontext import LoggingContext, PreserveLoggingContext, preserve_fn +from synapse.util.logcontext import LoggingContext, preserve_fn from synapse.util.manhole import manhole -from synapse.util.rlimit import change_resource_limit from synapse.util.stringutils import random_string from synapse.util.versionstring import get_version_string - -from twisted.internet import reactor, defer +from twisted.internet import defer, reactor from twisted.web.resource import Resource -from daemonize import Daemonize - -import sys -import logging -import contextlib -import gc - logger = 
logging.getLogger("synapse.app.synchrotron") @@ -440,36 +434,13 @@ def start(config_options): ss.setup() ss.start_listening(config.worker_listeners) - def run(): - # make sure that we run the reactor with the sentinel log context, - # otherwise other PreserveLoggingContext instances will get confused - # and complain when they see the logcontext arbitrarily swapping - # between the sentinel and `run` logcontexts. - with PreserveLoggingContext(): - logger.info("Running") - change_resource_limit(config.soft_file_limit) - if config.gc_thresholds: - gc.set_threshold(*config.gc_thresholds) - reactor.run() - def start(): ss.get_datastore().start_profiling() ss.get_state_handler().start_caching() reactor.callWhenRunning(start) - if config.worker_daemonize: - daemon = Daemonize( - app="synapse-synchrotron", - pid=config.worker_pid_file, - action=run, - auto_close_fds=False, - verbose=True, - logger=logger, - ) - daemon.start() - else: - run() + _base.start_worker_reactor("synapse-synchrotron", config) if __name__ == '__main__': diff --git a/synapse/app/user_dir.py b/synapse/app/user_dir.py index 8c6300db9d..cd743887ce 100644 --- a/synapse/app/user_dir.py +++ b/synapse/app/user_dir.py @@ -14,16 +14,19 @@ # See the License for the specific language governing permissions and # limitations under the License. -import synapse +import logging +import sys -from synapse.server import HomeServer +import synapse +from synapse import events +from synapse.app import _base from synapse.config._base import ConfigError -from synapse.config.logger import setup_logging from synapse.config.homeserver import HomeServerConfig +from synapse.config.logger import setup_logging from synapse.crypto import context_factory -from synapse.http.site import SynapseSite from synapse.http.server import JsonResource -from synapse.metrics.resource import MetricsResource, METRICS_PREFIX +from synapse.http.site import SynapseSite +from synapse.metrics.resource import METRICS_PREFIX, MetricsResource from synapse.replication.slave.storage._base import BaseSlavedStore from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore from synapse.replication.slave.storage.client_ips import SlavedClientIpStore @@ -31,26 +34,17 @@ from synapse.replication.slave.storage.events import SlavedEventStore from synapse.replication.slave.storage.registration import SlavedRegistrationStore from synapse.replication.tcp.client import ReplicationClientHandler from synapse.rest.client.v2_alpha import user_directory +from synapse.server import HomeServer from synapse.storage.engines import create_engine from synapse.storage.user_directory import UserDirectoryStore -from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.logcontext import LoggingContext, PreserveLoggingContext, preserve_fn -from synapse.util.manhole import manhole -from synapse.util.rlimit import change_resource_limit -from synapse.util.versionstring import get_version_string from synapse.util.caches.stream_change_cache import StreamChangeCache - -from synapse import events - +from synapse.util.httpresourcetree import create_resource_tree +from synapse.util.logcontext import LoggingContext, preserve_fn +from synapse.util.manhole import manhole +from synapse.util.versionstring import get_version_string from twisted.internet import reactor from twisted.web.resource import Resource -from daemonize import Daemonize - -import sys -import logging -import gc - logger = logging.getLogger("synapse.app.user_dir") @@ -233,36 +227,13 @@ def 
start(config_options): ps.setup() ps.start_listening(config.worker_listeners) - def run(): - # make sure that we run the reactor with the sentinel log context, - # otherwise other PreserveLoggingContext instances will get confused - # and complain when they see the logcontext arbitrarily swapping - # between the sentinel and `run` logcontexts. - with PreserveLoggingContext(): - logger.info("Running") - change_resource_limit(config.soft_file_limit) - if config.gc_thresholds: - gc.set_threshold(*config.gc_thresholds) - reactor.run() - def start(): ps.get_datastore().start_profiling() ps.get_state_handler().start_caching() reactor.callWhenRunning(start) - if config.worker_daemonize: - daemon = Daemonize( - app="synapse-user-dir", - pid=config.worker_pid_file, - action=run, - auto_close_fds=False, - verbose=True, - logger=logger, - ) - daemon.start() - else: - run() + _base.start_worker_reactor("synapse-user-dir") if __name__ == '__main__': From 10d8b701a1fa585c5fc2d5edcea8d4d02ae360a4 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 15 Aug 2017 17:08:28 +0100 Subject: [PATCH 0091/1637] Allow configuration of CPU affinity Make it possible to set the CPU affinity in the config file, so that we don't need to remember to do it manually every time. --- synapse/app/_base.py | 9 ++++++++- synapse/app/homeserver.py | 1 + synapse/config/server.py | 12 ++++++++++++ synapse/config/workers.py | 1 + synapse/python_dependencies.py | 1 + 5 files changed, 23 insertions(+), 1 deletion(-) diff --git a/synapse/app/_base.py b/synapse/app/_base.py index 3889c35946..cd0e815919 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -15,6 +15,7 @@ import gc import logging +import affinity from daemonize import Daemonize from synapse.util import PreserveLoggingContext from synapse.util.rlimit import change_resource_limit @@ -40,7 +41,8 @@ def start_worker_reactor(appname, config): config.gc_thresholds, config.worker_pid_file, config.worker_daemonize, - logger + config.worker_cpu_affinity, + logger, ) @@ -50,6 +52,7 @@ def start_reactor( gc_thresholds, pid_file, daemonize, + cpu_affinity, logger, ): """ Run the reactor in the main process @@ -63,6 +66,7 @@ def start_reactor( gc_thresholds: pid_file (str): name of pid file to write to if daemonize is True daemonize (bool): true to run the reactor in a background process + cpu_affinity (int|None): cpu affinity mask logger (logging.Logger): logger instance to pass to Daemonize """ @@ -73,6 +77,9 @@ def start_reactor( # between the sentinel and `run` logcontexts. 
with PreserveLoggingContext(): logger.info("Running") + if cpu_affinity is not None: + logger.info("Setting CPU affinity to %s" % cpu_affinity) + affinity.set_process_affinity_mask(0, cpu_affinity) change_resource_limit(soft_file_limit) if gc_thresholds: gc.set_threshold(*gc_thresholds) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 83b6c3212b..84ad8f04a0 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -442,6 +442,7 @@ def run(hs): hs.config.gc_thresholds, hs.config.pid_file, hs.config.daemonize, + hs.config.cpu_affinity, logger, ) diff --git a/synapse/config/server.py b/synapse/config/server.py index 28b4e5f50c..4e4bf6b432 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -29,6 +29,7 @@ class ServerConfig(Config): self.user_agent_suffix = config.get("user_agent_suffix") self.use_frozen_dicts = config.get("use_frozen_dicts", False) self.public_baseurl = config.get("public_baseurl") + self.cpu_affinity = config.get("cpu_affinity") # Whether to send federation traffic out in this process. This only # applies to some federation traffic, and so shouldn't be used to @@ -147,6 +148,17 @@ class ServerConfig(Config): # When running as a daemon, the file to store the pid in pid_file: %(pid_file)s + # CPU affinity mask. Setting this restricts the CPUs on which the process + # will be scheduled. It is represented as a bitmask, with the lowest order + # bit corresponding to the first logical CPU and the highest order bit + # corresponding to the last logical CPU. Not all CPUs may exist on a + # given system but a mask may specify more CPUs than are present. + # For example: + # 0x00000001 is processor #0, + # 0x00000003 is processors #0 and #1, + # 0xFFFFFFFF is all processors (#0 through #31). + # cpu_affinity: 0xFFFFFFFF + # Whether to serve a web client from the HTTP/HTTPS root resource. web_client: True diff --git a/synapse/config/workers.py b/synapse/config/workers.py index 99d5d8aaeb..c5a5a8919c 100644 --- a/synapse/config/workers.py +++ b/synapse/config/workers.py @@ -33,6 +33,7 @@ class WorkerConfig(Config): self.worker_name = config.get("worker_name", self.worker_app) self.worker_main_http_uri = config.get("worker_main_http_uri", None) + self.worker_cpu_affinity = config.get("worker_cpu_affinity") if self.worker_listeners: for listener in self.worker_listeners: diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index ed7f1c89ad..1d902dc38d 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -40,6 +40,7 @@ REQUIREMENTS = { "pymacaroons-pynacl": ["pymacaroons"], "msgpack-python>=0.3.0": ["msgpack"], "phonenumbers>=8.2.0": ["phonenumbers"], + "affinity": ["affinity"], } CONDITIONAL_REQUIREMENTS = { "web_client": { From 92168cbbc53ccf941ddcb958452ace8e41a948fd Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Tue, 15 Aug 2017 18:27:42 +0100 Subject: [PATCH 0092/1637] explain why CPU affinity is a good idea --- synapse/config/server.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/synapse/config/server.py b/synapse/config/server.py index 4e4bf6b432..e33cd51f7c 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -153,10 +153,18 @@ class ServerConfig(Config): # bit corresponding to the first logical CPU and the highest order bit # corresponding to the last logical CPU. Not all CPUs may exist on a # given system but a mask may specify more CPUs than are present. 
+        #
         # For example:
         #    0x00000001 is processor #0,
         #    0x00000003 is processors #0 and #1,
         #    0xFFFFFFFF is all processors (#0 through #31).
+        #
+        # This is desirable for Synapse processes (especially workers), which are
+        # inherently single-threaded due to the GIL and can suffer a 30-40% slowdown
+        # due to cache blow-out and thread context switching if the scheduler happens
+        # to schedule the underlying threads across different cores.
+        # See https://www.mirantis.com/blog/improve-performance-python-programs-restricting-single-cpu/
+        #
         # cpu_affinity: 0xFFFFFFFF
 
         # Whether to serve a web client from the HTTP/HTTPS root resource.
From d2352347cfed50e17ed567dff228af858ace54aa Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Wed, 16 Aug 2017 14:57:35 +0100
Subject: [PATCH 0093/1637] Fix process startup

escape the % that got added in 92168cb so that the process starts up ok.

---
 synapse/config/server.py | 23 +++++++++++++----------
 1 file changed, 13 insertions(+), 10 deletions(-)

diff --git a/synapse/config/server.py b/synapse/config/server.py
index e33cd51f7c..89d61a0503 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2017 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -148,22 +149,24 @@ class ServerConfig(Config):
         # When running as a daemon, the file to store the pid in
         pid_file: %(pid_file)s
 
-        # CPU affinity mask. Setting this restricts the CPUs on which the process
-        # will be scheduled. It is represented as a bitmask, with the lowest order
-        # bit corresponding to the first logical CPU and the highest order bit
-        # corresponding to the last logical CPU. Not all CPUs may exist on a
-        # given system but a mask may specify more CPUs than are present.
+        # CPU affinity mask. Setting this restricts the CPUs on which the
+        # process will be scheduled. It is represented as a bitmask, with the
+        # lowest order bit corresponding to the first logical CPU and the
+        # highest order bit corresponding to the last logical CPU. Not all CPUs
+        # may exist on a given system but a mask may specify more CPUs than are
+        # present.
         #
         # For example:
         #    0x00000001 is processor #0,
         #    0x00000003 is processors #0 and #1,
         #    0xFFFFFFFF is all processors (#0 through #31).
         #
-        # This is desirable for Synapse processes (especially workers), which are
-        # inherently single-threaded due to the GIL and can suffer a 30-40% slowdown
-        # due to cache blow-out and thread context switching if the scheduler happens
-        # to schedule the underlying threads across different cores.
-        # See https://www.mirantis.com/blog/improve-performance-python-programs-restricting-single-cpu/
+        # Pinning a Python process to a single CPU is desirable, because Python
+        # is inherently single-threaded due to the GIL, and can suffer a
+        # 30-40%% slowdown due to cache blow-out and thread context switching
+        # if the scheduler happens to schedule the underlying threads across
+        # different cores. See
+        # https://www.mirantis.com/blog/improve-performance-python-programs-restricting-single-cpu/.
         #
         # cpu_affinity: 0xFFFFFFFF
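To make the mask arithmetic concrete, here is a short sketch (not part of the patch) using the same affinity module that patch 0091 added as a dependency; it assumes a Linux host:

    import affinity

    # Pin the current process (pid 0 means "this process") to CPUs 0 and 1,
    # i.e. set bits 0 and 1 of the mask, giving 0x00000003.
    mask = (1 << 0) | (1 << 1)
    affinity.set_process_affinity_mask(0, mask)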
From 692250c6be825230ab785b33c59055b98ff91669 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Wed, 16 Aug 2017 15:11:29 +0100
Subject: [PATCH 0094/1637] Fix user_dir startup

Add missing parameter to _base.start_worker_reactor

---
 synapse/app/user_dir.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/synapse/app/user_dir.py b/synapse/app/user_dir.py
index cd743887ce..be661a70c7 100644
--- a/synapse/app/user_dir.py
+++ b/synapse/app/user_dir.py
@@ -233,7 +233,7 @@ def start(config_options):
 
     reactor.callWhenRunning(start)
 
-    _base.start_worker_reactor("synapse-user-dir")
+    _base.start_worker_reactor("synapse-user-dir", config)
 
 
 if __name__ == '__main__':
From 012875258c7c8ad7db4dcb8825684f2e8034e650 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Wed, 16 Aug 2017 15:31:44 +0100
Subject: [PATCH 0095/1637] Add prometheus config

... from https://github.com/matrix-org/synapse-prometheus-config.

---
 contrib/prometheus/README                |  20 ++
 contrib/prometheus/consoles/synapse.html | 395 +++++++++++++++++++++++
 contrib/prometheus/synapse.rules         |  21 ++
 3 files changed, 436 insertions(+)
 create mode 100644 contrib/prometheus/README
 create mode 100644 contrib/prometheus/consoles/synapse.html
 create mode 100644 contrib/prometheus/synapse.rules

diff --git a/contrib/prometheus/README b/contrib/prometheus/README
new file mode 100644
index 0000000000..eb91db2de2
--- /dev/null
+++ b/contrib/prometheus/README
@@ -0,0 +1,20 @@
+This directory contains some sample monitoring config for using the
+'Prometheus' monitoring server against synapse.
+
+To use it, first install prometheus by following the instructions at
+
+  http://prometheus.io/
+
+Then add a new job to the main prometheus.conf file:
+
+  job: {
+    name: "synapse"
+
+    target_group: {
+      target: "http://SERVER.LOCATION.HERE:PORT/_synapse/metrics"
+    }
+  }
+
+Metrics are disabled by default when running synapse; they must be enabled
+with the 'enable-metrics' option, either in the synapse config file or as a
+command-line option.
diff --git a/contrib/prometheus/consoles/synapse.html b/contrib/prometheus/consoles/synapse.html
new file mode 100644
index 0000000000..e23d8a1fce
--- /dev/null
+++ b/contrib/prometheus/consoles/synapse.html
@@ -0,0 +1,395 @@
+{{ template "head" . }}
+
+{{ template "prom_content_head" . }}

[The remaining ~390 lines of synapse.html (HTML markup and PromConsole graph definitions) are not reproduced here. Panel headings: System Resources: CPU; Memory; File descriptors. Reactor: Total reactor time; Average reactor tick time; Pending calls per tick. Storage: Queries; Transactions; Transaction execution time; Database scheduling latency; Cache hit ratio; Cache size. Requests: Requests by Servlet (without EventStreamRestServlet or SyncRestServlet); Average response times; All responses by code; Error responses by code; CPU Usage; DB Usage; Average event send times. Federation: Sent Messages; Received Messages; Pending. Clients: Notifiers; Notified Events.]
+{{ template "prom_content_tail" . }}
+
+{{ template "tail" }}
diff --git a/contrib/prometheus/synapse.rules b/contrib/prometheus/synapse.rules
new file mode 100644
index 0000000000..b6f84174b0
--- /dev/null
+++ b/contrib/prometheus/synapse.rules
@@ -0,0 +1,21 @@
+synapse_federation_transaction_queue_pendingEdus:total = sum(synapse_federation_transaction_queue_pendingEdus or absent(synapse_federation_transaction_queue_pendingEdus)*0)
+synapse_federation_transaction_queue_pendingPdus:total = sum(synapse_federation_transaction_queue_pendingPdus or absent(synapse_federation_transaction_queue_pendingPdus)*0)
+
+synapse_http_server_requests:method{servlet=""} = sum(synapse_http_server_requests) by (method)
+synapse_http_server_requests:servlet{method=""} = sum(synapse_http_server_requests) by (servlet)
+
+synapse_http_server_requests:total{servlet=""} = sum(synapse_http_server_requests:by_method) by (servlet)
+
+synapse_cache:hit_ratio_5m = rate(synapse_util_caches_cache:hits[5m]) / rate(synapse_util_caches_cache:total[5m])
+synapse_cache:hit_ratio_30s = rate(synapse_util_caches_cache:hits[30s]) / rate(synapse_util_caches_cache:total[30s])
+
+synapse_federation_client_sent{type="EDU"} = synapse_federation_client_sent_edus + 0
+synapse_federation_client_sent{type="PDU"} = synapse_federation_client_sent_pdu_destinations:count + 0
+synapse_federation_client_sent{type="Query"} = sum(synapse_federation_client_sent_queries) by (job)
+
+synapse_federation_server_received{type="EDU"} = synapse_federation_server_received_edus + 0
+synapse_federation_server_received{type="PDU"} = synapse_federation_server_received_pdus + 0
+synapse_federation_server_received{type="Query"} = sum(synapse_federation_server_received_queries) by (job)
+
+synapse_federation_transaction_queue_pending{type="EDU"} = synapse_federation_transaction_queue_pending_edus + 0
+synapse_federation_transaction_queue_pending{type="PDU"} = synapse_federation_transaction_queue_pending_pdus + 0
From ec3a2dc773d70e6d19e4c1f75571b5f2a313cb16 Mon Sep 17 00:00:00 2001
From: Tom Lant
Date: Thu, 17 Aug 2017 11:00:51 +0100
Subject: [PATCH 0096/1637] Update ISSUE_TEMPLATE.md

Responding to review comments.

---
 ISSUE_TEMPLATE.md | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/ISSUE_TEMPLATE.md b/ISSUE_TEMPLATE.md
index a3ca19ff72..a03285bc4e 100644
--- a/ISSUE_TEMPLATE.md
+++ b/ISSUE_TEMPLATE.md
@@ -27,10 +27,17 @@ Describe here the problem that you are experiencing, or the feature you are requ
 
 Describe how what happens differs from what you expected.
 
+If you can identify any relevant log snippets from _homeserver.log_, please include
+those here (please be careful to remove any personal or private data):
+
 ### Version information
 
-- **Homeserver**: was this issue identified on matrix.org or another homeserver?
-- **Platform**: tell us about the environment in which your homeserver is operating
-  - distro, hardware, if it's running in a vm/container, etc.
+- **Homeserver**: Was this issue identified on matrix.org or another homeserver?
+
+If not matrix.org:
+- **Version**: What version of Synapse is running?
+- **Install method**: package manager/git clone/pip
+- **Platform**: Tell us about the environment in which your homeserver is operating
+  - distro, hardware, if it's running in a vm/container, etc.
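One note on the Prometheus assets from patch 0095 above: the README's snippet uses the old prometheus.conf syntax. A rough equivalent for a Prometheus 1.x YAML configuration, reusing the README's placeholder target and an arbitrary scrape interval (both to be adjusted for a real deployment), might be:

    global:
      scrape_interval: 15s

    rule_files:
      - "synapse.rules"

    scrape_configs:
      - job_name: "synapse"
        metrics_path: "/_synapse/metrics"
        static_configs:
          - targets: ["SERVER.LOCATION.HERE:PORT"]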
From 413c2707236aa0a7e96e23eae1f760826163cc5a Mon Sep 17 00:00:00 2001 From: Tom Lant Date: Thu, 17 Aug 2017 11:14:35 +0100 Subject: [PATCH 0097/1637] Update ISSUE_TEMPLATE.md Added instructions for checking server version. --- ISSUE_TEMPLATE.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/ISSUE_TEMPLATE.md b/ISSUE_TEMPLATE.md index a03285bc4e..d2050a3e44 100644 --- a/ISSUE_TEMPLATE.md +++ b/ISSUE_TEMPLATE.md @@ -37,7 +37,11 @@ those here (please be careful to remove any personal or private data): - **Homeserver**: Was this issue identified on matrix.org or another homeserver? If not matrix.org: -- **Version**: What version of Synapse is running? +- **Version**: What version of Synapse is running? - **Install method**: package manager/git clone/pip - **Platform**: Tell us about the environment in which your homeserver is operating - distro, hardware, if it's running in a vm/container, etc. From 046b659ce245272eb0c38cb1ee4206b5cb9e4f0c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 17 Aug 2017 16:54:27 +0100 Subject: [PATCH 0098/1637] Improvements to the federation test client Make it read the config file, primarily. --- scripts-dev/federation_client.py | 65 ++++++++++++++++++++++++++++---- 1 file changed, 58 insertions(+), 7 deletions(-) mode change 100644 => 100755 scripts-dev/federation_client.py diff --git a/scripts-dev/federation_client.py b/scripts-dev/federation_client.py old mode 100644 new mode 100755 index d1ab42d3af..c840acb923 --- a/scripts-dev/federation_client.py +++ b/scripts-dev/federation_client.py @@ -1,10 +1,30 @@ +#!/usr/bin/env python +# +# Copyright 2015, 2016 OpenMarket Ltd +# Copyright 2017 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import argparse import nacl.signing import json import base64 import requests import sys import srvlookup - +import yaml def encode_base64(input_bytes): """Encode bytes as a base64 string without any padding.""" @@ -120,11 +140,13 @@ def get_json(origin_name, origin_key, destination, path): origin_name, key, sig, ) authorization_headers.append(bytes(header)) - sys.stderr.write(header) - sys.stderr.write("\n") + print ("Authorization: %s" % header, file=sys.stderr) + + dest = lookup(destination, path) + print ("Requesting %s" % dest, file=sys.stderr) result = requests.get( - lookup(destination, path), + dest, headers={"Authorization": authorization_headers[0]}, verify=False, ) @@ -133,17 +155,46 @@ def get_json(origin_name, origin_key, destination, path): def main(): - origin_name, keyfile, destination, path = sys.argv[1:] + parser = argparse.ArgumentParser( + description= + "Signs and sends a federation request to a matrix homeserver", + ) + + parser.add_argument( + "-c", "--config", + type=argparse.FileType('r'), + default="homeserver.yaml", + help="Path to server config file. 
Used to read in server name and key " + "file", + ) + + parser.add_argument( + "-d", "--destination", + default="matrix.org", + help="name of the remote homeserver. We will do SRV lookups and " + "connect appropriately.", + ) + + parser.add_argument( + "path", + help="request path. We will add '/_matrix/federation/v1/' to this." + ) + + args = parser.parse_args() + + config = yaml.safe_load(args.config) + origin_name = config['server_name'] + keyfile = config['signing_key_path'] with open(keyfile) as f: key = read_signing_keys(f)[0] result = get_json( - origin_name, key, destination, "/_matrix/federation/v1/" + path + origin_name, key, args.destination, "/_matrix/federation/v1/" + args.path ) json.dump(result, sys.stdout) - print "" + print ("") if __name__ == "__main__": main() From 175a01f56c86a4c201e72d49f22663425656d81d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 21 Aug 2017 14:45:56 +0100 Subject: [PATCH 0099/1637] Groups: Fix mising json.load in initial sync --- synapse/storage/group_server.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index f44e80b514..792a57deb5 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -1101,7 +1101,13 @@ class GroupServerStore(SQLBaseStore): LIMIT ? """ txn.execute(sql, (from_token, to_token, limit,)) - return txn.fetchall() + return [{ + "stream_id": stream_id, + "group_id": group_id, + "user_id": user_id, + "type": gtype, + "content": json.loads(content_json), + } for stream_id, group_id, user_id, gtype, content_json in txn] return self.runInteraction( "get_all_groups_changes", _get_all_groups_changes_txn, ) From 8b50fe5330249fd24d50fa97385cd88ef6703d79 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 21 Aug 2017 13:18:23 +0100 Subject: [PATCH 0100/1637] Use BOOLEAN rather than TEXT type --- synapse/storage/schema/delta/43/group_server.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/schema/delta/43/group_server.sql b/synapse/storage/schema/delta/43/group_server.sql index 01ac0edc35..e74554381f 100644 --- a/synapse/storage/schema/delta/43/group_server.sql +++ b/synapse/storage/schema/delta/43/group_server.sql @@ -150,7 +150,7 @@ CREATE TABLE local_group_membership ( user_id TEXT NOT NULL, is_admin BOOLEAN NOT NULL, membership TEXT NOT NULL, - is_publicised TEXT NOT NULL, -- if the user is publicising their membership + is_publicised BOOLEAN NOT NULL, -- if the user is publicising their membership content TEXT NOT NULL ); From a04c6bbf8f31aaafa0a67813621b85cb26179d34 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 22 Aug 2017 11:19:30 +0100 Subject: [PATCH 0101/1637] test federation client: Allow server-name and key-file as options so that you don't necessarily need a config file. --- scripts-dev/federation_client.py | 36 +++++++++++++++++++++++++------- 1 file changed, 28 insertions(+), 8 deletions(-) diff --git a/scripts-dev/federation_client.py b/scripts-dev/federation_client.py index c840acb923..82a90ef6fa 100755 --- a/scripts-dev/federation_client.py +++ b/scripts-dev/federation_client.py @@ -160,12 +160,23 @@ def main(): "Signs and sends a federation request to a matrix homeserver", ) + parser.add_argument( + "-N", "--server-name", + help="Name to give as the local homeserver. 
If unspecified, will be " + "read from the config file.", ) + + parser.add_argument( + "-k", "--signing-key-path", + help="Path to the file containing the private ed25519 key to sign the " + "request with.", + ) + parser.add_argument( "-c", "--config", - type=argparse.FileType('r'), default="homeserver.yaml", - help="Path to server config file. Used to read in server name and key " - "file", + help="Path to server config file. Ignored if --server-name and " + "--signing-key-path are both given.", ) parser.add_argument( @@ -182,19 +193,28 @@ def main(): args = parser.parse_args() - config = yaml.safe_load(args.config) - origin_name = config['server_name'] - keyfile = config['signing_key_path'] + if not args.server_name or not args.signing_key_path: + read_args_from_config(args) - with open(keyfile) as f: + with open(args.signing_key_path) as f: key = read_signing_keys(f)[0] result = get_json( - origin_name, key, args.destination, "/_matrix/federation/v1/" + args.path + args.server_name, key, args.destination, "/_matrix/federation/v1/" + args.path ) json.dump(result, sys.stdout) print ("") + +def read_args_from_config(args): + with open(args.config, 'r') as fh: + config = yaml.safe_load(fh) + if not args.server_name: + args.server_name = config['server_name'] + if not args.signing_key_path: + args.signing_key_path = config['signing_key_path'] + + if __name__ == "__main__": main() From fc9878f6a4d71bcf59a2f7e652a817133aaf0a89 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 23 Aug 2017 15:15:40 +0100 Subject: [PATCH 0102/1637] Tweaks to the upgrade instructions --- UPGRADE.rst | 93 +++++++++++++++++++++++++++++------------------------ 1 file changed, 51 insertions(+), 42 deletions(-) diff --git a/UPGRADE.rst b/UPGRADE.rst index 62b22e9108..2efe7ea60f 100644 --- a/UPGRADE.rst +++ b/UPGRADE.rst @@ -5,39 +5,48 @@ Before upgrading check if any special steps are required to upgrade from the what you currently have installed to current version of synapse. The extra instructions that may be required are listed later in this document. -If synapse was installed in a virtualenv then active that virtualenv before -upgrading. If synapse is installed in a virtualenv in ``~/.synapse/`` then run: +1. If synapse was installed in a virtualenv then activate that virtualenv before + upgrading. If synapse is installed in a virtualenv in ``~/.synapse/`` then + run: + + .. code:: bash + + source ~/.synapse/bin/activate + +2. If synapse was installed using pip then upgrade to the latest version by + running: + + .. code:: bash + + pip install --upgrade --process-dependency-links https://github.com/matrix-org/synapse/tarball/master + + # restart synapse + synctl restart + + + If synapse was installed using git then upgrade to the latest version by + running: + + .. code:: bash + + # Pull the latest version of the master branch. + git pull + # Update the versions of synapse's python dependencies. + python synapse/python_dependencies.py | xargs pip install --upgrade + + # restart synapse + ./synctl restart + + +To check whether your update was successful, you can check the Server header +returned by the Client-Server API: .. code:: bash - source ~/.synapse/bin/activate - -If synapse was installed using pip then upgrade to the latest version by -running: - -.. code:: bash - - pip install --upgrade --process-dependency-links https://github.com/matrix-org/synapse/tarball/master -If synapse was installed using git then upgrade to the latest version by -running: - -.. 
code:: bash - - # Pull the latest version of the master branch. - git pull - # Update the versions of synapse's python dependencies. - python synapse/python_dependencies.py | xargs -n1 pip install --upgrade - -To check whether your update was sucessfull, run: - -.. code:: bash - - # replace your.server.domain with ther domain of your synapse homeserver - curl https:///_matrix/federation/v1/version - -So for the Matrix.org HS server the URL would be: https://matrix.org/_matrix/federation/v1/version. - + # replace with the hostname of your synapse homeserver. + # You may need to specify a port (eg, :8448) if your server is not + # configured on port 443. + curl -kv https:///_matrix/client/versions 2>&1 | grep "Server:" Upgrading to v0.15.0 ==================== @@ -77,7 +86,7 @@ It has been replaced by specifying a list of application service registrations i ``homeserver.yaml``:: app_service_config_files: ["registration-01.yaml", "registration-02.yaml"] - + Where ``registration-01.yaml`` looks like:: url: # e.g. "https://my.application.service.com" @@ -166,7 +175,7 @@ This release completely changes the database schema and so requires upgrading it before starting the new version of the homeserver. The script "database-prepare-for-0.5.0.sh" should be used to upgrade the -database. This will save all user information, such as logins and profiles, +database. This will save all user information, such as logins and profiles, but will otherwise purge the database. This includes messages, which rooms the home server was a member of and room alias mappings. @@ -175,18 +184,18 @@ file and ask for help in #matrix:matrix.org. The upgrade process is, unfortunately, non trivial and requires human intervention to resolve any resulting conflicts during the upgrade process. -Before running the command the homeserver should be first completely +Before running the command the homeserver should be first completely shutdown. To run it, simply specify the location of the database, e.g.: ./scripts/database-prepare-for-0.5.0.sh "homeserver.db" -Once this has successfully completed it will be safe to restart the -homeserver. You may notice that the homeserver takes a few seconds longer to +Once this has successfully completed it will be safe to restart the +homeserver. You may notice that the homeserver takes a few seconds longer to restart than usual as it reinitializes the database. On startup of the new version, users can either rejoin remote rooms using room aliases or by being reinvited. Alternatively, if any other homeserver sends a -message to a room that the homeserver was previously in the local HS will +message to a room that the homeserver was previously in the local HS will automatically rejoin the room. Upgrading to v0.4.0 @@ -245,7 +254,7 @@ automatically generate default config use:: --config-path homeserver.config \ --generate-config -This config can be edited if desired, for example to specify a different SSL +This config can be edited if desired, for example to specify a different SSL certificate to use. Once done you can run the home server using:: $ python synapse/app/homeserver.py --config-path homeserver.config @@ -266,20 +275,20 @@ This release completely changes the database schema and so requires upgrading it before starting the new version of the homeserver. The script "database-prepare-for-0.0.1.sh" should be used to upgrade the -database. This will save all user information, such as logins and profiles, +database. 
This will save all user information, such as logins and profiles, but will otherwise purge the database. This includes messages, which rooms the home server was a member of and room alias mappings. -Before running the command the homeserver should be first completely +Before running the command the homeserver should be first completely shutdown. To run it, simply specify the location of the database, e.g.: ./scripts/database-prepare-for-0.0.1.sh "homeserver.db" -Once this has successfully completed it will be safe to restart the -homeserver. You may notice that the homeserver takes a few seconds longer to +Once this has successfully completed it will be safe to restart the +homeserver. You may notice that the homeserver takes a few seconds longer to restart than usual as it reinitializes the database. On startup of the new version, users can either rejoin remote rooms using room aliases or by being reinvited. Alternatively, if any other homeserver sends a -message to a room that the homeserver was previously in the local HS will +message to a room that the homeserver was previously in the local HS will automatically rejoin the room. From 97c544f91f562f33b9655e7c8c8f980bac5ac658 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 25 Aug 2017 11:11:37 +0100 Subject: [PATCH 0103/1637] Add _simple_update --- synapse/storage/_base.py | 51 +++++++++++++++++++++++++--------------- 1 file changed, 32 insertions(+), 19 deletions(-) diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 6f54036d67..5124a833a5 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -743,6 +743,33 @@ class SQLBaseStore(object): txn.execute(sql, values) return cls.cursor_to_dict(txn) + def _simple_update(self, table, keyvalues, updatevalues, desc): + return self.runInteraction( + desc, + self._simple_update_txn, + table, keyvalues, updatevalues, + ) + + @staticmethod + def _simple_update_txn(txn, table, keyvalues, updatevalues): + if keyvalues: + where = "WHERE %s" % " AND ".join("%s = ?" % k for k in keyvalues.iterkeys()) + else: + where = "" + + update_sql = "UPDATE %s SET %s %s" % ( + table, + ", ".join("%s = ?" % (k,) for k in updatevalues), + where, + ) + + txn.execute( + update_sql, + updatevalues.values() + keyvalues.values() + ) + + return txn.rowcount + def _simple_update_one(self, table, keyvalues, updatevalues, desc="_simple_update_one"): """Executes an UPDATE query on the named table, setting new values for @@ -768,27 +795,13 @@ class SQLBaseStore(object): table, keyvalues, updatevalues, ) - @staticmethod - def _simple_update_one_txn(txn, table, keyvalues, updatevalues): - if keyvalues: - where = "WHERE %s" % " AND ".join("%s = ?" % k for k in keyvalues.iterkeys()) - else: - where = "" + @classmethod + def _simple_update_one_txn(cls, txn, table, keyvalues, updatevalues): + rowcount = cls._simple_update_txn(txn, table, keyvalues, updatevalues) - update_sql = "UPDATE %s SET %s %s" % ( - table, - ", ".join("%s = ?" 
% (k,) for k in updatevalues), - where, - ) - - txn.execute( - update_sql, - updatevalues.values() + keyvalues.values() - ) - - if txn.rowcount == 0: + if rowcount == 0: raise StoreError(404, "No row found") - if txn.rowcount > 1: + if rowcount > 1: raise StoreError(500, "More than one row matched") @staticmethod From 27ebc5c8f299488ccc0a6f100ec3b248cd81a058 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 25 Aug 2017 11:21:34 +0100 Subject: [PATCH 0104/1637] Add remote profile cache --- synapse/groups/groups_server.py | 18 ++++ synapse/handlers/groups_local.py | 17 +++- synapse/handlers/profile.py | 81 ++++++++++++++- synapse/storage/profile.py | 98 +++++++++++++++++++ .../storage/schema/delta/43/profile_cache.sql | 28 ++++++ 5 files changed, 237 insertions(+), 5 deletions(-) create mode 100644 synapse/storage/schema/delta/43/profile_cache.sql diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index f25f327eb9..6bccae4bfb 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -503,6 +503,13 @@ class GroupsServerHandler(object): get_domain_from_id(user_id), group_id, user_id, content ) + user_profile = res.get("user_profile", {}) + yield self.store.add_remote_profile_cache( + user_id, + displayname=user_profile.get("displayname"), + avatar_url=user_profile.get("avatar_url"), + ) + if res["state"] == "join": if not self.hs.is_mine_id(user_id): remote_attestation = res["attestation"] @@ -627,6 +634,9 @@ class GroupsServerHandler(object): get_domain_from_id(user_id), group_id, user_id, {} ) + if not self.hs.is_mine_id(user_id): + yield self.store.maybe_delete_remote_profile_cache(user_id) + defer.returnValue({}) @defer.inlineCallbacks @@ -647,6 +657,7 @@ class GroupsServerHandler(object): avatar_url = profile.get("avatar_url") short_description = profile.get("short_description") long_description = profile.get("long_description") + user_profile = content.get("user_profile", {}) yield self.store.create_group( group_id, @@ -679,6 +690,13 @@ class GroupsServerHandler(object): remote_attestation=remote_attestation, ) + if not self.hs.is_mine_id(user_id): + yield self.store.add_remote_profile_cache( + user_id, + displayname=user_profile.get("displayname"), + avatar_url=user_profile.get("avatar_url"), + ) + defer.returnValue({ "group_id": group_id, }) diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index 274fed9278..bfa10bde5a 100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -56,6 +56,9 @@ class GroupsLocalHandler(object): self.notifier = hs.get_notifier() self.attestations = hs.get_groups_attestation_signing() + handlers = hs.get_handlers() + self.profile_handler = handlers.profile_handler + # Ensure attestations get renewed hs.get_groups_attestation_renewer() @@ -123,6 +126,7 @@ class GroupsLocalHandler(object): defer.returnValue(res) + @defer.inlineCallbacks def create_group(self, group_id, user_id, content): """Create a group """ @@ -130,13 +134,16 @@ class GroupsLocalHandler(object): logger.info("Asking to create group with ID: %r", group_id) if self.is_mine_id(group_id): - return self.groups_server_handler.create_group( + res = yield self.groups_server_handler.create_group( group_id, user_id, content ) + defer.returnValue(res) - return self.transport_client.create_group( + content["user_profile"] = yield self.profile_handler.get_profile(user_id) + res = yield self.transport_client.create_group( get_domain_from_id(group_id), group_id, user_id, content, - ) # 
TODO + ) + defer.returnValue(res) @defer.inlineCallbacks def get_users_in_group(self, group_id, requester_user_id): @@ -265,7 +272,9 @@ class GroupsLocalHandler(object): "groups_key", token, users=[user_id], ) - defer.returnValue({"state": "invite"}) + user_profile = yield self.profile_handler.get_profile(user_id) + + defer.returnValue({"state": "invite", "user_profile": user_profile}) @defer.inlineCallbacks def remove_user_from_group(self, group_id, user_id, requester_user_id, content): diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index 7abee98dea..57e22edb0d 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -19,7 +19,7 @@ from twisted.internet import defer import synapse.types from synapse.api.errors import SynapseError, AuthError, CodeMessageException -from synapse.types import UserID +from synapse.types import UserID, get_domain_from_id from ._base import BaseHandler @@ -27,15 +27,53 @@ logger = logging.getLogger(__name__) class ProfileHandler(BaseHandler): + PROFILE_UPDATE_MS = 60 * 1000 + PROFILE_UPDATE_EVERY_MS = 24 * 60 * 60 * 1000 def __init__(self, hs): super(ProfileHandler, self).__init__(hs) + self.clock = hs.get_clock() + self.federation = hs.get_replication_layer() self.federation.register_query_handler( "profile", self.on_profile_query ) + self.clock.looping_call(self._update_remote_profile_cache, self.PROFILE_UPDATE_MS) + + @defer.inlineCallbacks + def get_profile(self, user_id): + target_user = UserID.from_string(user_id) + if self.hs.is_mine(target_user): + displayname = yield self.store.get_profile_displayname( + target_user.localpart + ) + avatar_url = yield self.store.get_profile_avatar_url( + target_user.localpart + ) + + defer.returnValue({ + "displayname": displayname, + "avatar_url": avatar_url, + }) + else: + try: + result = yield self.federation.make_query( + destination=target_user.domain, + query_type="profile", + args={ + "user_id": user_id, + }, + ignore_backoff=True, + ) + defer.returnValue(result) + except CodeMessageException as e: + if e.code != 404: + logger.exception("Failed to get displayname") + + raise + @defer.inlineCallbacks def get_displayname(self, target_user): if self.hs.is_mine(target_user): @@ -182,3 +220,44 @@ class ProfileHandler(BaseHandler): "Failed to update join event for room %s - %s", room_id, str(e.message) ) + + def _update_remote_profile_cache(self): + """Called periodically to check profiles of remote users we havent' + checked in a while. 
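+
+        Entries that nothing on this server subscribes to any more are
+        evicted; the rest are re-queried over federation and have their
+        last_check timestamp bumped.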
+ """ + entries = yield self.store.get_remote_profile_cache_entries_that_expire( + last_checked=self.clock.time_msec() - self.PROFILE_UPDATE_EVERY_MS + ) + + for user_id, displayname, avatar_url in entries: + is_subcscribed = yield self.store.is_subscribed_remote_profile_for_user( + user_id, + ) + if not is_subcscribed: + yield self.store.maybe_delete_remote_profile_cache(user_id) + continue + + try: + profile = yield self.federation.make_query( + destination=get_domain_from_id(user_id), + query_type="profile", + args={ + "user_id": user_id, + }, + ignore_backoff=True, + ) + except: + logger.exception("Failed to get avatar_url") + + yield self.store.update_remote_profile_cache( + user_id, displayname, avatar_url + ) + continue + + new_name = profile.get("displayname") + new_avatar = profile.get("avatar_url") + + # We always hit update to update the last_check timestamp + yield self.store.update_remote_profile_cache( + user_id, new_name, new_avatar + ) diff --git a/synapse/storage/profile.py b/synapse/storage/profile.py index 26a40905ae..dca6af8a77 100644 --- a/synapse/storage/profile.py +++ b/synapse/storage/profile.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +from twisted.internet import defer + from ._base import SQLBaseStore @@ -55,3 +57,99 @@ class ProfileStore(SQLBaseStore): updatevalues={"avatar_url": new_avatar_url}, desc="set_profile_avatar_url", ) + + def get_from_remote_profile_cache(self, user_id): + return self._simple_select_one( + table="remote_profile_cache", + keyvalues={"user_id": user_id}, + retcols=("displayname", "avatar_url", "last_check"), + allow_none=True, + desc="get_from_remote_profile_cache", + ) + + def add_remote_profile_cache(self, user_id, displayname, avatar_url): + """Ensure we are caching the remote user's profiles. + + This should only be called when `is_subscribed_remote_profile_for_user` + would return true for the user. + """ + return self._simple_upsert( + table="remote_profile_cache", + keyvalues={"user_id": user_id}, + values={ + "displayname": displayname, + "avatar_url": avatar_url, + "last_check": self._clock.time_msec(), + }, + desc="add_remote_profile_cache", + ) + + def update_remote_profile_cache(self, user_id, displayname, avatar_url): + return self._simple_update( + table="remote_profile_cache", + keyvalues={"user_id": user_id}, + values={ + "displayname": displayname, + "avatar_url": avatar_url, + "last_check": self._clock.time_msec(), + }, + desc="update_remote_profile_cache", + ) + + @defer.inlineCallbacks + def maybe_delete_remote_profile_cache(self, user_id): + """Check if we still care about the remote user's profile, and if we + don't then remove their profile from the cache + """ + subscribed = yield self.is_subscribed_remote_profile_for_user(user_id) + if not subscribed: + yield self._simple_delete( + table="remote_profile_cache", + keyvalues={"user_id": user_id}, + desc="delete_remote_profile_cache", + ) + + def get_remote_profile_cache_entries_that_expire(self, last_checked): + """Get all users who haven't been checked since `last_checked` + """ + def _get_remote_profile_cache_entries_that_expire_txn(txn): + sql = """ + SELECT user_id, displayname, avatar_url + FROM remote_profile_cache + WHERE last_check < ? 
+ """ + + txn.execute(sql, (last_checked,)) + + return self.cursor_to_dict(txn) + + return self.runInteraction( + "get_remote_profile_cache_entries_that_expire", + _get_remote_profile_cache_entries_that_expire_txn, + ) + + @defer.inlineCallbacks + def is_subscribed_remote_profile_for_user(self, user_id): + """Check whether we are interested in a remote user's profile. + """ + res = yield self._simple_select_one_onecol( + table="group_users", + keyvalues={"user_id": user_id}, + retcol="user_id", + allow_none=True, + desc="should_update_remote_profile_cache_for_user", + ) + + if res: + defer.returnValue(True) + + res = yield self._simple_select_one_onecol( + table="group_invites", + keyvalues={"user_id": user_id}, + retcol="user_id", + allow_none=True, + desc="should_update_remote_profile_cache_for_user", + ) + + if res: + defer.returnValue(True) diff --git a/synapse/storage/schema/delta/43/profile_cache.sql b/synapse/storage/schema/delta/43/profile_cache.sql new file mode 100644 index 0000000000..e5ddc84df0 --- /dev/null +++ b/synapse/storage/schema/delta/43/profile_cache.sql @@ -0,0 +1,28 @@ +/* Copyright 2017 New Vector Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +-- A subset of remote users whose profiles we have cached. +-- Whether a user is in this table or not is defined by the storage function +-- `is_subscribed_remote_profile_for_user` +CREATE TABLE remote_profile_cache ( + user_id TEXT NOT NULL, + displayname TEXT, + avatar_url TEXT, + last_check BIGINT NOT NULL +); + +CREATE UNIQUE INDEX remote_profile_cache_user_id ON remote_profile_cache(user_id); +CREATE INDEX remote_profile_cache_time ON remote_profile_cache(last_check); From bf81f3cf2c3e5b1d96953f3116c22aee05fb79b3 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 25 Aug 2017 14:34:56 +0100 Subject: [PATCH 0105/1637] Split out profile handler to fix tests --- synapse/handlers/__init__.py | 2 -- synapse/handlers/groups_local.py | 3 +-- synapse/handlers/message.py | 3 ++- synapse/handlers/profile.py | 13 ++++++++----- synapse/handlers/register.py | 4 ++-- synapse/handlers/room_member.py | 4 +++- synapse/rest/client/v1/profile.py | 18 +++++++++--------- synapse/server.py | 5 +++++ tests/handlers/test_profile.py | 4 +--- tests/handlers/test_register.py | 5 +++-- tests/rest/client/v1/test_profile.py | 3 +-- 11 files changed, 35 insertions(+), 29 deletions(-) diff --git a/synapse/handlers/__init__.py b/synapse/handlers/__init__.py index 5ad408f549..53213cdccf 100644 --- a/synapse/handlers/__init__.py +++ b/synapse/handlers/__init__.py @@ -20,7 +20,6 @@ from .room import ( from .room_member import RoomMemberHandler from .message import MessageHandler from .federation import FederationHandler -from .profile import ProfileHandler from .directory import DirectoryHandler from .admin import AdminHandler from .identity import IdentityHandler @@ -52,7 +51,6 @@ class Handlers(object): self.room_creation_handler = RoomCreationHandler(hs) self.room_member_handler = RoomMemberHandler(hs) self.federation_handler = 
FederationHandler(hs) - self.profile_handler = ProfileHandler(hs) self.directory_handler = DirectoryHandler(hs) self.admin_handler = AdminHandler(hs) self.identity_handler = IdentityHandler(hs) diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index bfa10bde5a..1950c12f1f 100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -56,8 +56,7 @@ class GroupsLocalHandler(object): self.notifier = hs.get_notifier() self.attestations = hs.get_groups_attestation_signing() - handlers = hs.get_handlers() - self.profile_handler = handlers.profile_handler + self.profile_handler = hs.get_profile_handler() # Ensure attestations get renewed hs.get_groups_attestation_renewer() diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index be4f123c54..5b8f20b73c 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -47,6 +47,7 @@ class MessageHandler(BaseHandler): self.state = hs.get_state_handler() self.clock = hs.get_clock() self.validator = EventValidator() + self.profile_handler = hs.get_profile_handler() self.pagination_lock = ReadWriteLock() @@ -210,7 +211,7 @@ class MessageHandler(BaseHandler): if membership in {Membership.JOIN, Membership.INVITE}: # If event doesn't include a display name, add one. - profile = self.hs.get_handlers().profile_handler + profile = self.profile_handler content = builder.content try: diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index 57e22edb0d..5e34501c7a 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -22,18 +22,21 @@ from synapse.api.errors import SynapseError, AuthError, CodeMessageException from synapse.types import UserID, get_domain_from_id from ._base import BaseHandler - logger = logging.getLogger(__name__) -class ProfileHandler(BaseHandler): +class ProfileHandler(object): PROFILE_UPDATE_MS = 60 * 1000 PROFILE_UPDATE_EVERY_MS = 24 * 60 * 60 * 1000 def __init__(self, hs): - super(ProfileHandler, self).__init__(hs) - + self.hs = hs + self.store = hs.get_datastore() self.clock = hs.get_clock() + self.ratelimiter = hs.get_ratelimiter() + + # AWFUL hack to get at BaseHandler.ratelimit + self.base_handler = BaseHandler(hs) self.federation = hs.get_replication_layer() self.federation.register_query_handler( @@ -194,7 +197,7 @@ class ProfileHandler(BaseHandler): if not self.hs.is_mine(user): return - yield self.ratelimit(requester) + yield self.base_handler.ratelimit(requester) room_ids = yield self.store.get_rooms_for_user( user.to_string(), diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index ee3a2269a8..560fb36254 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -36,6 +36,7 @@ class RegistrationHandler(BaseHandler): super(RegistrationHandler, self).__init__(hs) self.auth = hs.get_auth() + self.profile_handler = hs.get_profile_handler() self.captcha_client = CaptchaServerHttpClient(hs) self._next_generated_user_id = None @@ -423,8 +424,7 @@ class RegistrationHandler(BaseHandler): if displayname is not None: logger.info("setting user display name: %s -> %s", user_id, displayname) - profile_handler = self.hs.get_handlers().profile_handler - yield profile_handler.set_displayname( + yield self.profile_handler.set_displayname( user, requester, displayname, by_admin=True, ) diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index b3f979b246..dadc19d45b 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ 
-45,6 +45,8 @@ class RoomMemberHandler(BaseHandler): def __init__(self, hs): super(RoomMemberHandler, self).__init__(hs) + self.profile_handler = hs.get_profile_handler() + self.member_linearizer = Linearizer(name="member") self.clock = hs.get_clock() @@ -255,7 +257,7 @@ class RoomMemberHandler(BaseHandler): content["membership"] = Membership.JOIN - profile = self.hs.get_handlers().profile_handler + profile = self.profile_handler if not content_specified: content["displayname"] = yield profile.get_displayname(target) content["avatar_url"] = yield profile.get_avatar_url(target) diff --git a/synapse/rest/client/v1/profile.py b/synapse/rest/client/v1/profile.py index 1a5045c9ec..d7edc34245 100644 --- a/synapse/rest/client/v1/profile.py +++ b/synapse/rest/client/v1/profile.py @@ -26,13 +26,13 @@ class ProfileDisplaynameRestServlet(ClientV1RestServlet): def __init__(self, hs): super(ProfileDisplaynameRestServlet, self).__init__(hs) - self.handlers = hs.get_handlers() + self.profile_handler = hs.get_profile_handler() @defer.inlineCallbacks def on_GET(self, request, user_id): user = UserID.from_string(user_id) - displayname = yield self.handlers.profile_handler.get_displayname( + displayname = yield self.profile_handler.get_displayname( user, ) @@ -55,7 +55,7 @@ class ProfileDisplaynameRestServlet(ClientV1RestServlet): except: defer.returnValue((400, "Unable to parse name")) - yield self.handlers.profile_handler.set_displayname( + yield self.profile_handler.set_displayname( user, requester, new_name, is_admin) defer.returnValue((200, {})) @@ -69,13 +69,13 @@ class ProfileAvatarURLRestServlet(ClientV1RestServlet): def __init__(self, hs): super(ProfileAvatarURLRestServlet, self).__init__(hs) - self.handlers = hs.get_handlers() + self.profile_handler = hs.get_profile_handler() @defer.inlineCallbacks def on_GET(self, request, user_id): user = UserID.from_string(user_id) - avatar_url = yield self.handlers.profile_handler.get_avatar_url( + avatar_url = yield self.profile_handler.get_avatar_url( user, ) @@ -97,7 +97,7 @@ class ProfileAvatarURLRestServlet(ClientV1RestServlet): except: defer.returnValue((400, "Unable to parse name")) - yield self.handlers.profile_handler.set_avatar_url( + yield self.profile_handler.set_avatar_url( user, requester, new_name, is_admin) defer.returnValue((200, {})) @@ -111,16 +111,16 @@ class ProfileRestServlet(ClientV1RestServlet): def __init__(self, hs): super(ProfileRestServlet, self).__init__(hs) - self.handlers = hs.get_handlers() + self.profile_handler = hs.get_profile_handler() @defer.inlineCallbacks def on_GET(self, request, user_id): user = UserID.from_string(user_id) - displayname = yield self.handlers.profile_handler.get_displayname( + displayname = yield self.profile_handler.get_displayname( user, ) - avatar_url = yield self.handlers.profile_handler.get_avatar_url( + avatar_url = yield self.profile_handler.get_avatar_url( user, ) diff --git a/synapse/server.py b/synapse/server.py index d0a6272766..5b892cc390 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -51,6 +51,7 @@ from synapse.handlers.receipts import ReceiptsHandler from synapse.handlers.read_marker import ReadMarkerHandler from synapse.handlers.user_directory import UserDirectoyHandler from synapse.handlers.groups_local import GroupsLocalHandler +from synapse.handlers.profile import ProfileHandler from synapse.groups.groups_server import GroupsServerHandler from synapse.groups.attestations import GroupAttestionRenewer, GroupAttestationSigning from synapse.http.client import SimpleHttpClient, 
InsecureInterceptableContextFactory @@ -114,6 +115,7 @@ class HomeServer(object): 'application_service_scheduler', 'application_service_handler', 'device_message_handler', + 'profile_handler', 'notifier', 'distributor', 'client_resource', @@ -258,6 +260,9 @@ class HomeServer(object): def build_initial_sync_handler(self): return InitialSyncHandler(self) + def build_profile_handler(self): + return ProfileHandler(self) + def build_event_sources(self): return EventSources(self) diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py index 2a203129ca..a5f47181d7 100644 --- a/tests/handlers/test_profile.py +++ b/tests/handlers/test_profile.py @@ -62,8 +62,6 @@ class ProfileTestCase(unittest.TestCase): self.ratelimiter = hs.get_ratelimiter() self.ratelimiter.send_message.return_value = (True, 0) - hs.handlers = ProfileHandlers(hs) - self.store = hs.get_datastore() self.frank = UserID.from_string("@1234ABCD:test") @@ -72,7 +70,7 @@ class ProfileTestCase(unittest.TestCase): yield self.store.create_profile(self.frank.localpart) - self.handler = hs.get_handlers().profile_handler + self.handler = hs.get_profile_handler() @defer.inlineCallbacks def test_get_my_name(self): diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index c8cf9a63ec..e990e45220 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -40,13 +40,14 @@ class RegistrationTestCase(unittest.TestCase): self.hs = yield setup_test_homeserver( handlers=None, http_client=None, - expire_access_token=True) + expire_access_token=True, + profile_handler=Mock(), + ) self.macaroon_generator = Mock( generate_access_token=Mock(return_value='secret')) self.hs.get_macaroon_generator = Mock(return_value=self.macaroon_generator) self.hs.handlers = RegistrationHandlers(self.hs) self.handler = self.hs.get_handlers().registration_handler - self.hs.get_handlers().profile_handler = Mock() @defer.inlineCallbacks def test_user_is_created_and_logged_in_if_doesnt_exist(self): diff --git a/tests/rest/client/v1/test_profile.py b/tests/rest/client/v1/test_profile.py index 1e95e97538..dddcf51b69 100644 --- a/tests/rest/client/v1/test_profile.py +++ b/tests/rest/client/v1/test_profile.py @@ -46,6 +46,7 @@ class ProfileTestCase(unittest.TestCase): resource_for_client=self.mock_resource, federation=Mock(), replication_layer=Mock(), + profile_handler=self.mock_handler ) def _get_user_by_req(request=None, allow_guest=False): @@ -53,8 +54,6 @@ class ProfileTestCase(unittest.TestCase): hs.get_v1auth().get_user_by_req = _get_user_by_req - hs.get_handlers().profile_handler = self.mock_handler - profile.register_servlets(hs, self.mock_resource) @defer.inlineCallbacks From 258409ef6156782d0c15cd5d1c1620b5734f379c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 25 Aug 2017 14:45:20 +0100 Subject: [PATCH 0106/1637] Fix typos and reinherit --- synapse/handlers/profile.py | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index 5e34501c7a..c3cee38a43 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -25,18 +25,12 @@ from ._base import BaseHandler logger = logging.getLogger(__name__) -class ProfileHandler(object): +class ProfileHandler(BaseHandler): PROFILE_UPDATE_MS = 60 * 1000 PROFILE_UPDATE_EVERY_MS = 24 * 60 * 60 * 1000 def __init__(self, hs): - self.hs = hs - self.store = hs.get_datastore() - self.clock = hs.get_clock() - self.ratelimiter = hs.get_ratelimiter() - - # 
AWFUL hack to get at BaseHandler.ratelimit - self.base_handler = BaseHandler(hs) + super(ProfileHandler, self).__init__(hs) self.federation = hs.get_replication_layer() self.federation.register_query_handler( @@ -197,7 +191,7 @@ class ProfileHandler(object): if not self.hs.is_mine(user): return - yield self.base_handler.ratelimit(requester) + yield self.ratelimit(requester) room_ids = yield self.store.get_rooms_for_user( user.to_string(), @@ -225,7 +219,7 @@ class ProfileHandler(object): ) def _update_remote_profile_cache(self): - """Called periodically to check profiles of remote users we havent' + """Called periodically to check profiles of remote users we haven't checked in a while. """ entries = yield self.store.get_remote_profile_cache_entries_that_expire( @@ -233,10 +227,10 @@ class ProfileHandler(object): ) for user_id, displayname, avatar_url in entries: - is_subcscribed = yield self.store.is_subscribed_remote_profile_for_user( + is_subscribed = yield self.store.is_subscribed_remote_profile_for_user( user_id, ) - if not is_subcscribed: + if not is_subscribed: yield self.store.maybe_delete_remote_profile_cache(user_id) continue From 4a9b1cf25300eedf66aaefcb36e23f5fadf2b57a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 25 Aug 2017 16:23:58 +0100 Subject: [PATCH 0107/1637] Add user profiles to summary from group server --- synapse/groups/groups_server.py | 7 ++++++- synapse/handlers/profile.py | 23 +++++++++++++++++++++++ synapse/storage/profile.py | 2 +- 3 files changed, 30 insertions(+), 2 deletions(-) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 6bccae4bfb..94cf9788bb 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -45,6 +45,7 @@ class GroupsServerHandler(object): self.server_name = hs.hostname self.attestations = hs.get_groups_attestation_signing() self.transport_client = hs.get_federation_transport_client() + self.profile_handler = hs.get_profile_handler() # Ensure attestations get renewed hs.get_groups_attestation_renewer() @@ -128,6 +129,9 @@ class GroupsServerHandler(object): group_id, user_id, ) + user_profile = yield self.profile_handler.get_profile_from_cache(user_id) + entry.update(user_profile) + users.sort(key=lambda e: e.get("order", 0)) membership_info = yield self.store.get_users_membership_info_in_group( @@ -387,7 +391,8 @@ class GroupsServerHandler(object): entry = {"user_id": g_user_id} - # TODO: Get profile information + profile = yield self.profile_handler.get_profile_from_cache(g_user_id) + entry.update(profile) if not is_public: entry["is_public"] = False diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index c3cee38a43..e56e0a52bf 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -71,6 +71,29 @@ class ProfileHandler(BaseHandler): raise + @defer.inlineCallbacks + def get_profile_from_cache(self, user_id): + """Get the profile information from our local cache. If the user is + ours then the profile information will always be correct. Otherwise, + it may be out of date/missing. 
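+
+        Used when building group summaries, where a possibly stale profile
+        is preferable to a federation query per listed member.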
+ """ + target_user = UserID.from_string(user_id) + if self.hs.is_mine(target_user): + displayname = yield self.store.get_profile_displayname( + target_user.localpart + ) + avatar_url = yield self.store.get_profile_avatar_url( + target_user.localpart + ) + + defer.returnValue({ + "displayname": displayname, + "avatar_url": avatar_url, + }) + else: + profile = yield self.store.get_from_remote_profile_cache(user_id) + defer.returnValue(profile or {}) + @defer.inlineCallbacks def get_displayname(self, target_user): if self.hs.is_mine(target_user): diff --git a/synapse/storage/profile.py b/synapse/storage/profile.py index dca6af8a77..beea3102fc 100644 --- a/synapse/storage/profile.py +++ b/synapse/storage/profile.py @@ -62,7 +62,7 @@ class ProfileStore(SQLBaseStore): return self._simple_select_one( table="remote_profile_cache", keyvalues={"user_id": user_id}, - retcols=("displayname", "avatar_url", "last_check"), + retcols=("displayname", "avatar_url",), allow_none=True, desc="get_from_remote_profile_cache", ) From 6e67aaa7f249b196aa0288d713c8265c957cfbd5 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 1 Sep 2017 10:06:21 +0100 Subject: [PATCH 0108/1637] Set --python when running sytest .. because I want to make the 'install_and_run' script useful for non-synapse jobs, which do not accept --python. In any case we set up the path here, so sytest shouldn't be guessing it. --- jenkins-dendron-haproxy-postgres.sh | 1 + jenkins-dendron-postgres.sh | 1 + jenkins-postgres.sh | 1 + jenkins-sqlite.sh | 1 + 4 files changed, 4 insertions(+) diff --git a/jenkins-dendron-haproxy-postgres.sh b/jenkins-dendron-haproxy-postgres.sh index d64b2d2c9d..2f6544e22c 100755 --- a/jenkins-dendron-haproxy-postgres.sh +++ b/jenkins-dendron-haproxy-postgres.sh @@ -17,6 +17,7 @@ export HAPROXY_BIN=/home/haproxy/haproxy-1.6.11/haproxy ./sytest/jenkins/prep_sytest_for_postgres.sh ./sytest/jenkins/install_and_run.sh \ + --python $WORKSPACE/.tox/bin/python \ --synapse-directory $WORKSPACE \ --dendron $WORKSPACE/dendron/bin/dendron \ --haproxy \ diff --git a/jenkins-dendron-postgres.sh b/jenkins-dendron-postgres.sh index 37ae746f4b..bec6a72152 100755 --- a/jenkins-dendron-postgres.sh +++ b/jenkins-dendron-postgres.sh @@ -15,5 +15,6 @@ export SYNAPSE_CACHE_FACTOR=1 ./sytest/jenkins/prep_sytest_for_postgres.sh ./sytest/jenkins/install_and_run.sh \ + --python $WORKSPACE/.tox/bin/python \ --synapse-directory $WORKSPACE \ --dendron $WORKSPACE/dendron/bin/dendron \ diff --git a/jenkins-postgres.sh b/jenkins-postgres.sh index f2ca8ccdff..8b38d7418d 100755 --- a/jenkins-postgres.sh +++ b/jenkins-postgres.sh @@ -14,4 +14,5 @@ export SYNAPSE_CACHE_FACTOR=1 ./sytest/jenkins/prep_sytest_for_postgres.sh ./sytest/jenkins/install_and_run.sh \ + --python $WORKSPACE/.tox/bin/python \ --synapse-directory $WORKSPACE \ diff --git a/jenkins-sqlite.sh b/jenkins-sqlite.sh index 84613d979c..d20c6da645 100755 --- a/jenkins-sqlite.sh +++ b/jenkins-sqlite.sh @@ -12,4 +12,5 @@ export SYNAPSE_CACHE_FACTOR=1 ./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git ./sytest/jenkins/install_and_run.sh \ + --python $WORKSPACE/.tox/bin/python \ --synapse-directory $WORKSPACE \ From f06ffdb6fa209b34dbd6367d3632266ba1f9f6a7 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 1 Sep 2017 10:31:45 +0100 Subject: [PATCH 0109/1637] fix python path in jenkins scripts --- jenkins-dendron-haproxy-postgres.sh | 2 +- jenkins-dendron-postgres.sh | 2 +- jenkins-postgres.sh | 2 +- jenkins-sqlite.sh | 2 +- 4 files changed, 4 insertions(+), 
4 deletions(-) diff --git a/jenkins-dendron-haproxy-postgres.sh b/jenkins-dendron-haproxy-postgres.sh index 2f6544e22c..07979bf8b8 100755 --- a/jenkins-dendron-haproxy-postgres.sh +++ b/jenkins-dendron-haproxy-postgres.sh @@ -17,7 +17,7 @@ export HAPROXY_BIN=/home/haproxy/haproxy-1.6.11/haproxy ./sytest/jenkins/prep_sytest_for_postgres.sh ./sytest/jenkins/install_and_run.sh \ - --python $WORKSPACE/.tox/bin/python \ + --python $WORKSPACE/.tox/py27/bin/python \ --synapse-directory $WORKSPACE \ --dendron $WORKSPACE/dendron/bin/dendron \ --haproxy \ diff --git a/jenkins-dendron-postgres.sh b/jenkins-dendron-postgres.sh index bec6a72152..3b932fe340 100755 --- a/jenkins-dendron-postgres.sh +++ b/jenkins-dendron-postgres.sh @@ -15,6 +15,6 @@ export SYNAPSE_CACHE_FACTOR=1 ./sytest/jenkins/prep_sytest_for_postgres.sh ./sytest/jenkins/install_and_run.sh \ - --python $WORKSPACE/.tox/bin/python \ + --python $WORKSPACE/.tox/py27/bin/python \ --synapse-directory $WORKSPACE \ --dendron $WORKSPACE/dendron/bin/dendron \ diff --git a/jenkins-postgres.sh b/jenkins-postgres.sh index 8b38d7418d..1afb736394 100755 --- a/jenkins-postgres.sh +++ b/jenkins-postgres.sh @@ -14,5 +14,5 @@ export SYNAPSE_CACHE_FACTOR=1 ./sytest/jenkins/prep_sytest_for_postgres.sh ./sytest/jenkins/install_and_run.sh \ - --python $WORKSPACE/.tox/bin/python \ + --python $WORKSPACE/.tox/py27/bin/python \ --synapse-directory $WORKSPACE \ diff --git a/jenkins-sqlite.sh b/jenkins-sqlite.sh index d20c6da645..baf4713a01 100755 --- a/jenkins-sqlite.sh +++ b/jenkins-sqlite.sh @@ -12,5 +12,5 @@ export SYNAPSE_CACHE_FACTOR=1 ./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git ./sytest/jenkins/install_and_run.sh \ - --python $WORKSPACE/.tox/bin/python \ + --python $WORKSPACE/.tox/py27/bin/python \ --synapse-directory $WORKSPACE \ From dff396de0f91e6c1647b865fdb310228aefca2cb Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 1 Sep 2017 10:06:21 +0100 Subject: [PATCH 0110/1637] Set --python when running sytest .. because I want to make the 'install_and_run' script useful for non-synapse jobs, which do not accept --python. In any case we set up the path here, so sytest shouldn't be guessing it. 
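For illustration, a sketch of what the explicit interpreter selection amounts to (the workspace layout is an assumption taken from the jenkins scripts, not part of this change):

```python
import os
import subprocess

# Assumed jenkins layout: tox builds its py27 virtualenv under $WORKSPACE/.tox.
workspace = os.environ["WORKSPACE"]
venv_python = os.path.join(workspace, ".tox", "py27", "bin", "python")

# The virtualenv's python can import synapse and its dependencies; the
# system python typically cannot, which is why sytest must not guess.
subprocess.check_call([venv_python, "-c", "import synapse"], cwd=workspace)
```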
--- jenkins-dendron-haproxy-postgres.sh | 1 + jenkins-dendron-postgres.sh | 1 + jenkins-postgres.sh | 1 + jenkins-sqlite.sh | 1 + 4 files changed, 4 insertions(+) diff --git a/jenkins-dendron-haproxy-postgres.sh b/jenkins-dendron-haproxy-postgres.sh index d64b2d2c9d..07979bf8b8 100755 --- a/jenkins-dendron-haproxy-postgres.sh +++ b/jenkins-dendron-haproxy-postgres.sh @@ -17,6 +17,7 @@ export HAPROXY_BIN=/home/haproxy/haproxy-1.6.11/haproxy ./sytest/jenkins/prep_sytest_for_postgres.sh ./sytest/jenkins/install_and_run.sh \ + --python $WORKSPACE/.tox/py27/bin/python \ --synapse-directory $WORKSPACE \ --dendron $WORKSPACE/dendron/bin/dendron \ --haproxy \ diff --git a/jenkins-dendron-postgres.sh b/jenkins-dendron-postgres.sh index 37ae746f4b..3b932fe340 100755 --- a/jenkins-dendron-postgres.sh +++ b/jenkins-dendron-postgres.sh @@ -15,5 +15,6 @@ export SYNAPSE_CACHE_FACTOR=1 ./sytest/jenkins/prep_sytest_for_postgres.sh ./sytest/jenkins/install_and_run.sh \ + --python $WORKSPACE/.tox/py27/bin/python \ --synapse-directory $WORKSPACE \ --dendron $WORKSPACE/dendron/bin/dendron \ diff --git a/jenkins-postgres.sh b/jenkins-postgres.sh index f2ca8ccdff..1afb736394 100755 --- a/jenkins-postgres.sh +++ b/jenkins-postgres.sh @@ -14,4 +14,5 @@ export SYNAPSE_CACHE_FACTOR=1 ./sytest/jenkins/prep_sytest_for_postgres.sh ./sytest/jenkins/install_and_run.sh \ + --python $WORKSPACE/.tox/py27/bin/python \ --synapse-directory $WORKSPACE \ diff --git a/jenkins-sqlite.sh b/jenkins-sqlite.sh index 84613d979c..baf4713a01 100755 --- a/jenkins-sqlite.sh +++ b/jenkins-sqlite.sh @@ -12,4 +12,5 @@ export SYNAPSE_CACHE_FACTOR=1 ./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git ./sytest/jenkins/install_and_run.sh \ + --python $WORKSPACE/.tox/py27/bin/python \ --synapse-directory $WORKSPACE \ From 8b16b43b7f9b303dea15258285a26f266756f3d1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Paul=20T=C3=B6tterman?= Date: Fri, 1 Sep 2017 16:52:45 +0300 Subject: [PATCH 0111/1637] Document known to work postgres version --- docs/postgres.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/postgres.rst b/docs/postgres.rst index b592801e93..904942ec74 100644 --- a/docs/postgres.rst +++ b/docs/postgres.rst @@ -1,6 +1,8 @@ Using Postgres -------------- +Postgres version 9.4 or later is known to work. + Set up database =============== From 4dd61df6f8d8d622b1327e2ce678d26e9c6911b0 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 5 Sep 2017 16:35:23 +0100 Subject: [PATCH 0112/1637] do tox install with pip -e - this ensures we end up with a working virtualenv which we can use for other things. --- tox.ini | 34 +++++++++++++++++++++++++++++----- 1 file changed, 29 insertions(+), 5 deletions(-) diff --git a/tox.ini b/tox.ini index 39ad305360..f408defc8f 100644 --- a/tox.ini +++ b/tox.ini @@ -14,14 +14,38 @@ deps = setenv = PYTHONDONTWRITEBYTECODE = no_byte_code - # As of twisted 16.4, trial tries to import the tests as a package, which - # means it needs to be on the pythonpath. 
- PYTHONPATH = {toxinidir} + commands = - /bin/sh -c "find {toxinidir} -name '*.pyc' -delete ; coverage run {env:COVERAGE_OPTS:} --source={toxinidir}/synapse \ - {envbindir}/trial {env:TRIAL_FLAGS:} {posargs:tests} {env:TOXSUFFIX:}" + /usr/bin/find "{toxinidir}" -name '*.pyc' -delete + coverage run {env:COVERAGE_OPTS:} --source="{toxinidir}/synapse" \ + "{envbindir}/trial" {env:TRIAL_FLAGS:} {posargs:tests} {env:TOXSUFFIX:} {env:DUMP_COVERAGE_COMMAND:coverage report -m} +[testenv:py27] + +# As of twisted 16.4, trial tries to import the tests as a package (previously +# it loaded the files explicitly), which means they need to be on the +# pythonpath. Our sdist doesn't include the 'tests' package, so normally it +# doesn't work within the tox virtualenv. +# +# As a workaround, we tell tox to do install with 'pip -e', which just +# creates a symlink to the project directory instead of unpacking the sdist. +# +# (An alternative to this would be to set PYTHONPATH to include the project +# directory. Note two problems with this: +# +# - if you set it via `setenv`, then it is also set during the 'install' +# phase, which inhibits unpacking the sdist, so the virtualenv isn't +# useful for anything else without setting PYTHONPATH similarly. +# +# - `synapse` is also loaded from PYTHONPATH so even if you only set +# PYTHONPATH for the test phase, we're still running the tests against +# the working copy rather than the contents of the sdist. So frankly +# you might as well use -e in the first place. +# +# ) +usedevelop=true + [testenv:packaging] deps = check-manifest From 59de2c7afa00edb4fedc0aae39a5a87c9f3464f1 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 5 Sep 2017 21:57:19 +0100 Subject: [PATCH 0113/1637] Exclude the github issue template from our sdist (#2440) PR #2413 added an issue template, but just adding files to the project directory upsets the packaging scripts: we need to explicitly include or exclude them. Move the template into a .github directory to make that easy, and to de-clutter the root a bit. --- ISSUE_TEMPLATE.md => .github/ISSUE_TEMPLATE.md | 0 MANIFEST.in | 1 + 2 files changed, 1 insertion(+) rename ISSUE_TEMPLATE.md => .github/ISSUE_TEMPLATE.md (100%) diff --git a/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md similarity index 100% rename from ISSUE_TEMPLATE.md rename to .github/ISSUE_TEMPLATE.md diff --git a/MANIFEST.in b/MANIFEST.in index 981698143f..afb60e12ee 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -27,4 +27,5 @@ exclude jenkins*.sh exclude jenkins* recursive-exclude jenkins *.sh +prune .github prune demo/etc From 53cc8ad35a269723478a1ee1a9a96d510a7b044f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 7 Sep 2017 15:08:39 +0100 Subject: [PATCH 0114/1637] Send down device list change notif when member leaves/rejoins room --- synapse/handlers/device.py | 2 +- synapse/handlers/sync.py | 64 ++++++++++++++++++++++------ synapse/rest/client/v2_alpha/sync.py | 3 +- 3 files changed, 55 insertions(+), 14 deletions(-) diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index ed60d494ff..be120b2f34 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -320,7 +320,7 @@ class DeviceHandler(BaseHandler): # check if this member has changed since any of the extremities # at the stream_ordering, and add them to the list if so. 
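        # (prev_state_ids holds the room state at each forward extremity as
        # of the client's token; if any extremity's membership event id
        # differs from the current one, that member's devices may have
        # changed since the token, so they are added to possibly_changed.)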
- for state_dict in prev_state_ids.values(): + for state_dict in prev_state_ids.itervalues(): prev_event_id = state_dict.get(key, None) if not prev_event_id or prev_event_id != event_id: possibly_changed.add(state_key) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index e6df1819b9..4ee6109cf8 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -108,6 +108,16 @@ class InvitedSyncResult(collections.namedtuple("InvitedSyncResult", [ return True +class DeviceLists(collections.namedtuple("DeviceLists", [ + "changed", # list of user_ids whose devices may have changed + "left", # list of user_ids whose devices we no longer track +])): + __slots__ = [] + + def __nonzero__(self): + return bool(self.changed or self.left) + + class SyncResult(collections.namedtuple("SyncResult", [ "next_batch", # Token for the next sync "presence", # List of presence events for the user. @@ -535,7 +545,7 @@ class SyncHandler(object): res = yield self._generate_sync_entry_for_rooms( sync_result_builder, account_data_by_room ) - newly_joined_rooms, newly_joined_users = res + newly_joined_rooms, newly_joined_users, _, newly_left_users = res block_all_presence_data = ( since_token is None and @@ -549,7 +559,11 @@ class SyncHandler(object): yield self._generate_sync_entry_for_to_device(sync_result_builder) device_lists = yield self._generate_sync_entry_for_device_list( - sync_result_builder + sync_result_builder, + newly_joined_rooms=newly_joined_rooms, + newly_joined_users=newly_joined_users, + newly_left_rooms=[], + newly_left_users=newly_left_users, ) device_id = sync_config.device_id @@ -574,7 +588,9 @@ class SyncHandler(object): @measure_func("_generate_sync_entry_for_device_list") @defer.inlineCallbacks - def _generate_sync_entry_for_device_list(self, sync_result_builder): + def _generate_sync_entry_for_device_list(self, sync_result_builder, + newly_joined_rooms, newly_joined_users, + newly_left_rooms, newly_left_users): user_id = sync_result_builder.sync_config.user.to_string() since_token = sync_result_builder.since_token @@ -582,16 +598,32 @@ class SyncHandler(object): changed = yield self.store.get_user_whose_devices_changed( since_token.device_list_key ) - if not changed: - defer.returnValue([]) + + # TODO: Check that these users are actually new, i.e. either they + # weren't in the previous sync *or* they left and rejoined. 
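+        # (Over-reporting here is safe: clients need the device lists of
+        # anyone they newly share an encrypted room with, and merely refresh
+        # keys they already knew about, whereas missing a user would break
+        # outbound encryption to their devices.)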
+ changed.update(newly_joined_users) + + # TODO: Add the members from newly_*_rooms + + if not changed and not newly_left_users: + defer.returnValue(DeviceLists( + changed=[], + left=newly_left_users, + )) users_who_share_room = yield self.store.get_users_who_share_room_with_user( user_id ) - defer.returnValue(users_who_share_room & changed) + defer.returnValue(DeviceLists( + changed=users_who_share_room & changed, + left=set(newly_left_users) - users_who_share_room, + )) else: - defer.returnValue([]) + defer.returnValue(DeviceLists( + changed=[], + left=[], + )) @defer.inlineCallbacks def _generate_sync_entry_for_to_device(self, sync_result_builder): @@ -755,8 +787,8 @@ class SyncHandler(object): account_data_by_room(dict): Dictionary of per room account data Returns: - Deferred(tuple): Returns a 2-tuple of - `(newly_joined_rooms, newly_joined_users)` + Deferred(tuple): Returns a 4-tuple of + `(newly_joined_rooms, newly_joined_users, newly_left_rooms, newly_left_users)` """ user_id = sync_result_builder.sync_config.user.to_string() block_all_room_ephemeral = ( @@ -787,7 +819,7 @@ class SyncHandler(object): ) if not tags_by_room: logger.debug("no-oping sync") - defer.returnValue(([], [])) + defer.returnValue(([], [], [], [])) ignored_account_data = yield self.store.get_global_account_data_by_type_for_user( "m.ignored_user_list", user_id=user_id, @@ -828,17 +860,24 @@ class SyncHandler(object): # Now we want to get any newly joined users newly_joined_users = set() + newly_left_users = set() if since_token: for joined_sync in sync_result_builder.joined: it = itertools.chain( - joined_sync.timeline.events, joined_sync.state.values() + joined_sync.timeline.events, joined_sync.state.itervalues() ) for event in it: if event.type == EventTypes.Member: if event.membership == Membership.JOIN: newly_joined_users.add(event.state_key) + else: + prev_content = event.unsigned.get("prev_content", {}) + prev_membership = prev_content.get("membership", None) + if prev_membership == Membership.JOIN: + newly_left_users.add(event.state_key) - defer.returnValue((newly_joined_rooms, newly_joined_users)) + newly_left_users -= newly_joined_users + defer.returnValue((newly_joined_rooms, newly_joined_users, [], newly_left_users)) @defer.inlineCallbacks def _have_rooms_changed(self, sync_result_builder): @@ -1259,6 +1298,7 @@ class SyncResultBuilder(object): self.invited = [] self.archived = [] self.device = [] + self.to_device = [] class RoomSyncResultBuilder(object): diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py index 2939896f44..978af9c280 100644 --- a/synapse/rest/client/v2_alpha/sync.py +++ b/synapse/rest/client/v2_alpha/sync.py @@ -189,7 +189,8 @@ class SyncRestServlet(RestServlet): "account_data": {"events": sync_result.account_data}, "to_device": {"events": sync_result.to_device}, "device_lists": { - "changed": list(sync_result.device_lists), + "changed": list(sync_result.device_lists.changed), + "left": list(sync_result.device_lists.left), }, "presence": SyncRestServlet.encode_presence( sync_result.presence, time_now From 69ef4987a68d66093007ca11886e25139ea0c970 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 8 Sep 2017 14:44:36 +0100 Subject: [PATCH 0115/1637] Add left section to /keys/changes --- synapse/handlers/device.py | 22 ++++++++++++++++------ synapse/handlers/sync.py | 2 +- synapse/rest/client/v2_alpha/keys.py | 6 ++---- 3 files changed, 19 insertions(+), 11 deletions(-) diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 
be120b2f34..ef8753b1ff 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -326,13 +326,23 @@ class DeviceHandler(BaseHandler): possibly_changed.add(state_key) break - users_who_share_room = yield self.store.get_users_who_share_room_with_user( - user_id - ) + if possibly_changed: + users_who_share_room = yield self.store.get_users_who_share_room_with_user( + user_id + ) - # Take the intersection of the users whose devices may have changed - # and those that actually still share a room with the user - defer.returnValue(users_who_share_room & possibly_changed) + # Take the intersection of the users whose devices may have changed + # and those that actually still share a room with the user + possibly_joined = possibly_changed & users_who_share_room + possibly_left = possibly_changed - users_who_share_room + else: + possibly_joined = [] + possibly_left = [] + + defer.returnValue({ + "changed": list(possibly_joined), + "left": list(possibly_left), + }) @defer.inlineCallbacks def on_federation_query_user_devices(self, user_id): diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 4ee6109cf8..9ae7fbc797 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -949,7 +949,7 @@ class SyncHandler(object): newly_joined_rooms = [] room_entries = [] invited = [] - for room_id, events in mem_change_events_by_room_id.items(): + for room_id, events in mem_change_events_by_room_id.iteritems(): non_joins = [e for e in events if e.membership != Membership.JOIN] has_join = len(non_joins) != len(events) diff --git a/synapse/rest/client/v2_alpha/keys.py b/synapse/rest/client/v2_alpha/keys.py index 6a3cfe84f8..943e87e7fd 100644 --- a/synapse/rest/client/v2_alpha/keys.py +++ b/synapse/rest/client/v2_alpha/keys.py @@ -188,13 +188,11 @@ class KeyChangesServlet(RestServlet): user_id = requester.user.to_string() - changed = yield self.device_handler.get_user_ids_changed( + results = yield self.device_handler.get_user_ids_changed( user_id, from_token, ) - defer.returnValue((200, { - "changed": list(changed), - })) + defer.returnValue((200, results)) class OneTimeKeyServlet(RestServlet): From 9ce866ed4f68450d8a2eab84be759c0056b6b992 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 12 Sep 2017 16:44:26 +0100 Subject: [PATCH 0116/1637] In sync handle device lists for newly joined/left rooms --- synapse/handlers/sync.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 9ae7fbc797..d1ba75dbda 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -599,12 +599,20 @@ class SyncHandler(object): since_token.device_list_key ) + # TODO: Be more clever than this, i.e. remove users who we already + # share a room with? + for room_id in newly_joined_rooms: + joined_users = yield self.state.get_current_user_in_room(room_id) + newly_joined_users.update(joined_users) + + for room_id in newly_left_rooms: + left_users = yield self.state.get_current_user_in_room(room_id) + newly_left_users.update(left_users) + # TODO: Check that these users are actually new, i.e. either they # weren't in the previous sync *or* they left and rejoined. 
changed.update(newly_joined_users) - # TODO: Add the members from newly_*_rooms - if not changed and not newly_left_users: defer.returnValue(DeviceLists( changed=[], From 473700f0162482e7bb57cad922de99ff29b9b216 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 13 Sep 2017 15:13:41 +0100 Subject: [PATCH 0117/1637] Get left rooms --- synapse/handlers/sync.py | 35 ++++++++++++++++++++++++++++++++--- 1 file changed, 32 insertions(+), 3 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index d1ba75dbda..9aae4c344b 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -840,7 +840,7 @@ class SyncHandler(object): if since_token: res = yield self._get_rooms_changed(sync_result_builder, ignored_users) - room_entries, invited, newly_joined_rooms = res + room_entries, invited, newly_joined_rooms, newly_left_rooms = res tags_by_room = yield self.store.get_updated_tags( user_id, since_token.account_data_key, @@ -848,6 +848,7 @@ class SyncHandler(object): else: res = yield self._get_all_rooms(sync_result_builder, ignored_users) room_entries, invited, newly_joined_rooms = res + newly_left_rooms = [] tags_by_room = yield self.store.get_tags_for_user(user_id) @@ -885,7 +886,13 @@ class SyncHandler(object): newly_left_users.add(event.state_key) newly_left_users -= newly_joined_users - defer.returnValue((newly_joined_rooms, newly_joined_users, [], newly_left_users)) + + defer.returnValue(( + newly_joined_rooms, + newly_joined_users, + newly_left_rooms, + newly_left_users, + )) @defer.inlineCallbacks def _have_rooms_changed(self, sync_result_builder): @@ -955,6 +962,7 @@ class SyncHandler(object): mem_change_events_by_room_id.setdefault(event.room_id, []).append(event) newly_joined_rooms = [] + newly_left_rooms = [] room_entries = [] invited = [] for room_id, events in mem_change_events_by_room_id.iteritems(): @@ -964,6 +972,7 @@ class SyncHandler(object): # We want to figure out if we joined the room at some point since # the last sync (even if we have since left). This is to make sure # we do send down the room, and with full state, where necessary + old_state_ids = None if room_id in joined_room_ids or has_join: old_state_ids = yield self.get_state_at(room_id, since_token) old_mem_ev_id = old_state_ids.get((EventTypes.Member, user_id), None) @@ -981,6 +990,26 @@ class SyncHandler(object): if not non_joins: continue + # Check if we have left the room. This can either be because we were + # joined before *or* that we since joined and then left. 
+ if events[-1].membership != Membership.JOIN: + if has_join: + newly_left_rooms.append(room_id) + else: + if not old_state_ids: + old_state_ids = yield self.get_state_at(room_id, since_token) + old_mem_ev_id = old_state_ids.get( + (EventTypes.Member, user_id), + None, + ) + old_mem_ev = None + if old_mem_ev_id: + old_mem_ev = yield self.store.get_event( + old_mem_ev_id, allow_none=True + ) + if old_mem_ev and old_mem_ev.membership == Membership.JOIN: + newly_left_rooms.append(room_id) + # Only bother if we're still currently invited should_invite = non_joins[-1].membership == Membership.INVITE if should_invite: @@ -1058,7 +1087,7 @@ class SyncHandler(object): upto_token=since_token, )) - defer.returnValue((room_entries, invited, newly_joined_rooms)) + defer.returnValue((room_entries, invited, newly_joined_rooms, newly_left_rooms)) @defer.inlineCallbacks def _get_all_rooms(self, sync_result_builder, ignored_users): From 4f845a07137049b9487ebd16e21637b74c774a79 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 13 Sep 2017 16:28:08 +0100 Subject: [PATCH 0118/1637] Handle joining/leaving rooms in /keys/changes --- synapse/handlers/device.py | 39 +++++++++++++++++++++++++++++++++++++- 1 file changed, 38 insertions(+), 1 deletion(-) diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index ef8753b1ff..ac9868d810 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -270,6 +270,8 @@ class DeviceHandler(BaseHandler): user_id (str) from_token (StreamToken) """ + now_token = yield self.hs.get_event_sources().get_current_token() + room_ids = yield self.store.get_rooms_for_user(user_id) # First we check if any devices have changed @@ -280,11 +282,24 @@ class DeviceHandler(BaseHandler): # Then work out if any users have since joined rooms_changed = self.store.get_rooms_that_changed(room_ids, from_token.room_key) + member_events = yield self.store.get_membership_changes_for_user( + user_id, from_token.room_key, now_token.room_key + ) + rooms_changed.update(event.room_id for event in member_events) + stream_ordering = RoomStreamToken.parse_stream_token( - from_token.room_key).stream + from_token.room_key + ).stream possibly_changed = set(changed) + possibly_left_rooms = set() for room_id in rooms_changed: + # The user may have left the room + # TODO: Check if they actually did or if we were just invited. + if room_id not in room_ids: + possibly_left_rooms.add(room_id) + continue + # Fetch the current state at the time. try: event_ids = yield self.store.get_forward_extremeties_for_room( @@ -307,9 +322,25 @@ class DeviceHandler(BaseHandler): possibly_changed.add(state_key) continue + current_member_id = current_state_ids.get((EventTypes.Member, user_id)) + if not current_member_id: + continue + # mapping from event_id -> state_dict prev_state_ids = yield self.store.get_state_ids_for_events(event_ids) + # Check if we've joined the room? If so we just blindly add all the users to + # the "possibly changed" users. + for state_dict in prev_state_ids.itervalues(): + member_event = state_dict.get((EventTypes.Member, user_id), None) + if not member_event or member_event != current_member_id: + for key, event_id in current_state_ids.iteritems(): + etype, state_key = key + if etype != EventTypes.Member: + continue + possibly_changed.append(state_key) + break + # If there has been any change in membership, include them in the # possibly changed list. We'll check if they are joined below, # and we're not toooo worried about spuriously adding users. 
@@ -324,6 +355,12 @@ class DeviceHandler(BaseHandler): prev_event_id = state_dict.get(key, None) if not prev_event_id or prev_event_id != event_id: possibly_changed.add(state_key) + if state_key == user_id: + for key, event_id in current_state_ids.iteritems(): + etype, state_key = key + if etype != EventTypes.Member: + continue + possibly_changed.add(room_id) break if possibly_changed: From 3a0cee28d6457b812123f6bad6deee476bef4984 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 14 Sep 2017 11:49:37 +0100 Subject: [PATCH 0119/1637] Actually hook leave notifs up --- synapse/handlers/sync.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 9aae4c344b..c6b04a1683 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -545,7 +545,8 @@ class SyncHandler(object): res = yield self._generate_sync_entry_for_rooms( sync_result_builder, account_data_by_room ) - newly_joined_rooms, newly_joined_users, _, newly_left_users = res + newly_joined_rooms, newly_joined_users, _, _ = res + _, _, newly_left_rooms, newly_left_users = res block_all_presence_data = ( since_token is None and @@ -562,7 +563,7 @@ class SyncHandler(object): sync_result_builder, newly_joined_rooms=newly_joined_rooms, newly_joined_users=newly_joined_users, - newly_left_rooms=[], + newly_left_rooms=newly_left_rooms, newly_left_users=newly_left_users, ) From 4a94eb3ea40a3c1bee5916d57f5c72bb75c28cf3 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 15 Sep 2017 09:56:54 +0100 Subject: [PATCH 0120/1637] Fix typo --- synapse/handlers/device.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index ac9868d810..0d6750f0e4 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -338,7 +338,7 @@ class DeviceHandler(BaseHandler): etype, state_key = key if etype != EventTypes.Member: continue - possibly_changed.append(state_key) + possibly_changed.add(state_key) break # If there has been any change in membership, include them in the From d6dadd95acdc5e4899b2b781bb0b0c42724bd10d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 18 Sep 2017 15:38:22 +0100 Subject: [PATCH 0121/1637] Correctly handle leaving room in /key/changes --- synapse/handlers/device.py | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 0d6750f0e4..dac4b3f4e0 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -292,12 +292,18 @@ class DeviceHandler(BaseHandler): ).stream possibly_changed = set(changed) - possibly_left_rooms = set() + possibly_left = set() for room_id in rooms_changed: + current_state_ids = yield self.store.get_current_state_ids(room_id) + # The user may have left the room # TODO: Check if they actually did or if we were just invited. if room_id not in room_ids: - possibly_left_rooms.add(room_id) + for key, event_id in current_state_ids.iteritems(): + etype, state_key = key + if etype != EventTypes.Member: + continue + possibly_left.add(state_key) continue # Fetch the current state at the time. 
@@ -310,8 +316,6 @@ class DeviceHandler(BaseHandler): # ordering: treat it the same as a new room event_ids = [] - current_state_ids = yield self.store.get_current_state_ids(room_id) - # special-case for an empty prev state: include all members # in the changed list if not event_ids: @@ -354,16 +358,11 @@ class DeviceHandler(BaseHandler): for state_dict in prev_state_ids.itervalues(): prev_event_id = state_dict.get(key, None) if not prev_event_id or prev_event_id != event_id: - possibly_changed.add(state_key) - if state_key == user_id: - for key, event_id in current_state_ids.iteritems(): - etype, state_key = key - if etype != EventTypes.Member: - continue - possibly_changed.add(room_id) + if state_key != user_id: + possibly_changed.add(state_key) break - if possibly_changed: + if possibly_changed or possibly_left: users_who_share_room = yield self.store.get_users_who_share_room_with_user( user_id ) @@ -371,7 +370,7 @@ class DeviceHandler(BaseHandler): # Take the intersection of the users whose devices may have changed # and those that actually still share a room with the user possibly_joined = possibly_changed & users_who_share_room - possibly_left = possibly_changed - users_who_share_room + possibly_left = (possibly_changed | possibly_left) - users_who_share_room else: possibly_joined = [] possibly_left = [] From a2562f9d749023b9564ccd36acf920eeb45178ff Mon Sep 17 00:00:00 2001 From: David Baker Date: Mon, 18 Sep 2017 15:39:39 +0100 Subject: [PATCH 0122/1637] Add support for event_id_only push format Param in the data dict of a pusher that tells an HTTP pusher to send just the event_id of the event it's notifying about and the notification counts. For clients that want to go & fetch the body of the event themselves anyway. --- synapse/push/httppusher.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index 8a5d473108..1b6510eea4 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -244,6 +244,25 @@ class HttpPusher(object): @defer.inlineCallbacks def _build_notification_dict(self, event, tweaks, badge): + if 'format' in self.data and self.data['format'] == 'event_id_only': + d = { + 'notification': { + 'event_id': event.event_id, + 'counts': { + 'unread': badge, + }, + 'devices': [ + { + 'app_id': self.app_id, + 'pushkey': self.pushkey, + 'pushkey_ts': long(self.pushkey_ts / 1000), + 'data': self.data_minus_url, + } + ] + } + } + defer.returnValue(d) + ctx = yield push_tools.get_context_for_event( self.store, self.state_handler, event, self.user_id ) From b393f5db51ab1e37f364a11bfbb0440063be4753 Mon Sep 17 00:00:00 2001 From: David Baker Date: Mon, 18 Sep 2017 15:50:26 +0100 Subject: [PATCH 0123/1637] Use .get - it's much shorter --- synapse/push/httppusher.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index 1b6510eea4..b4140e08a8 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -244,7 +244,7 @@ class HttpPusher(object): @defer.inlineCallbacks def _build_notification_dict(self, event, tweaks, badge): - if 'format' in self.data and self.data['format'] == 'event_id_only': + if self.data.get('format') == 'event_id_only': d = { 'notification': { 'event_id': event.event_id, From 436ee0a2ea9782d003c0ab8288c50c6d3f46bdb1 Mon Sep 17 00:00:00 2001 From: David Baker Date: Mon, 18 Sep 2017 15:58:38 +0100 Subject: [PATCH 0124/1637] Also include the room_id as really it's part of the event ID --- 
synapse/push/httppusher.py | 1 + 1 file changed, 1 insertion(+) diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index b4140e08a8..62c41cd9db 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -248,6 +248,7 @@ class HttpPusher(object): d = { 'notification': { 'event_id': event.event_id, + 'room_id': event.room_id, 'counts': { 'unread': badge, }, From 2d1b7955aec60a2a5dabc7882b4081b794968d7c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 18 Sep 2017 17:13:03 +0100 Subject: [PATCH 0125/1637] Don't filter out current state events from timeline --- synapse/handlers/sync.py | 7 +++++++ synapse/visibility.py | 14 +++++++++++--- 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index c6b04a1683..bb78c25ee5 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -293,6 +293,11 @@ class SyncHandler(object): timeline_limit = sync_config.filter_collection.timeline_limit() block_all_timeline = sync_config.filter_collection.blocks_all_room_timeline() + # Pull out the current state, as we always want to include those events + # in the timeline if they're there. + current_state_ids = yield self.state.get_current_state_ids(room_id) + current_state_ids = frozenset(current_state_ids.itervalues()) + if recents is None or newly_joined_room or timeline_limit < len(recents): limited = True else: @@ -304,6 +309,7 @@ class SyncHandler(object): self.store, sync_config.user.to_string(), recents, + always_include_ids=current_state_ids, ) else: recents = [] @@ -339,6 +345,7 @@ class SyncHandler(object): self.store, sync_config.user.to_string(), loaded_recents, + always_include_ids=current_state_ids, ) loaded_recents.extend(recents) recents = loaded_recents diff --git a/synapse/visibility.py b/synapse/visibility.py index 5590b866ed..d7dbdc77ff 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -43,7 +43,8 @@ MEMBERSHIP_PRIORITY = ( @defer.inlineCallbacks -def filter_events_for_clients(store, user_tuples, events, event_id_to_state): +def filter_events_for_clients(store, user_tuples, events, event_id_to_state, + always_include_ids=frozenset()): """ Returns dict of user_id -> list of events that user is allowed to see. @@ -54,6 +55,8 @@ def filter_events_for_clients(store, user_tuples, events, event_id_to_state): * the user has not been a member of the room since the given events events ([synapse.events.EventBase]): list of events to filter + always_include_ids (set(event_id)): set of event ids to specifically + include (unless sender is ignored) """ forgotten = yield preserve_context_over_deferred(defer.gatherResults([ defer.maybeDeferred( @@ -91,6 +94,9 @@ def filter_events_for_clients(store, user_tuples, events, event_id_to_state): if not event.is_state() and event.sender in ignore_list: return False + if event.event_id in always_include_ids: + return True + state = event_id_to_state[event.event_id] # get the room_visibility at the time of the event. 
@@ -189,7 +195,8 @@ def filter_events_for_clients(store, user_tuples, events, event_id_to_state): @defer.inlineCallbacks -def filter_events_for_client(store, user_id, events, is_peeking=False): +def filter_events_for_client(store, user_id, events, is_peeking=False, + always_include_ids=frozenset()): """ Check which events a user is allowed to see @@ -213,6 +220,7 @@ def filter_events_for_client(store, user_id, events, is_peeking=False): types=types ) res = yield filter_events_for_clients( - store, [(user_id, is_peeking)], events, event_id_to_state + store, [(user_id, is_peeking)], events, event_id_to_state, + always_include_ids=always_include_ids, ) defer.returnValue(res.get(user_id, [])) From 290777b3d96df17292d40de240f7bd7b162fea4e Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 18 Sep 2017 18:31:01 +0100 Subject: [PATCH 0126/1637] Clean up and document handling of logcontexts in Keyring (#2452) I'm still unclear on what the intended behaviour for `verify_json_objects_for_server` is, but at least I now understand the behaviour of most of the things it calls... --- synapse/crypto/keyring.py | 64 +++++++++++++++++-------------- tests/crypto/test_keyring.py | 74 ++++++++++++++++++++++++++++++++++++ 2 files changed, 110 insertions(+), 28 deletions(-) create mode 100644 tests/crypto/test_keyring.py diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 1bb27edc0f..51851d04e5 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd +# Copyright 2017 New Vector Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,10 +16,9 @@ from synapse.crypto.keyclient import fetch_server_key from synapse.api.errors import SynapseError, Codes -from synapse.util import unwrapFirstError -from synapse.util.async import ObservableDeferred +from synapse.util import unwrapFirstError, logcontext from synapse.util.logcontext import ( - preserve_context_over_deferred, preserve_context_over_fn, PreserveLoggingContext, + preserve_context_over_fn, PreserveLoggingContext, preserve_fn ) from synapse.util.metrics import Measure @@ -74,6 +74,11 @@ class Keyring(object): self.perspective_servers = self.config.perspectives self.hs = hs + # map from server name to Deferred. Has an entry for each server with + # an ongoing key download; the Deferred completes once the download + # completes. + # + # These are regular, logcontext-agnostic Deferreds. self.key_downloads = {} def verify_json_for_server(self, server_name, json_object): @@ -82,7 +87,7 @@ class Keyring(object): )[0] def verify_json_objects_for_server(self, server_and_json): - """Bulk verfies signatures of json objects, bulk fetching keys as + """Bulk verifies signatures of json objects, bulk fetching keys as necessary. Args: @@ -212,7 +217,13 @@ class Keyring(object): Args: server_names (list): list of server_names we want to lookup server_to_deferred (dict): server_name to deferred which gets - resolved once we've finished looking up keys for that server + resolved once we've finished looking up keys for that server. + The Deferreds should be regular twisted ones which call their + callbacks with no logcontext. + + Returns: a Deferred which resolves once all key lookups for the given + servers have completed. Follows the synapse rules of logcontext + preservation. 
""" while True: wait_on = [ @@ -226,15 +237,13 @@ class Keyring(object): else: break + def rm(r, server_name_): + self.key_downloads.pop(server_name_, None) + return r + for server_name, deferred in server_to_deferred.items(): - d = ObservableDeferred(preserve_context_over_deferred(deferred)) - self.key_downloads[server_name] = d - - def rm(r, server_name): - self.key_downloads.pop(server_name, None) - return r - - d.addBoth(rm, server_name) + self.key_downloads[server_name] = deferred + deferred.addBoth(rm, server_name) def get_server_verify_keys(self, verify_requests): """Tries to find at least one key for each verify request @@ -333,7 +342,7 @@ class Keyring(object): Deferred: resolves to dict[str, dict[str, VerifyKey]]: map from server_name -> key_id -> VerifyKey """ - res = yield preserve_context_over_deferred(defer.gatherResults( + res = yield logcontext.make_deferred_yieldable(defer.gatherResults( [ preserve_fn(self.store.get_server_verify_keys)( server_name, key_ids @@ -341,7 +350,7 @@ class Keyring(object): for server_name, key_ids in server_name_and_key_ids ], consumeErrors=True, - )).addErrback(unwrapFirstError) + ).addErrback(unwrapFirstError)) defer.returnValue(dict(res)) @@ -362,13 +371,13 @@ class Keyring(object): ) defer.returnValue({}) - results = yield preserve_context_over_deferred(defer.gatherResults( + results = yield logcontext.make_deferred_yieldable(defer.gatherResults( [ preserve_fn(get_key)(p_name, p_keys) for p_name, p_keys in self.perspective_servers.items() ], consumeErrors=True, - )).addErrback(unwrapFirstError) + ).addErrback(unwrapFirstError)) union_of_keys = {} for result in results: @@ -402,13 +411,13 @@ class Keyring(object): defer.returnValue(keys) - results = yield preserve_context_over_deferred(defer.gatherResults( + results = yield logcontext.make_deferred_yieldable(defer.gatherResults( [ preserve_fn(get_key)(server_name, key_ids) for server_name, key_ids in server_name_and_key_ids ], consumeErrors=True, - )).addErrback(unwrapFirstError) + ).addErrback(unwrapFirstError)) merged = {} for result in results: @@ -485,7 +494,7 @@ class Keyring(object): for server_name, response_keys in processed_response.items(): keys.setdefault(server_name, {}).update(response_keys) - yield preserve_context_over_deferred(defer.gatherResults( + yield logcontext.make_deferred_yieldable(defer.gatherResults( [ preserve_fn(self.store_keys)( server_name=server_name, @@ -495,7 +504,7 @@ class Keyring(object): for server_name, response_keys in keys.items() ], consumeErrors=True - )).addErrback(unwrapFirstError) + ).addErrback(unwrapFirstError)) defer.returnValue(keys) @@ -543,7 +552,7 @@ class Keyring(object): keys.update(response_keys) - yield preserve_context_over_deferred(defer.gatherResults( + yield logcontext.make_deferred_yieldable(defer.gatherResults( [ preserve_fn(self.store_keys)( server_name=key_server_name, @@ -553,7 +562,7 @@ class Keyring(object): for key_server_name, verify_keys in keys.items() ], consumeErrors=True - )).addErrback(unwrapFirstError) + ).addErrback(unwrapFirstError)) defer.returnValue(keys) @@ -619,7 +628,7 @@ class Keyring(object): response_keys.update(verify_keys) response_keys.update(old_verify_keys) - yield preserve_context_over_deferred(defer.gatherResults( + yield logcontext.make_deferred_yieldable(defer.gatherResults( [ preserve_fn(self.store.store_server_keys_json)( server_name=server_name, @@ -632,7 +641,7 @@ class Keyring(object): for key_id in updated_key_ids ], consumeErrors=True, - )).addErrback(unwrapFirstError) + 
).addErrback(unwrapFirstError)) results[server_name] = response_keys @@ -710,7 +719,6 @@ class Keyring(object): defer.returnValue(verify_keys) - @defer.inlineCallbacks def store_keys(self, server_name, from_server, verify_keys): """Store a collection of verify keys for a given server Args: @@ -721,7 +729,7 @@ class Keyring(object): A deferred that completes when the keys are stored. """ # TODO(markjh): Store whether the keys have expired. - yield preserve_context_over_deferred(defer.gatherResults( + return logcontext.make_deferred_yieldable(defer.gatherResults( [ preserve_fn(self.store.store_server_verify_key)( server_name, server_name, key.time_added, key @@ -729,4 +737,4 @@ class Keyring(object): for key_id, key in verify_keys.items() ], consumeErrors=True, - )).addErrback(unwrapFirstError) + ).addErrback(unwrapFirstError)) diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py new file mode 100644 index 0000000000..da2c9e44e7 --- /dev/null +++ b/tests/crypto/test_keyring.py @@ -0,0 +1,74 @@ +# -*- coding: utf-8 -*- +# Copyright 2017 New Vector Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from synapse.crypto import keyring +from synapse.util.logcontext import LoggingContext +from tests import utils, unittest +from twisted.internet import defer + + +class KeyringTestCase(unittest.TestCase): + @defer.inlineCallbacks + def setUp(self): + self.hs = yield utils.setup_test_homeserver(handlers=None) + + @defer.inlineCallbacks + def test_wait_for_previous_lookups(self): + sentinel_context = LoggingContext.current_context() + + kr = keyring.Keyring(self.hs) + + def check_context(_, expected): + self.assertEquals( + LoggingContext.current_context().test_key, expected + ) + + lookup_1_deferred = defer.Deferred() + lookup_2_deferred = defer.Deferred() + + with LoggingContext("one") as context_one: + context_one.test_key = "one" + + wait_1_deferred = kr.wait_for_previous_lookups( + ["server1"], + {"server1": lookup_1_deferred}, + ) + + # there were no previous lookups, so the deferred should be ready + self.assertTrue(wait_1_deferred.called) + # ... so we should have preserved the LoggingContext. + self.assertIs(LoggingContext.current_context(), context_one) + wait_1_deferred.addBoth(check_context, "one") + + with LoggingContext("two") as context_two: + context_two.test_key = "two" + + # set off another wait. It should block because the first lookup + # hasn't yet completed. + wait_2_deferred = kr.wait_for_previous_lookups( + ["server1"], + {"server1": lookup_2_deferred}, + ) + self.assertFalse(wait_2_deferred.called) + # ... so we should have reset the LoggingContext. + self.assertIs(LoggingContext.current_context(), sentinel_context) + wait_2_deferred.addBoth(check_context, "two") + + # let the first lookup complete (in the sentinel context) + lookup_1_deferred.callback(None) + + # now the second wait should complete and restore our + # loggingcontext. 
+ yield wait_2_deferred From 3f405b34e9976df2f93b9ef75ae00c634976e3a3 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 19 Sep 2017 08:52:52 +0100 Subject: [PATCH 0127/1637] Fix overzealous kicking of guest users (#2453) We should only kick guest users if the guest access event is authorised. --- synapse/handlers/federation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index b790a7c2ef..4669199b2d 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1606,7 +1606,7 @@ class FederationHandler(BaseHandler): context.rejected = RejectedReason.AUTH_ERROR - if event.type == EventTypes.GuestAccess: + if event.type == EventTypes.GuestAccess and not context.rejected: yield self.maybe_kick_guest_users(event) defer.returnValue(context) From 93e504d04e5e92ec1d54b3c7c860adc2cfb0e9f4 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 19 Sep 2017 10:35:35 +0100 Subject: [PATCH 0128/1637] Ensure that creator of group sees group down /sync --- synapse/handlers/groups_local.py | 34 ++++++++++++++++++++++++++++---- synapse/handlers/sync.py | 1 + 2 files changed, 31 insertions(+), 4 deletions(-) diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index 1950c12f1f..b4833f8ef8 100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -136,12 +136,38 @@ class GroupsLocalHandler(object): res = yield self.groups_server_handler.create_group( group_id, user_id, content ) - defer.returnValue(res) + local_attestation = None + remote_attestation = None + else: + local_attestation = self.attestations.create_attestation(group_id, user_id) + content["attestation"] = local_attestation - content["user_profile"] = yield self.profile_handler.get_profile(user_id) - res = yield self.transport_client.create_group( - get_domain_from_id(group_id), group_id, user_id, content, + content["user_profile"] = yield self.profile_handler.get_profile(user_id) + + res = yield self.transport_client.create_group( + get_domain_from_id(group_id), group_id, user_id, content, + ) + + remote_attestation = res["attestation"] + yield self.attestations.verify_attestation( + remote_attestation, + group_id=group_id, + user_id=user_id, + ) + + is_publicised = content.get("publicise", False) + token = yield self.store.register_user_group_membership( + group_id, user_id, + membership="join", + is_admin=True, + local_attestation=local_attestation, + remote_attestation=remote_attestation, + is_publicised=is_publicised, ) + self.notifier.on_new_event( + "groups_key", token, users=[user_id], + ) + defer.returnValue(res) @defer.inlineCallbacks diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index f2e4ffcec6..69c1bc189e 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -637,6 +637,7 @@ class SyncHandler(object): if membership == "join": if gtype == "membership": + # TODO: Add profile content.pop("membership", None) joined[group_id] = content["content"] else: From 5ed109d59f46c5185395f7c76050274fdd6abc15 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 19 Sep 2017 12:20:11 +0100 Subject: [PATCH 0129/1637] PoC for filtering spammy events (#2456) Demonstration of how you might add some hooks to filter out spammy events. 
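By way of illustration, a deployment might flesh the hook out along the following lines. This is only a sketch: it assumes the check_event_for_spam() signature introduced below, and the phrase block-list is entirely made up.

    # Hypothetical block-list driven variant of the new hook; not part of
    # this patch.
    SPAMMY_PHRASES = ["buy cheap widgets"]

    def check_event_for_spam(event):
        # events without a text body cannot match a phrase block-list
        if not hasattr(event, "content") or "body" not in event.content:
            return False
        body = event.content["body"].lower()
        return any(phrase in body for phrase in SPAMMY_PHRASES)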
--- synapse/events/spamcheck.py | 38 ++++++++++++++++++++++++ synapse/federation/federation_base.py | 42 ++++++++++++++++----------- synapse/handlers/message.py | 8 ++++- 3 files changed, 70 insertions(+), 18 deletions(-) create mode 100644 synapse/events/spamcheck.py diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py new file mode 100644 index 0000000000..3eb4eab26a --- /dev/null +++ b/synapse/events/spamcheck.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- +# Copyright 2017 New Vector Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +def check_event_for_spam(event): + """Checks if a given event is considered "spammy" by this server. + + If the server considers an event spammy, then it will be rejected if + sent by a local user. If it is sent by a user on another server, then + users receive a blank event. + + Args: + event (synapse.events.EventBase): the event to be checked + + Returns: + bool: True if the event is spammy. + """ + if not hasattr(event, "content") or "body" not in event.content: + return False + + # for example: + # + # if "the third flower is green" in event.content["body"]: + # return True + + return False diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index 2339cc9034..28eaab2cef 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -12,21 +12,15 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - - -from twisted.internet import defer - -from synapse.events.utils import prune_event - -from synapse.crypto.event_signing import check_event_content_hash - -from synapse.api.errors import SynapseError - -from synapse.util import unwrapFirstError -from synapse.util.logcontext import preserve_fn, preserve_context_over_deferred - import logging +from synapse.api.errors import SynapseError +from synapse.crypto.event_signing import check_event_content_hash +from synapse.events import spamcheck +from synapse.events.utils import prune_event +from synapse.util import unwrapFirstError +from synapse.util.logcontext import preserve_context_over_deferred, preserve_fn +from twisted.internet import defer logger = logging.getLogger(__name__) @@ -117,12 +111,18 @@ class FederationBase(object): return self._check_sigs_and_hashes([pdu])[0] def _check_sigs_and_hashes(self, pdus): - """Throws a SynapseError if a PDU does not have the correct - signatures. + """Checks that each of the received events is correctly signed by the + sending server. + + Args: + pdus (list[FrozenEvent]): the events to be checked Returns: - FrozenEvent: Either the given event or it redacted if it failed the - content hash check. + list[Deferred]: for each input event, a deferred which: + * returns the original event if the checks pass + * returns a redacted version of the event (if the signature + matched but the hash did not) + * throws a SynapseError if the signature check failed.
""" redacted_pdus = [ @@ -142,6 +142,14 @@ class FederationBase(object): pdu.event_id, pdu.get_pdu_json() ) return redacted + + if spamcheck.check_event_for_spam(pdu): + logger.warn( + "Event contains spam, redacting %s: %s", + pdu.event_id, pdu.get_pdu_json() + ) + return redacted + return pdu def errback(failure, pdu): diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index be4f123c54..da18bf23db 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -12,7 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - +from synapse.events import spamcheck from twisted.internet import defer from synapse.api.constants import EventTypes, Membership @@ -321,6 +321,12 @@ class MessageHandler(BaseHandler): token_id=requester.access_token_id, txn_id=txn_id ) + + if spamcheck.check_event_for_spam(event): + raise SynapseError( + 403, "Spam is not permitted here", Codes.FORBIDDEN + ) + yield self.send_nonmember_event( requester, event, From 2eabdf3f9860c78598d026574807da463bf40f2e Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 19 Sep 2017 12:18:01 +0100 Subject: [PATCH 0130/1637] add some comments to on_exchange_third_party_invite_request --- synapse/handlers/federation.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 4669199b2d..2637f41dcd 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -2090,6 +2090,14 @@ class FederationHandler(BaseHandler): @defer.inlineCallbacks @log_function def on_exchange_third_party_invite_request(self, origin, room_id, event_dict): + """Handle an exchange_third_party_invite request from a remote server + + The remote server will call this when it wants to turn a 3pid invite + into a normal m.room.member invite. + + Returns: + Deferred: resolves (to None) + """ builder = self.event_builder_factory.new(event_dict) message_handler = self.hs.get_handlers().message_handler @@ -2108,9 +2116,12 @@ class FederationHandler(BaseHandler): raise e yield self._check_signature(event, context) + # XXX we send the invite here, but send_membership_event also sends it, + # so we end up making two requests. I think this is redundant. returned_invite = yield self.send_invite(origin, event) # TODO: Make sure the signatures actually are correct. event.signatures.update(returned_invite.signatures) + member_handler = self.hs.get_handlers().room_member_handler yield member_handler.send_membership_event(None, event, context) From aa620d09a01c226d7a6fbc0d839d8abd347a2b2e Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 19 Sep 2017 16:08:14 +0100 Subject: [PATCH 0131/1637] Add a config option to block all room invites (#2457) - allows sysadmins the ability to lock down their servers so that people can't send their users room invites. --- synapse/api/auth.py | 8 ++++++++ synapse/config/server.py | 10 ++++++++++ synapse/handlers/federation.py | 3 +++ synapse/handlers/room_member.py | 22 ++++++++++++++++++++++ tests/utils.py | 1 + 5 files changed, 44 insertions(+) diff --git a/synapse/api/auth.py b/synapse/api/auth.py index e3da45b416..72858cca1f 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -519,6 +519,14 @@ class Auth(object): ) def is_server_admin(self, user): + """ Check if the given user is a local server admin. 
+ + Args: + user (str): mxid of user to check + + Returns: + bool: True if the user is an admin + """ return self.store.is_server_admin(user) @defer.inlineCallbacks diff --git a/synapse/config/server.py b/synapse/config/server.py index 89d61a0503..c9a1715f1f 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -43,6 +43,12 @@ class ServerConfig(Config): self.filter_timeline_limit = config.get("filter_timeline_limit", -1) + # Whether we should block invites sent to users on this server + # (other than those sent by local server admins) + self.block_non_admin_invites = config.get( + "block_non_admin_invites", False, + ) + if self.public_baseurl is not None: if self.public_baseurl[-1] != '/': self.public_baseurl += '/' @@ -194,6 +200,10 @@ class ServerConfig(Config): # and sync operations. The default value is -1, means no upper limit. # filter_timeline_limit: 5000 + # Whether room invites to users on this server should be blocked + # (except those sent by local server admins). The default is False. + # block_non_admin_invites: True + # List of ports that Synapse should listen on, their purpose and their # configuration. listeners: diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 2637f41dcd..18f87cad67 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1074,6 +1074,9 @@ class FederationHandler(BaseHandler): if is_blocked: raise SynapseError(403, "This room has been blocked on this server") + if self.hs.config.block_non_admin_invites: + raise SynapseError(403, "This server does not accept room invites") + membership = event.content.get("membership") if event.type != EventTypes.Member or membership != Membership.INVITE: raise SynapseError(400, "The event was not an m.room.member invite event") diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index b3f979b246..9a498c2d3e 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -191,6 +191,8 @@ class RoomMemberHandler(BaseHandler): if action in ["kick", "unban"]: effective_membership_state = "leave" + # if this is a join with a 3pid signature, we may need to turn a 3pid + # invite into a normal invite before we can handle the join. 
if third_party_signed is not None: replication = self.hs.get_replication_layer() yield replication.exchange_third_party_invite( @@ -208,6 +210,16 @@ class RoomMemberHandler(BaseHandler): if is_blocked: raise SynapseError(403, "This room has been blocked on this server") + if (effective_membership_state == "invite" and + self.hs.config.block_non_admin_invites): + is_requester_admin = yield self.auth.is_server_admin( + requester.user, + ) + if not is_requester_admin: + raise SynapseError( + 403, "Invites have been disabled on this server", + ) + latest_event_ids = yield self.store.get_latest_event_ids_in_room(room_id) current_state_ids = yield self.state_handler.get_current_state_ids( room_id, latest_event_ids=latest_event_ids, @@ -471,6 +483,16 @@ class RoomMemberHandler(BaseHandler): requester, txn_id ): + if self.hs.config.block_non_admin_invites: + is_requester_admin = yield self.auth.is_server_admin( + requester.user, + ) + if not is_requester_admin: + raise SynapseError( + 403, "Invites have been disabled on this server", + Codes.FORBIDDEN, + ) + invitee = yield self._lookup_3pid( id_server, medium, address ) diff --git a/tests/utils.py b/tests/utils.py index 4f7e32b3ab..3c81a3e16d 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -56,6 +56,7 @@ def setup_test_homeserver(name="test", datastore=None, config=None, **kargs): config.worker_replication_url = "" config.worker_app = None config.email_enable_notifs = False + config.block_non_admin_invites = False config.use_frozen_dicts = True config.database_config = {"name": "sqlite3"} From 9864efa5321ad5afa522d9ecb3eb48e1f50fb852 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 19 Sep 2017 23:25:44 +0100 Subject: [PATCH 0132/1637] Fix concurrent server_key requests (#2458) Fix a bug where we could end up firing off multiple requests for server_keys for the same server at the same time. --- synapse/crypto/keyring.py | 4 ++- tests/crypto/test_keyring.py | 58 ++++++++++++++++++++++++++++++++++-- 2 files changed, 58 insertions(+), 4 deletions(-) diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 51851d04e5..ebf4e2e7a6 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -201,7 +201,9 @@ class Keyring(object): server_name = verify_request.server_name request_id = id(verify_request) server_to_request_ids.setdefault(server_name, set()).add(request_id) - deferred.addBoth(remove_deferreds, server_name, verify_request) + verify_request.deferred.addBoth( + remove_deferreds, server_name, verify_request, + ) # Pass those keys to handle_key_deferred so that the json object # signatures can be verified diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index da2c9e44e7..2e5878f087 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -12,17 +12,27 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- +import signedjson +from mock import Mock +from synapse.api.errors import SynapseError from synapse.crypto import keyring +from synapse.util import async from synapse.util.logcontext import LoggingContext -from tests import utils, unittest +from tests import unittest, utils from twisted.internet import defer class KeyringTestCase(unittest.TestCase): @defer.inlineCallbacks def setUp(self): - self.hs = yield utils.setup_test_homeserver(handlers=None) + self.http_client = Mock() + self.hs = yield utils.setup_test_homeserver( + handlers=None, + http_client=self.http_client, + ) + self.hs.config.perspectives = { + "persp_server": {"k": "v"} + } @defer.inlineCallbacks def test_wait_for_previous_lookups(self): @@ -72,3 +82,45 @@ class KeyringTestCase(unittest.TestCase): # now the second wait should complete and restore our # loggingcontext. yield wait_2_deferred + + @defer.inlineCallbacks + def test_verify_json_objects_for_server_awaits_previous_requests(self): + key1 = signedjson.key.generate_signing_key(1) + + kr = keyring.Keyring(self.hs) + json1 = {} + signedjson.sign.sign_json(json1, "server1", key1) + + self.http_client.post_json.return_value = defer.Deferred() + + # start off a first set of lookups + res_deferreds = kr.verify_json_objects_for_server( + [("server1", json1), + ("server2", {}) + ] + ) + + # the unsigned json should be rejected pretty quickly + try: + yield res_deferreds[1] + self.assertFalse("unsigned json didn't cause a failure") + except SynapseError: + pass + + self.assertFalse(res_deferreds[0].called) + + # wait a tick for it to send the request to the perspectives server + # (it first tries the datastore) + yield async.sleep(0.005) + self.http_client.post_json.assert_called_once() + + # a second request for a server with outstanding requests should + # block rather than start a second call + self.http_client.post_json.reset_mock() + self.http_client.post_json.return_value = defer.Deferred() + + kr.verify_json_objects_for_server( + [("server1", json1)], + ) + yield async.sleep(0.005) + self.http_client.post_json.assert_not_called() From fcf2c0fd1aa4d85df0bdb43bc8411ad4ad988a6f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 20 Sep 2017 01:32:42 +0100 Subject: [PATCH 0133/1637] Remove redundant `preserve_fn` preserve_fn is a no-op unless the wrapped function returns a Deferred. verify_json_objects_for_server returns a list, so this is doing nothing. 
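A minimal sketch of the point, assuming only what is stated above about preserve_fn (that wrapping is observable solely when the callable returns a Deferred whose callbacks must run in the right logcontext):

    from twisted.internet import defer
    from synapse.util.logcontext import preserve_fn

    def returns_list():
        return [1, 2, 3]           # plain value: preserve_fn changes nothing

    def returns_deferred():
        return defer.succeed(42)   # only here does preserve_fn have logcontext work to do

    # wrapping the list-returning function behaves identically to calling it
    assert preserve_fn(returns_list)() == [1, 2, 3]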
--- synapse/federation/federation_base.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index 28eaab2cef..cabed33f74 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -19,7 +19,7 @@ from synapse.crypto.event_signing import check_event_content_hash from synapse.events import spamcheck from synapse.events.utils import prune_event from synapse.util import unwrapFirstError -from synapse.util.logcontext import preserve_context_over_deferred, preserve_fn +from synapse.util.logcontext import preserve_context_over_deferred from twisted.internet import defer logger = logging.getLogger(__name__) @@ -130,7 +130,7 @@ class FederationBase(object): for pdu in pdus ] - deferreds = preserve_fn(self.keyring.verify_json_objects_for_server)([ + deferreds = self.keyring.verify_json_objects_for_server([ (p.origin, p.get_pdu_json()) for p in redacted_pdus ]) From e76d1135dd26305e0ff4c5d8e41b9dff204d72cf Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 20 Sep 2017 01:32:42 +0100 Subject: [PATCH 0134/1637] Invalidate signing key cache when we get an update This might make the cache slightly more efficient. --- synapse/storage/keys.py | 41 ++++++++++++++++++++++++----------------- 1 file changed, 24 insertions(+), 17 deletions(-) diff --git a/synapse/storage/keys.py b/synapse/storage/keys.py index 3b5e0a4fb9..87aeaf71d6 100644 --- a/synapse/storage/keys.py +++ b/synapse/storage/keys.py @@ -113,30 +113,37 @@ class KeyStore(SQLBaseStore): keys[key_id] = key defer.returnValue(keys) - @defer.inlineCallbacks def store_server_verify_key(self, server_name, from_server, time_now_ms, verify_key): """Stores a NACL verification key for the given server. Args: server_name (str): The name of the server. - key_id (str): The version of the key for the server. from_server (str): Where the verification key was looked up - ts_now_ms (int): The time now in milliseconds - verification_key (VerifyKey): The NACL verify key. + time_now_ms (int): The time now in milliseconds + verify_key (nacl.signing.VerifyKey): The NACL verify key.
""" - yield self._simple_upsert( - table="server_signature_keys", - keyvalues={ - "server_name": server_name, - "key_id": "%s:%s" % (verify_key.alg, verify_key.version), - }, - values={ - "from_server": from_server, - "ts_added_ms": time_now_ms, - "verify_key": buffer(verify_key.encode()), - }, - desc="store_server_verify_key", - ) + key_id = "%s:%s" % (verify_key.alg, verify_key.version) + + def _txn(txn): + self._simple_upsert_txn( + txn, + table="server_signature_keys", + keyvalues={ + "server_name": server_name, + "key_id": key_id, + }, + values={ + "from_server": from_server, + "ts_added_ms": time_now_ms, + "verify_key": buffer(verify_key.encode()), + }, + ) + txn.call_after( + self._get_server_verify_key.invalidate, + (server_name, key_id) + ) + + return self.runInteraction("store_server_verify_key", _txn) def store_server_keys_json(self, server_name, key_id, from_server, ts_now_ms, ts_expires_ms, key_json_bytes): From dd1ea9763a79f49403964667114a60f71ac1f0bf Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 20 Sep 2017 01:32:42 +0100 Subject: [PATCH 0135/1637] Fix incorrect key_ids in error message --- synapse/crypto/keyring.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index ebf4e2e7a6..7d142c1b96 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -144,7 +144,7 @@ class Keyring(object): ) raise SynapseError( 401, - "No key for %s with id %s" % (server_name, key_ids), + "No key for %s with id %s" % (server_name, verify_request.key_ids), Codes.UNAUTHORIZED, ) From 2d511defd9aa85b56222381efedc63c9f6045087 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 20 Sep 2017 01:32:42 +0100 Subject: [PATCH 0136/1637] pull out handle_key_deferred to top level There's no need for this to be a nested definition; pulling it out not only makes it more efficient, but makes it easier to check that it's not accessing any local variables it shouldn't be. 
--- synapse/crypto/keyring.py | 87 ++++++++++++++++++++------------------- 1 file changed, 44 insertions(+), 43 deletions(-) diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 7d142c1b96..0033ba06ba 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -122,48 +122,6 @@ class Keyring(object): verify_requests.append(verify_request) - @defer.inlineCallbacks - def handle_key_deferred(verify_request): - server_name = verify_request.server_name - try: - _, key_id, verify_key = yield verify_request.deferred - except IOError as e: - logger.warn( - "Got IOError when downloading keys for %s: %s %s", - server_name, type(e).__name__, str(e.message), - ) - raise SynapseError( - 502, - "Error downloading keys for %s" % (server_name,), - Codes.UNAUTHORIZED, - ) - except Exception as e: - logger.exception( - "Got Exception when downloading keys for %s: %s %s", - server_name, type(e).__name__, str(e.message), - ) - raise SynapseError( - 401, - "No key for %s with id %s" % (server_name, verify_request.key_ids), - Codes.UNAUTHORIZED, - ) - - json_object = verify_request.json_object - - logger.debug("Got key %s %s:%s for server %s, verifying" % ( - key_id, verify_key.alg, verify_key.version, server_name, - )) - try: - verify_signed_json(json_object, server_name, verify_key) - except: - raise SynapseError( - 401, - "Invalid signature for server %s with key %s:%s" % ( - server_name, verify_key.alg, verify_key.version - ), - Codes.UNAUTHORIZED, - ) - server_to_deferred = { server_name: defer.Deferred() for server_name, _ in server_and_json @@ -208,7 +166,7 @@ class Keyring(object): # Pass those keys to handle_key_deferred so that the json object # signatures can be verified return [ - preserve_context_over_fn(handle_key_deferred, verify_request) + preserve_context_over_fn(_handle_key_deferred, verify_request) for verify_request in verify_requests ] @@ -740,3 +698,46 @@ class Keyring(object): ], consumeErrors=True, ).addErrback(unwrapFirstError)) + + +@defer.inlineCallbacks +def _handle_key_deferred(verify_request): + server_name = verify_request.server_name + try: + _, key_id, verify_key = yield verify_request.deferred + except IOError as e: + logger.warn( + "Got IOError when downloading keys for %s: %s %s", + server_name, type(e).__name__, str(e.message), + ) + raise SynapseError( + 502, + "Error downloading keys for %s" % (server_name,), + Codes.UNAUTHORIZED, + ) + except Exception as e: + logger.exception( + "Got Exception when downloading keys for %s: %s %s", + server_name, type(e).__name__, str(e.message), + ) + raise SynapseError( + 401, + "No key for %s with id %s" % (server_name, verify_request.key_ids), + Codes.UNAUTHORIZED, + ) + + json_object = verify_request.json_object + + logger.debug("Got key %s %s:%s for server %s, verifying" % ( + key_id, verify_key.alg, verify_key.version, server_name, + )) + try: + verify_signed_json(json_object, server_name, verify_key) + except: + raise SynapseError( + 401, + "Invalid signature for server %s with key %s:%s" % ( + server_name, verify_key.alg, verify_key.version + ), + Codes.UNAUTHORIZED, + ) From fde63b880d32937b52a80815a08342449d9c4842 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 20 Sep 2017 01:32:42 +0100 Subject: [PATCH 0137/1637] Replace `server_and_json` with `verify_requests` This is a precursor to factoring some of this code out. 
--- synapse/crypto/keyring.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 0033ba06ba..32b107b17d 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -123,8 +123,8 @@ class Keyring(object): verify_requests.append(verify_request) server_to_deferred = { - server_name: defer.Deferred() - for server_name, _ in server_and_json + rq.server_name: defer.Deferred() + for rq in verify_requests } with PreserveLoggingContext(): @@ -132,7 +132,7 @@ class Keyring(object): # We want to wait for any previous lookups to complete before # proceeding. wait_on_deferred = self.wait_for_previous_lookups( - [server_name for server_name, _ in server_and_json], + [rq.server_name for rq in verify_requests], server_to_deferred, ) From 3b98439ecaab4707c2224d7912b3f4513c2af8b7 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 20 Sep 2017 01:32:42 +0100 Subject: [PATCH 0138/1637] Factor out _start_key_lookups ... to make it easier to see what's going on. --- synapse/crypto/keyring.py | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 32b107b17d..105de2b58b 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -122,6 +122,23 @@ class Keyring(object): verify_requests.append(verify_request) + self._start_key_lookups(verify_requests) + + # Pass those keys to handle_key_deferred so that the json object + # signatures can be verified + return [ + preserve_context_over_fn(_handle_key_deferred, rq) + for rq in verify_requests + ] + + def _start_key_lookups(self, verify_requests): + """Sets off the key fetches for each verify request + + Once each fetch completes, verify_request.deferred will be resolved. + + Args: + verify_requests (List[VerifyKeyRequest]): + """ server_to_deferred = { rq.server_name: defer.Deferred() for rq in verify_requests @@ -163,13 +180,6 @@ class Keyring(object): remove_deferreds, server_name, verify_request, ) - # Pass those keys to handle_key_deferred so that the json object - # signatures can be verified - return [ - preserve_context_over_fn(_handle_key_deferred, verify_request) - for verify_request in verify_requests - ] - @defer.inlineCallbacks def wait_for_previous_lookups(self, server_names, server_to_deferred): """Waits for any previous key lookups for the given servers to finish. From 2a4b9ea233cfffa556fa63a37cffb24bfe133d82 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 20 Sep 2017 01:32:42 +0100 Subject: [PATCH 0139/1637] Consistency for how verify_request.deferred is called Define that it is run with no log context, and make sure that happens. If we aren't careful to reset the logcontext, we can't bung the deferreds into defer.gatherResults etc. We don't actually do that directly, but we *do* resolve other deferreds from affected callbacks (notably the server_to_deferred map in _start_key_lookups), and those *do* get passed into defer.gatherResults. It turns out that this way ends up being least confusing. --- synapse/crypto/keyring.py | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 105de2b58b..22bb325cfd 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -57,7 +57,8 @@ Attributes: json_object(dict): The JSON object to verify. 
deferred(twisted.internet.defer.Deferred): A deferred (server_name, key_id, verify_key) tuple that resolves when - a verify key has been fetched + a verify key has been fetched. The deferreds' callbacks are run with no + logcontext. """ @@ -284,19 +285,21 @@ class Keyring(object): if not missing_keys: break - for verify_request in requests_missing_keys.values(): - verify_request.deferred.errback(SynapseError( - 401, - "No key for %s with id %s" % ( - verify_request.server_name, verify_request.key_ids, - ), - Codes.UNAUTHORIZED, - )) + with PreserveLoggingContext(): + for verify_request in requests_missing_keys.values(): + verify_request.deferred.errback(SynapseError( + 401, + "No key for %s with id %s" % ( + verify_request.server_name, verify_request.key_ids, + ), + Codes.UNAUTHORIZED, + )) def on_err(err): - for verify_request in verify_requests: - if not verify_request.deferred.called: - verify_request.deferred.errback(err) + with PreserveLoggingContext(): + for verify_request in verify_requests: + if not verify_request.deferred.called: + verify_request.deferred.errback(err) do_iterations().addErrback(on_err) @@ -714,7 +717,8 @@ class Keyring(object): def _handle_key_deferred(verify_request): server_name = verify_request.server_name try: - _, key_id, verify_key = yield verify_request.deferred + with PreserveLoggingContext(): + _, key_id, verify_key = yield verify_request.deferred except IOError as e: logger.warn( "Got IOError when downloading keys for %s: %s %s", From afbd773dc66d43d066d5a0b4639075a2d09cb4e5 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 20 Sep 2017 01:32:42 +0100 Subject: [PATCH 0140/1637] Add some comments to _start_key_lookups --- synapse/crypto/keyring.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 22bb325cfd..d7fd831bf9 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -140,6 +140,12 @@ class Keyring(object): Args: verify_requests (List[VerifyKeyRequest]): """ + + # create a deferred for each server we're going to look up the keys + # for; we'll resolve them once we have completed our lookups. + # These will be passed into wait_for_previous_lookups to block + # any other lookups until we have finished. + # The deferreds are called with no logcontext. server_to_deferred = { rq.server_name: defer.Deferred() for rq in verify_requests @@ -162,6 +168,8 @@ class Keyring(object): # When we've finished fetching all the keys for a given server_name, # resolve the deferred passed to `wait_for_previous_lookups` so that # any lookups waiting will proceed. + # + # map from server name to a set of request ids server_to_request_ids = {} def remove_deferreds(res, server_name, verify_request): From abdefb8a01bf67b3055e9fbe52bb11a02ffd8d9a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 20 Sep 2017 01:32:42 +0100 Subject: [PATCH 0141/1637] Fix potential race in _start_key_lookups If the verify_request.deferred has already completed, then `remove_deferreds` will be called immediately. It therefore might resolve the server_to_deferred deferred while there are still other requests for that server in flight. To avoid that, we should build the complete list of requests, and *then* add the callbacks. 
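The fix is purely an ordering change; a condensed sketch with the same names as the diff below (server_to_deferred is assumed to be the per-server map built earlier in _start_key_lookups):

    server_to_request_ids = {}

    # register every request up front, so the per-server counts are
    # complete before any callback can possibly run
    for verify_request in verify_requests:
        server_name = verify_request.server_name
        request_id = id(verify_request)
        server_to_request_ids.setdefault(server_name, set()).add(request_id)

    def remove_deferreds(res, verify_request):
        server_name = verify_request.server_name
        server_to_request_ids[server_name].discard(id(verify_request))
        if not server_to_request_ids[server_name]:
            d = server_to_deferred.pop(server_name, None)
            if d:
                d.callback(None)
        return res

    # only now attach the callbacks: a deferred that has already completed
    # fires remove_deferreds immediately, but every request is counted
    for verify_request in verify_requests:
        verify_request.deferred.addBoth(remove_deferreds, verify_request)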
--- synapse/crypto/keyring.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index d7fd831bf9..0e381c4710 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -172,7 +172,13 @@ class Keyring(object): # map from server name to a set of request ids server_to_request_ids = {} - def remove_deferreds(res, server_name, verify_request): + for verify_request in verify_requests: + server_name = verify_request.server_name + request_id = id(verify_request) + server_to_request_ids.setdefault(server_name, set()).add(request_id) + + def remove_deferreds(res, verify_request): + server_name = verify_request.server_name request_id = id(verify_request) server_to_request_ids[server_name].discard(request_id) if not server_to_request_ids[server_name]: @@ -182,11 +188,8 @@ class Keyring(object): return res for verify_request in verify_requests: - server_name = verify_request.server_name - request_id = id(verify_request) - server_to_request_ids.setdefault(server_name, set()).add(request_id) verify_request.deferred.addBoth( - remove_deferreds, server_name, verify_request, + remove_deferreds, verify_request, ) @defer.inlineCallbacks From c5b0e9f48542516a4fa82247c81e499894340cf5 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 20 Sep 2017 01:32:42 +0100 Subject: [PATCH 0142/1637] Turn _start_key_lookups into an inlineCallbacks function ... which means that logcontexts can be correctly preserved for the stuff it does. get_server_verify_keys is now called with the logcontext, so needs to preserve_fn when it fires off its nested inlineCallbacks function. Also renames get_server_verify_keys to reflect the fact it's meant to be private. --- synapse/crypto/keyring.py | 79 +++++++++++++++++++-------------------- 1 file changed, 38 insertions(+), 41 deletions(-) diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 0e381c4710..7e4cef13c1 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -123,7 +123,7 @@ class Keyring(object): verify_requests.append(verify_request) - self._start_key_lookups(verify_requests) + preserve_fn(self._start_key_lookups)(verify_requests) # Pass those keys to handle_key_deferred so that the json object # signatures can be verified @@ -132,6 +132,7 @@ class Keyring(object): for rq in verify_requests ] + @defer.inlineCallbacks def _start_key_lookups(self, verify_requests): """Sets off the key fetches for each verify request @@ -151,47 +152,43 @@ class Keyring(object): for rq in verify_requests } - with PreserveLoggingContext(): + # We want to wait for any previous lookups to complete before + # proceeding. + yield self.wait_for_previous_lookups( + [rq.server_name for rq in verify_requests], + server_to_deferred, + ) - # We want to wait for any previous lookups to complete before - # proceeding. - wait_on_deferred = self.wait_for_previous_lookups( - [rq.server_name for rq in verify_requests], - server_to_deferred, + # Actually start fetching keys. + self._get_server_verify_keys(verify_requests) + + # When we've finished fetching all the keys for a given server_name, + # resolve the deferred passed to `wait_for_previous_lookups` so that + # any lookups waiting will proceed. 
+ # + # map from server name to a set of request ids + server_to_request_ids = {} + + for verify_request in verify_requests: + server_name = verify_request.server_name + request_id = id(verify_request) + server_to_request_ids.setdefault(server_name, set()).add(request_id) + + def remove_deferreds(res, verify_request): + server_name = verify_request.server_name + request_id = id(verify_request) + server_to_request_ids[server_name].discard(request_id) + if not server_to_request_ids[server_name]: + d = server_to_deferred.pop(server_name, None) + if d: + d.callback(None) + return res + + for verify_request in verify_requests: + verify_request.deferred.addBoth( + remove_deferreds, verify_request, ) - # Actually start fetching keys. - wait_on_deferred.addBoth( - lambda _: self.get_server_verify_keys(verify_requests) - ) - - # When we've finished fetching all the keys for a given server_name, - # resolve the deferred passed to `wait_for_previous_lookups` so that - # any lookups waiting will proceed. - # - # map from server name to a set of request ids - server_to_request_ids = {} - - for verify_request in verify_requests: - server_name = verify_request.server_name - request_id = id(verify_request) - server_to_request_ids.setdefault(server_name, set()).add(request_id) - - def remove_deferreds(res, verify_request): - server_name = verify_request.server_name - request_id = id(verify_request) - server_to_request_ids[server_name].discard(request_id) - if not server_to_request_ids[server_name]: - d = server_to_deferred.pop(server_name, None) - if d: - d.callback(None) - return res - - for verify_request in verify_requests: - verify_request.deferred.addBoth( - remove_deferreds, verify_request, - ) - @defer.inlineCallbacks def wait_for_previous_lookups(self, server_names, server_to_deferred): """Waits for any previous key lookups for the given servers to finish. @@ -227,7 +224,7 @@ class Keyring(object): self.key_downloads[server_name] = deferred deferred.addBoth(rm, server_name) - def get_server_verify_keys(self, verify_requests): + def _get_server_verify_keys(self, verify_requests): """Tries to find at least one key for each verify request For each verify_request, verify_request.deferred is called back with @@ -312,7 +309,7 @@ class Keyring(object): if not verify_request.deferred.called: verify_request.deferred.errback(err) - do_iterations().addErrback(on_err) + preserve_fn(do_iterations)().addErrback(on_err) @defer.inlineCallbacks def get_keys_from_store(self, server_name_and_key_ids): From c5c24c239b63d06a6e312d86c338da60cfcee814 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 20 Sep 2017 01:32:42 +0100 Subject: [PATCH 0143/1637] Fix logcontext handling in verify_json_objects_for_server preserve_context_over_fn is essentially broken, because (a) it pointlessly drops the current logcontext before calling its wrapped function, which means we don't get any useful logcontexts for _handle_key_deferred; (b) it wraps the resulting deferred in a _PreservingContextDeferred, which is very dangerous because you then can't yield on it without leaking context back into the reactor. Instead, let's specify that the resultant deferreds call their callbacks with no logcontext. 
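A minimal sketch of the caller-side convention this settles on, using the logcontext helpers that appear in the diff below (error handling elided): the deferreds handed back by verify_json_objects_for_server run their callbacks in the sentinel logcontext, so a caller restores its own context by wrapping them in make_deferred_yieldable before yielding.

    from twisted.internet import defer

    from synapse.util import logcontext

    @defer.inlineCallbacks
    def check_signature(keyring, server_name, json_object):
        # the returned deferred fires with no logcontext ...
        d = keyring.verify_json_objects_for_server(
            [(server_name, json_object)],
        )[0]
        # ... so re-enter our logcontext when yielding on it
        yield logcontext.make_deferred_yieldable(d)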
--- synapse/crypto/keyring.py | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 7e4cef13c1..2a1d383078 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -18,7 +18,7 @@ from synapse.crypto.keyclient import fetch_server_key from synapse.api.errors import SynapseError, Codes from synapse.util import unwrapFirstError, logcontext from synapse.util.logcontext import ( - preserve_context_over_fn, PreserveLoggingContext, + PreserveLoggingContext, preserve_fn ) from synapse.util.metrics import Measure @@ -83,9 +83,11 @@ class Keyring(object): self.key_downloads = {} def verify_json_for_server(self, server_name, json_object): - return self.verify_json_objects_for_server( - [(server_name, json_object)] - )[0] + return logcontext.make_deferred_yieldable( + self.verify_json_objects_for_server( + [(server_name, json_object)] + )[0] + ) def verify_json_objects_for_server(self, server_and_json): """Bulk verifies signatures of json objects, bulk fetching keys as @@ -95,8 +97,10 @@ class Keyring(object): server_and_json (list): List of pairs of (server_name, json_object) Returns: - list of deferreds indicating success or failure to verify each - json object's signature for the given server_name. + List: for each input pair, a deferred indicating success + or failure to verify each json object's signature for the given + server_name. The deferreds run their callbacks in the sentinel + logcontext. """ verify_requests = [] @@ -127,9 +131,9 @@ class Keyring(object): # Pass those keys to handle_key_deferred so that the json object # signatures can be verified + handle = preserve_fn(_handle_key_deferred) return [ - preserve_context_over_fn(_handle_key_deferred, rq) - for rq in verify_requests + handle(rq) for rq in verify_requests ] @defer.inlineCallbacks From 72472456d82d956d957c4a68c23554f4b43eec54 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 20 Sep 2017 01:32:42 +0100 Subject: [PATCH 0144/1637] Add some more tests for Keyring --- tests/crypto/test_keyring.py | 177 +++++++++++++++++++++++++++-------- 1 file changed, 140 insertions(+), 37 deletions(-) diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index 2e5878f087..570312da84 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -12,39 +12,72 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-import signedjson +import time + +import signedjson.key +import signedjson.sign from mock import Mock from synapse.api.errors import SynapseError from synapse.crypto import keyring -from synapse.util import async +from synapse.util import async, logcontext from synapse.util.logcontext import LoggingContext from tests import unittest, utils from twisted.internet import defer +class MockPerspectiveServer(object): + def __init__(self): + self.server_name = "mock_server" + self.key = signedjson.key.generate_signing_key(0) + + def get_verify_keys(self): + vk = signedjson.key.get_verify_key(self.key) + return { + "%s:%s" % (vk.alg, vk.version): vk, + } + + def get_signed_key(self, server_name, verify_key): + key_id = "%s:%s" % (verify_key.alg, verify_key.version) + res = { + "server_name": server_name, + "old_verify_keys": {}, + "valid_until_ts": time.time() * 1000 + 3600, + "verify_keys": { + key_id: { + "key": signedjson.key.encode_verify_key_base64(verify_key) + } + } + } + signedjson.sign.sign_json(res, self.server_name, self.key) + return res + + class KeyringTestCase(unittest.TestCase): @defer.inlineCallbacks def setUp(self): + self.mock_perspective_server = MockPerspectiveServer() self.http_client = Mock() self.hs = yield utils.setup_test_homeserver( handlers=None, http_client=self.http_client, ) self.hs.config.perspectives = { - "persp_server": {"k": "v"} + self.mock_perspective_server.server_name: + self.mock_perspective_server.get_verify_keys() } + def check_context(self, _, expected): + self.assertEquals( + getattr(LoggingContext.current_context(), "test_key", None), + expected + ) + @defer.inlineCallbacks def test_wait_for_previous_lookups(self): sentinel_context = LoggingContext.current_context() kr = keyring.Keyring(self.hs) - def check_context(_, expected): - self.assertEquals( - LoggingContext.current_context().test_key, expected - ) - lookup_1_deferred = defer.Deferred() lookup_2_deferred = defer.Deferred() @@ -60,7 +93,7 @@ class KeyringTestCase(unittest.TestCase): self.assertTrue(wait_1_deferred.called) # ... so we should have preserved the LoggingContext. self.assertIs(LoggingContext.current_context(), context_one) - wait_1_deferred.addBoth(check_context, "one") + wait_1_deferred.addBoth(self.check_context, "one") with LoggingContext("two") as context_two: context_two.test_key = "two" @@ -74,7 +107,7 @@ class KeyringTestCase(unittest.TestCase): self.assertFalse(wait_2_deferred.called) # ... so we should have reset the LoggingContext. 
self.assertIs(LoggingContext.current_context(), sentinel_context) - wait_2_deferred.addBoth(check_context, "two") + wait_2_deferred.addBoth(self.check_context, "two") # let the first lookup complete (in the sentinel context) lookup_1_deferred.callback(None) @@ -89,38 +122,108 @@ class KeyringTestCase(unittest.TestCase): kr = keyring.Keyring(self.hs) json1 = {} - signedjson.sign.sign_json(json1, "server1", key1) + signedjson.sign.sign_json(json1, "server10", key1) - self.http_client.post_json.return_value = defer.Deferred() + persp_resp = { + "server_keys": [ + self.mock_perspective_server.get_signed_key( + "server10", + signedjson.key.get_verify_key(key1) + ), + ] + } + persp_deferred = defer.Deferred() - # start off a first set of lookups - res_deferreds = kr.verify_json_objects_for_server( - [("server1", json1), - ("server2", {}) - ] + @defer.inlineCallbacks + def get_perspectives(**kwargs): + self.assertEquals( + LoggingContext.current_context().test_key, "11", + ) + with logcontext.PreserveLoggingContext(): + yield persp_deferred + defer.returnValue(persp_resp) + self.http_client.post_json.side_effect = get_perspectives + + with LoggingContext("11") as context_11: + context_11.test_key = "11" + + # start off a first set of lookups + res_deferreds = kr.verify_json_objects_for_server( + [("server10", json1), + ("server11", {}) + ] + ) + + # the unsigned json should be rejected pretty quickly + self.assertTrue(res_deferreds[1].called) + try: + yield res_deferreds[1] + self.assertFalse("unsigned json didn't cause a failure") + except SynapseError: + pass + + self.assertFalse(res_deferreds[0].called) + res_deferreds[0].addBoth(self.check_context, None) + + # wait a tick for it to send the request to the perspectives server + # (it first tries the datastore) + yield async.sleep(0.005) + self.http_client.post_json.assert_called_once() + + self.assertIs(LoggingContext.current_context(), context_11) + + context_12 = LoggingContext("12") + context_12.test_key = "12" + with logcontext.PreserveLoggingContext(context_12): + # a second request for a server with outstanding requests + # should block rather than start a second call + self.http_client.post_json.reset_mock() + self.http_client.post_json.return_value = defer.Deferred() + + res_deferreds_2 = kr.verify_json_objects_for_server( + [("server10", json1)], + ) + yield async.sleep(0.005) + self.http_client.post_json.assert_not_called() + res_deferreds_2[0].addBoth(self.check_context, None) + + # complete the first request + with logcontext.PreserveLoggingContext(): + persp_deferred.callback(persp_resp) + self.assertIs(LoggingContext.current_context(), context_11) + + with logcontext.PreserveLoggingContext(): + yield res_deferreds[0] + yield res_deferreds_2[0] + + @defer.inlineCallbacks + def test_verify_json_for_server(self): + kr = keyring.Keyring(self.hs) + + key1 = signedjson.key.generate_signing_key(1) + yield self.hs.datastore.store_server_verify_key( + "server9", "", time.time() * 1000, + signedjson.key.get_verify_key(key1), ) + json1 = {} + signedjson.sign.sign_json(json1, "server9", key1) - # the unsigned json should be rejected pretty quickly - try: - yield res_deferreds[1] - self.assertFalse("unsigned json didn't cause a failure") - except SynapseError: - pass + sentinel_context = LoggingContext.current_context() - self.assertFalse(res_deferreds[0].called) + with LoggingContext("one") as context_one: + context_one.test_key = "one" - # wait a tick for it to send the request to the perspectives server - # (it first tries the datastore) - 
yield async.sleep(0.005) - self.http_client.post_json.assert_called_once() + defer = kr.verify_json_for_server("server9", {}) + try: + yield defer + self.fail("should fail on unsigned json") + except SynapseError: + pass + self.assertIs(LoggingContext.current_context(), context_one) - # a second request for a server with outstanding requests should - # block rather than start a second call - self.http_client.post_json.reset_mock() - self.http_client.post_json.return_value = defer.Deferred() + defer = kr.verify_json_for_server("server9", json1) + self.assertFalse(defer.called) + self.assertIs(LoggingContext.current_context(), sentinel_context) + yield defer - kr.verify_json_objects_for_server( - [("server1", json1)], - ) - yield async.sleep(0.005) - self.http_client.post_json.assert_not_called() + self.assertIs(LoggingContext.current_context(), context_one) From 6de74ea6d7394b63c9475e9dfff943188a9ed73b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 20 Sep 2017 01:32:42 +0100 Subject: [PATCH 0145/1637] Fix logcontexts in _check_sigs_and_hashes --- synapse/federation/federation_base.py | 108 ++++++++++++------------ synapse/federation/federation_client.py | 8 +- 2 files changed, 59 insertions(+), 57 deletions(-) diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index cabed33f74..babd9ea078 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -18,8 +18,7 @@ from synapse.api.errors import SynapseError from synapse.crypto.event_signing import check_event_content_hash from synapse.events import spamcheck from synapse.events.utils import prune_event -from synapse.util import unwrapFirstError -from synapse.util.logcontext import preserve_context_over_deferred +from synapse.util import unwrapFirstError, logcontext from twisted.internet import defer logger = logging.getLogger(__name__) @@ -51,56 +50,52 @@ class FederationBase(object): """ deferreds = self._check_sigs_and_hashes(pdus) - def callback(pdu): - return pdu + @defer.inlineCallbacks + def handle_check_result(pdu, deferred): + try: + res = yield logcontext.make_deferred_yieldable(deferred) + except SynapseError: + res = None - def errback(failure, pdu): - failure.trap(SynapseError) - return None - - def try_local_db(res, pdu): if not res: # Check local db. 
- return self.store.get_event( + res = yield self.store.get_event( pdu.event_id, allow_rejected=True, allow_none=True, ) - return res - def try_remote(res, pdu): if not res and pdu.origin != origin: - return self.get_pdu( - destinations=[pdu.origin], - event_id=pdu.event_id, - outlier=outlier, - timeout=10000, - ).addErrback(lambda e: None) - return res + try: + res = yield self.get_pdu( + destinations=[pdu.origin], + event_id=pdu.event_id, + outlier=outlier, + timeout=10000, + ) + except SynapseError: + pass - def warn(res, pdu): if not res: logger.warn( "Failed to find copy of %s with valid signature", pdu.event_id, ) - return res - for pdu, deferred in zip(pdus, deferreds): - deferred.addCallbacks( - callback, errback, errbackArgs=[pdu] - ).addCallback( - try_local_db, pdu - ).addCallback( - try_remote, pdu - ).addCallback( - warn, pdu + defer.returnValue(res) + + handle = logcontext.preserve_fn(handle_check_result) + deferreds2 = [ + handle(pdu, deferred) + for pdu, deferred in zip(pdus, deferreds) + ] + + valid_pdus = yield logcontext.make_deferred_yieldable( + defer.gatherResults( + deferreds2, + consumeErrors=True, ) - - valid_pdus = yield preserve_context_over_deferred(defer.gatherResults( - deferreds, - consumeErrors=True - )).addErrback(unwrapFirstError) + ).addErrback(unwrapFirstError) if include_none: defer.returnValue(valid_pdus) @@ -108,7 +103,9 @@ class FederationBase(object): defer.returnValue([p for p in valid_pdus if p]) def _check_sigs_and_hash(self, pdu): - return self._check_sigs_and_hashes([pdu])[0] + return logcontext.make_deferred_yieldable( + self._check_sigs_and_hashes([pdu])[0], + ) def _check_sigs_and_hashes(self, pdus): """Checks that each of the received events is correctly signed by the @@ -123,6 +120,7 @@ class FederationBase(object): * returns a redacted version of the event (if the signature matched but the hash did not) * throws a SynapseError if the signature check failed. + The deferreds run their callbacks in the sentinel logcontext. 
""" redacted_pdus = [ @@ -135,29 +133,33 @@ class FederationBase(object): for p in redacted_pdus ]) + ctx = logcontext.LoggingContext.current_context() + def callback(_, pdu, redacted): - if not check_event_content_hash(pdu): - logger.warn( - "Event content has been tampered, redacting %s: %s", - pdu.event_id, pdu.get_pdu_json() - ) - return redacted + with logcontext.PreserveLoggingContext(ctx): + if not check_event_content_hash(pdu): + logger.warn( + "Event content has been tampered, redacting %s: %s", + pdu.event_id, pdu.get_pdu_json() + ) + return redacted - if spamcheck.check_event_for_spam(pdu): - logger.warn( - "Event contains spam, redacting %s: %s", - pdu.event_id, pdu.get_pdu_json() - ) - return redacted + if spamcheck.check_event_for_spam(pdu): + logger.warn( + "Event contains spam, redacting %s: %s", + pdu.event_id, pdu.get_pdu_json() + ) + return redacted - return pdu + return pdu def errback(failure, pdu): failure.trap(SynapseError) - logger.warn( - "Signature check failed for %s", - pdu.event_id, - ) + with logcontext.PreserveLoggingContext(ctx): + logger.warn( + "Signature check failed for %s", + pdu.event_id, + ) return failure for deferred, pdu, redacted in zip(deferreds, pdus, redacted_pdus): diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 861441708b..7c5e5d957f 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -22,7 +22,7 @@ from synapse.api.constants import Membership from synapse.api.errors import ( CodeMessageException, HttpResponseException, SynapseError, ) -from synapse.util import unwrapFirstError +from synapse.util import unwrapFirstError, logcontext from synapse.util.caches.expiringcache import ExpiringCache from synapse.util.logutils import log_function from synapse.util.logcontext import preserve_fn, preserve_context_over_deferred @@ -189,10 +189,10 @@ class FederationClient(FederationBase): ] # FIXME: We should handle signature failures more gracefully. - pdus[:] = yield preserve_context_over_deferred(defer.gatherResults( + pdus[:] = yield logcontext.make_deferred_yieldable(defer.gatherResults( self._check_sigs_and_hashes(pdus), consumeErrors=True, - )).addErrback(unwrapFirstError) + ).addErrback(unwrapFirstError)) defer.returnValue(pdus) @@ -252,7 +252,7 @@ class FederationClient(FederationBase): pdu = pdu_list[0] # Check signatures are correct. - signed_pdu = yield self._check_sigs_and_hashes([pdu])[0] + signed_pdu = yield self._check_sigs_and_hash(pdu) break From 069ae2df126418b5be1c96727a578cfd1dd4e506 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 20 Sep 2017 10:52:12 +0100 Subject: [PATCH 0146/1637] Fix initial sync --- synapse/storage/group_server.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index 5433063507..b0399f8133 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -1085,7 +1085,15 @@ class GroupServerStore(SQLBaseStore): AND stream_id <= ? 
""" txn.execute(sql, (user_id, now_token,)) - return self.cursor_to_dict(txn) + return [ + { + "group_id": row[0], + "type": row[1], + "membership": row[2], + "content": json.loads(row[3]), + } + for row in txn + ] return self.runInteraction( "get_all_groups_for_user", _get_all_groups_for_user_txn, ) From 197d82dc070447b4a89a82816996f38f01ca7a04 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 20 Sep 2017 11:12:11 +0100 Subject: [PATCH 0147/1637] Correctly return next token --- synapse/storage/group_server.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index b0399f8133..2afd689d83 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -966,10 +966,11 @@ class GroupServerStore(SQLBaseStore): return next_id with self._group_updates_id_gen.get_next() as next_id: - yield self.runInteraction( + res = yield self.runInteraction( "register_user_group_membership", _register_user_group_membership_txn, next_id, ) + defer.returnValue(res) @defer.inlineCallbacks def create_group(self, group_id, user_id, name, avatar_url, short_description, From ae8d4bb0f0e39fc10275bdcc69ba08d98497624f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 21 Sep 2017 15:55:18 +0100 Subject: [PATCH 0148/1637] Keep room_id's in group summary --- synapse/groups/groups_server.py | 1 - 1 file changed, 1 deletion(-) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 94cf9788bb..25f48a11ab 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -109,7 +109,6 @@ class GroupsServerHandler(object): room_id, len(joined_users), with_alias=False, allow_private=True, ) - entry.pop("room_id", None) room_entry["profile"] = entry From bb746a9de109a6c8643cbf21bbd876a67b9f5c9d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 21 Sep 2017 15:57:22 +0100 Subject: [PATCH 0149/1637] Revert: Keep room_id's in group summary --- synapse/groups/groups_server.py | 1 + 1 file changed, 1 insertion(+) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 25f48a11ab..94cf9788bb 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -109,6 +109,7 @@ class GroupsServerHandler(object): room_id, len(joined_users), with_alias=False, allow_private=True, ) + entry.pop("room_id", None) room_entry["profile"] = entry From e1dec2f1a797122b4d72ba883e09b2d1b9eafcc9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 21 Sep 2017 16:09:57 +0100 Subject: [PATCH 0150/1637] Remove user from group summary when the leave the group --- synapse/storage/group_server.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index 2afd689d83..d0b5ad231a 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -822,6 +822,14 @@ class GroupServerStore(SQLBaseStore): "user_id": user_id, }, ) + self._simple_delete_txn( + txn, + table="group_summary_users", + keyvalues={ + "group_id": group_id, + "user_id": user_id, + }, + ) return self.runInteraction("remove_user_from_group", _remove_user_from_group_txn) def add_room_to_group(self, group_id, room_id, is_public): From 3166ed55b23d0939f08337336439d9222117c9e6 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 22 Sep 2017 14:44:17 +0100 Subject: [PATCH 0151/1637] Fix device list when rejoining room (#2461) --- synapse/handlers/sync.py | 16 ++++++++++++++-- 1 file 
changed, 14 insertions(+), 2 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index bb78c25ee5..af1b527840 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -980,7 +980,18 @@ class SyncHandler(object): # We want to figure out if we joined the room at some point since # the last sync (even if we have since left). This is to make sure # we do send down the room, and with full state, where necessary + old_state_ids = None + if room_id in joined_room_ids and non_joins: + # Always include if the user (re)joined the room, especially + # important so that device list changes are calculated correctly. + # If there are non join member events, but we are still in the room, + # then the user must have left and joined + newly_joined_rooms.append(room_id) + + # User is in the room so we don't need to do the invite/leave checks + continue + if room_id in joined_room_ids or has_join: old_state_ids = yield self.get_state_at(room_id, since_token) old_mem_ev_id = old_state_ids.get((EventTypes.Member, user_id), None) @@ -992,8 +1003,9 @@ class SyncHandler(object): if not old_mem_ev or old_mem_ev.membership != Membership.JOIN: newly_joined_rooms.append(room_id) - if room_id in joined_room_ids: - continue + # If user is in the room then we don't need to do the invite/leave checks + if room_id in joined_room_ids: + continue if not non_joins: continue From f496399ac4a54410a88d3aba8fe66b54e74bc3cf Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Fri, 22 Sep 2017 15:34:14 +0100 Subject: [PATCH 0152/1637] fix thinko'd docstring --- synapse/events/spamcheck.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py index 3eb4eab26a..56fa9e556e 100644 --- a/synapse/events/spamcheck.py +++ b/synapse/events/spamcheck.py @@ -19,7 +19,7 @@ def check_event_for_spam(event): If the server considers an event spammy, then it will be rejected if sent by a local user. If it is sent by a user on another server, then - users + users receive a blank event. Args: event (synapse.events.EventBase): the event to be checked From f65e31d22fe9a0b07053ee15004e106ca787048b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 22 Sep 2017 20:26:47 +0100 Subject: [PATCH 0153/1637] Do an AAAA lookup on SRV record targets (#2462) Support SRV records which point at AAAA records, as well as A records. Fixes https://github.com/matrix-org/synapse/issues/2405 --- synapse/http/endpoint.py | 116 ++++++++++++++++++++++++++++++++------- tests/test_dns.py | 26 +++++++-- 2 files changed, 118 insertions(+), 24 deletions(-) diff --git a/synapse/http/endpoint.py b/synapse/http/endpoint.py index d8923c9abb..241b17f2cb 100644 --- a/synapse/http/endpoint.py +++ b/synapse/http/endpoint.py @@ -12,6 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import socket from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS from twisted.internet import defer, reactor @@ -30,7 +31,10 @@ logger = logging.getLogger(__name__) SERVER_CACHE = {} - +# our record of an individual server which can be tried to reach a destination. +# +# "host" is actually a dotted-quad or ipv6 address string. Except when there's +# no SRV record, in which case it is the original hostname. 
_Server = collections.namedtuple( "_Server", "priority weight host port expires" ) @@ -219,9 +223,10 @@ class SRVClientEndpoint(object): return self.default_server else: raise ConnectError( - "Not server available for %s" % self.service_name + "No server available for %s" % self.service_name ) + # look for all servers with the same priority min_priority = self.servers[0].priority weight_indexes = list( (index, server.weight + 1) @@ -231,11 +236,22 @@ class SRVClientEndpoint(object): total_weight = sum(weight for index, weight in weight_indexes) target_weight = random.randint(0, total_weight) - for index, weight in weight_indexes: target_weight -= weight if target_weight <= 0: server = self.servers[index] + # XXX: this looks totally dubious: + # + # (a) we never reuse a server until we have been through + # all of the servers at the same priority, so if the + # weights are A: 100, B:1, we always do ABABAB instead of + # AAAA...AAAB (approximately). + # + # (b) After using all the servers at the lowest priority, + # we move onto the next priority. We should only use the + # second priority if servers at the top priority are + # unreachable. + # del self.servers[index] self.used_servers.append(server) return server @@ -280,26 +296,21 @@ def resolve_service(service_name, dns_client=client, cache=SERVER_CACHE, clock=t continue payload = answer.payload - host = str(payload.target) - srv_ttl = answer.ttl - try: - answers, _, _ = yield dns_client.lookupAddress(host) - except DNSNameError: - continue + hosts = yield _get_hosts_for_srv_record( + dns_client, str(payload.target) + ) - for answer in answers: - if answer.type == dns.A and answer.payload: - ip = answer.payload.dottedQuad() - host_ttl = min(srv_ttl, answer.ttl) + for (ip, ttl) in hosts: + host_ttl = min(answer.ttl, ttl) - servers.append(_Server( - host=ip, - port=int(payload.port), - priority=int(payload.priority), - weight=int(payload.weight), - expires=int(clock.time()) + host_ttl, - )) + servers.append(_Server( + host=ip, + port=int(payload.port), + priority=int(payload.priority), + weight=int(payload.weight), + expires=int(clock.time()) + host_ttl, + )) servers.sort() cache[service_name] = list(servers) @@ -317,3 +328,68 @@ def resolve_service(service_name, dns_client=client, cache=SERVER_CACHE, clock=t raise e defer.returnValue(servers) + + +@defer.inlineCallbacks +def _get_hosts_for_srv_record(dns_client, host): + """Look up each of the hosts in a SRV record + + Args: + dns_client (twisted.names.dns.IResolver): + host (basestring): host to look up + + Returns: + Deferred[list[(str, int)]]: a list of (host, ttl) pairs + + """ + ip4_servers = [] + ip6_servers = [] + + def cb(res): + # lookupAddress and lookupIP6Address return a three-tuple + # giving the answer, authority, and additional sections of the + # response. + # + # we only care about the answers. 
+ + return res[0] + + def eb(res): + res.trap(DNSNameError) + return [] + + # no logcontexts here, so we can safely fire these off and gatherResults + d1 = dns_client.lookupAddress(host).addCallbacks(cb, eb) + d2 = dns_client.lookupIPV6Address(host).addCallbacks(cb, eb) + results = yield defer.gatherResults([d1, d2], consumeErrors=True) + + for result in results: + for answer in result: + if not answer.payload: + continue + + try: + if answer.type == dns.A: + ip = answer.payload.dottedQuad() + ip4_servers.append((ip, answer.ttl)) + elif answer.type == dns.AAAA: + ip = socket.inet_ntop( + socket.AF_INET6, answer.payload.address, + ) + ip6_servers.append((ip, answer.ttl)) + else: + # the most likely candidate here is a CNAME record. + # rfc2782 says srvs may not point to aliases. + logger.warn( + "Ignoring unexpected DNS record type %s for %s", + answer.type, host, + ) + continue + except Exception as e: + logger.warn("Ignoring invalid DNS response for %s: %s", + host, e) + continue + + # keep the ipv4 results before the ipv6 results, mostly to match historical + # behaviour. + defer.returnValue(ip4_servers + ip6_servers) diff --git a/tests/test_dns.py b/tests/test_dns.py index c394c57ee7..d08b0f4333 100644 --- a/tests/test_dns.py +++ b/tests/test_dns.py @@ -24,15 +24,17 @@ from synapse.http.endpoint import resolve_service from tests.utils import MockClock +@unittest.DEBUG class DnsTestCase(unittest.TestCase): @defer.inlineCallbacks def test_resolve(self): dns_client_mock = Mock() - service_name = "test_service.examle.com" + service_name = "test_service.example.com" host_name = "example.com" ip_address = "127.0.0.1" + ip6_address = "::1" answer_srv = dns.RRHeader( type=dns.SRV, @@ -48,8 +50,22 @@ class DnsTestCase(unittest.TestCase): ) ) - dns_client_mock.lookupService.return_value = ([answer_srv], None, None) - dns_client_mock.lookupAddress.return_value = ([answer_a], None, None) + answer_aaaa = dns.RRHeader( + type=dns.AAAA, + payload=dns.Record_AAAA( + address=ip6_address, + ) + ) + + dns_client_mock.lookupService.return_value = defer.succeed( + ([answer_srv], None, None), + ) + dns_client_mock.lookupAddress.return_value = defer.succeed( + ([answer_a], None, None), + ) + dns_client_mock.lookupIPV6Address.return_value = defer.succeed( + ([answer_aaaa], None, None), + ) cache = {} @@ -59,10 +75,12 @@ class DnsTestCase(unittest.TestCase): dns_client_mock.lookupService.assert_called_once_with(service_name) dns_client_mock.lookupAddress.assert_called_once_with(host_name) + dns_client_mock.lookupIPV6Address.assert_called_once_with(host_name) - self.assertEquals(len(servers), 1) + self.assertEquals(len(servers), 2) self.assertEquals(servers, cache[service_name]) self.assertEquals(servers[0].host, ip_address) + self.assertEquals(servers[1].host, ip6_address) @defer.inlineCallbacks def test_from_cache_expired_and_dns_fail(self): From 68f737702b5ce90425cbb77a3ce175225bf72086 Mon Sep 17 00:00:00 2001 From: rnbdsh Date: Sun, 24 Sep 2017 04:26:23 +0200 Subject: [PATCH 0154/1637] Remove non-existing files, add stop, use synctl Non-existing files, when running the suggested from https://github.com/matrix-org/synapse#configuring-synapse /etc/synapse/log_config.yaml so the --log-config leads to an error /etc/sysconfig/synapse The environment-file or even the /etc/sysconfig does not exist in arch linux Also instead of calling python2 we use synctl, as this seems to be the proper way to start it, and it gives us a more useful error in the systemctl status. And we now allow stop (and therefore restart). 
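For reference, a typical way to exercise the unit file changed below on a systemd host (standard systemctl invocations; the unit name is an assumption based on the file path under contrib/systemd):

    systemctl daemon-reload
    systemctl enable synapse.service
    systemctl start synapse.service
    systemctl status synapse.service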
--- contrib/systemd/synapse.service | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/contrib/systemd/synapse.service b/contrib/systemd/synapse.service index 92d94b9d58..b71be582c6 100644 --- a/contrib/systemd/synapse.service +++ b/contrib/systemd/synapse.service @@ -9,9 +9,9 @@ Description=Synapse Matrix homeserver Type=simple User=synapse Group=synapse -EnvironmentFile=-/etc/sysconfig/synapse WorkingDirectory=/var/lib/synapse -ExecStart=/usr/bin/python2.7 -m synapse.app.homeserver --config-path=/etc/synapse/homeserver.yaml --log-config=/etc/synapse/log_config.yaml +ExecStart=/usr/bin/synctl start /etc/synapse/homeserver.yaml +ExecStop=/usr/bin/synctl stop /etc/synapse/homeserver.yaml [Install] WantedBy=multi-user.target From b68b0ede7a79c4fe012b239201f71a32d1eb7fd2 Mon Sep 17 00:00:00 2001 From: rnbdsh Date: Sun, 24 Sep 2017 04:55:19 +0200 Subject: [PATCH 0155/1637] Start traditionally, stop synctl Starting with synctl led to "no config file found". Stopping also leads to some (code=exited, status=1/FAILURE), but at least now we can stop the service. --- contrib/systemd/synapse.service | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/contrib/systemd/synapse.service b/contrib/systemd/synapse.service index b71be582c6..3f037055b9 100644 --- a/contrib/systemd/synapse.service +++ b/contrib/systemd/synapse.service @@ -10,8 +10,9 @@ Type=simple User=synapse Group=synapse WorkingDirectory=/var/lib/synapse -ExecStart=/usr/bin/synctl start /etc/synapse/homeserver.yaml +ExecStart=/usr/bin/python2.7 -m synapse.app.homeserver --config-path=/etc/synapse/homeserver.yaml ExecStop=/usr/bin/synctl stop /etc/synapse/homeserver.yaml [Install] WantedBy=multi-user.target + From 79b3cf3e02a3816791a8a0674bbac261b46abea9 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 25 Sep 2017 09:51:39 +0100 Subject: [PATCH 0156/1637] Fix logcontext leak in keyclient (#2465) preserve_context_over_function doesn't do what you want it to do. --- synapse/crypto/keyclient.py | 17 +++++------------ 1 file changed, 5 insertions(+), 12 deletions(-) diff --git a/synapse/crypto/keyclient.py b/synapse/crypto/keyclient.py index c2bd64d6c2..f1fd488b90 100644 --- a/synapse/crypto/keyclient.py +++ b/synapse/crypto/keyclient.py @@ -13,14 +13,11 @@ # See the License for the specific language governing permissions and # limitations under the License.
- +from synapse.util import logcontext from twisted.web.http import HTTPClient from twisted.internet.protocol import Factory from twisted.internet import defer, reactor from synapse.http.endpoint import matrix_federation_endpoint -from synapse.util.logcontext import ( - preserve_context_over_fn, preserve_context_over_deferred -) import simplejson as json import logging @@ -43,14 +40,10 @@ def fetch_server_key(server_name, ssl_context_factory, path=KEY_API_V1): for i in range(5): try: - protocol = yield preserve_context_over_fn( - endpoint.connect, factory - ) - server_response, server_certificate = yield preserve_context_over_deferred( - protocol.remote_key - ) - defer.returnValue((server_response, server_certificate)) - return + with logcontext.PreserveLoggingContext(): + protocol = yield endpoint.connect(factory) + server_response, server_certificate = yield protocol.remote_key + defer.returnValue((server_response, server_certificate)) except SynapseKeyClientError as e: logger.exception("Error getting key for %r" % (server_name,)) if e.status.startswith("4"): From ba8fdc925c0d6271d339be8fc27ef3a15a3f01c0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 25 Sep 2017 11:01:31 +0100 Subject: [PATCH 0157/1637] Bump version and changes --- CHANGES.rst | 24 ++++++++++++++++++++++++ synapse/__init__.py | 2 +- 2 files changed, 25 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index a415944756..2ba396fc23 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,27 @@ +Changes in synapse v0.23.0-rc1 (2017-09-25) +=========================================== + +Changes: + +* Use bcrypt module instead of py-bcrypt (PR #2288) Thanks to @kyrias! +* Improve performance of generating push notifications (PR #2343, #2357, #2365, + #2366, #2371) +* Add a frontend proxy worker (PR #2344) +* Improve DB performance for device list handling in sync (PR #2362) +* Add sample prometheus config (PR #2416) +* Document known to work postgres version (PR #2433) Thanks to @ptman! +* Add support for event_id_only push format (PR #2450) + + +Bug fixes: + +* Fix caching error in the push evaluator (PR #2332) +* Fix bug where pusherpool didn't start and broke some rooms (PR #2342) +* Fix port script for user directory tables (PR #2375) +* Fix device lists notifications when user rejoins a room (PR #2443, #2449) +* Fix sync to always send down current state events in timeline (PR #2451) + + Changes in synapse v0.22.1 (2017-07-06) ======================================= diff --git a/synapse/__init__.py b/synapse/__init__.py index dbf22eca00..30f78c11d1 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -16,4 +16,4 @@ """ This is a reference implementation of a Matrix home server. 
""" -__version__ = "0.22.1" +__version__ = "0.23.0-rc1" From b15c2b7971b582c7e5ec136a01715d8e860bfe30 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 25 Sep 2017 11:34:12 +0100 Subject: [PATCH 0158/1637] Update CHANGES --- CHANGES.rst | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 2ba396fc23..b7abe32519 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,16 +1,22 @@ Changes in synapse v0.23.0-rc1 (2017-09-25) =========================================== +Features: + +* Add a frontend proxy worker (PR #2344) +* Add support for event_id_only push format (PR #2450) +* Add a PoC for filtering spammy events (PR #2456) +* Add a config option to block all room invites (PR #2457) + + Changes: * Use bcrypt module instead of py-bcrypt (PR #2288) Thanks to @kyrias! * Improve performance of generating push notifications (PR #2343, #2357, #2365, #2366, #2371) -* Add a frontend proxy worker (PR #2344) * Improve DB performance for device list handling in sync (PR #2362) -* Add sample prometheus config (PR #2416) +* Include a sample prometheus config (PR #2416) * Document known to work postgres version (PR #2433) Thanks to @ptman! -* Add support for event_id_only push format (PR #2450) Bug fixes: @@ -20,6 +26,8 @@ Bug fixes: * Fix port script for user directory tables (PR #2375) * Fix device lists notifications when user rejoins a room (PR #2443, #2449) * Fix sync to always send down current state events in timeline (PR #2451) +* Fix bug where guest users were incorrectly kicked (PR #2453) +* Fix bug talking to IPv6 only servers using SRV records (PR #2462) Changes in synapse v0.22.1 (2017-07-06) From 7141f1a5cc40a6b2d76edacfdc66fe656565666c Mon Sep 17 00:00:00 2001 From: Max Dor Date: Mon, 25 Sep 2017 16:20:23 +0200 Subject: [PATCH 0159/1637] Clarify recommended network setup --- README.rst | 39 ++++++++++++++++++++++++--------------- 1 file changed, 24 insertions(+), 15 deletions(-) diff --git a/README.rst b/README.rst index 4491b45181..8ca1e25d43 100644 --- a/README.rst +++ b/README.rst @@ -200,19 +200,21 @@ different. See `the spec`__ for more information on key management.) .. __: `key_management`_ The default configuration exposes two HTTP ports: 8008 and 8448. Port 8008 is -configured without TLS; it is not recommended this be exposed outside your -local network. Port 8448 is configured to use TLS with a self-signed -certificate. This is fine for testing with but, to avoid your clients -complaining about the certificate, you will almost certainly want to use -another certificate for production purposes. (Note that a self-signed +configured without TLS; it should be behind a reverse proxy for TLS/SSL +termination on port 443 which in turn should be used for clients. Port 8448 +is configured to use TLS with a self-signed certificate. If you would like +to do initial test with a client without having to setup a reverse proxy, +you can temporarly use another certificate. (Note that a self-signed certificate is fine for `Federation`_). You can do so by changing ``tls_certificate_path``, ``tls_private_key_path`` and ``tls_dh_params_path`` -in ``homeserver.yaml``; alternatively, you can use a reverse-proxy, but be sure -to read `Using a reverse proxy with Synapse`_ when doing so. +in ``homeserver.yaml``; Apart from port 8448 using TLS, both ports are the same in the default configuration. +See https://github.com/matrix-org/synapse/issues/2438 for the recommended +production configuration. 
+ Registering a user ------------------ @@ -283,10 +285,16 @@ Connecting to Synapse from a client The easiest way to try out your new Synapse installation is by connecting to it from a web client. The easiest option is probably the one at http://riot.im/app. You will need to specify a "Custom server" when you log on -or register: set this to ``https://localhost:8448`` - remember to specify the -port (``:8448``) unless you changed the configuration. (Leave the identity +or register: set this to ``https://domain.tld`` if you setup a reverse proxy +following the recommended setup, or ``https://localhost:8448`` - remember to specify the +port (``:8448``) if not ``:443`` unless you changed the configuration. (Leave the identity server as the default - see `Identity servers`_.) +If using port 8448 you will run into errors until you accept the self-signed +certificate. You can easily do this by going to ``https://localhost:8448`` +directly with your browser and accept the presented certificate. You can then +go back in your web client and proceed further. + If all goes well you should at least be able to log in, create a room, and start sending messages. @@ -593,8 +601,9 @@ you to run your server on a machine that might not have the same name as your domain name. For example, you might want to run your server at ``synapse.example.com``, but have your Matrix user-ids look like ``@user:example.com``. (A SRV record also allows you to change the port from -the default 8448. However, if you are thinking of using a reverse-proxy, be -sure to read `Reverse-proxying the federation port`_ first.) +the default 8448. However, if you are thinking of using a reverse-proxy on the +federation port, which is highly not recommended, be sure to read +`Reverse-proxying the federation port`_ first.) To use a SRV record, first create your SRV record and publish it in DNS. This should have the format ``_matrix._tcp. IN SRV 10 0 @@ -674,7 +683,7 @@ For information on how to install and use PostgreSQL, please see Using a reverse proxy with Synapse ================================== -It is possible to put a reverse proxy such as +It is recommended to put a reverse proxy such as `nginx `_, `Apache `_ or `HAProxy `_ in front of Synapse. One advantage of @@ -692,9 +701,9 @@ federation port has a number of pitfalls. It is possible, but be sure to read `Reverse-proxying the federation port`_. The recommended setup is therefore to configure your reverse-proxy on port 443 -for client connections, but to also expose port 8448 for server-server -connections. All the Matrix endpoints begin ``/_matrix``, so an example nginx -configuration might look like:: +to port 8008 of synapse for client connections, but to also directly expose port +8448 for server-server connections. All the Matrix endpoints begin ``/_matrix``, +so an example nginx configuration might look like:: server { listen 443 ssl; From e591f7b3f06ba4de55c439e0741b4fe4ef445556 Mon Sep 17 00:00:00 2001 From: Max Dor Date: Mon, 25 Sep 2017 16:42:26 +0200 Subject: [PATCH 0160/1637] Include review feedback --- README.rst | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/README.rst b/README.rst index 8ca1e25d43..9da8c7f7a8 100644 --- a/README.rst +++ b/README.rst @@ -207,14 +207,12 @@ to do initial test with a client without having to setup a reverse proxy, you can temporarly use another certificate. (Note that a self-signed certificate is fine for `Federation`_). 
You can do so by changing ``tls_certificate_path``, ``tls_private_key_path`` and ``tls_dh_params_path`` -in ``homeserver.yaml``; +in ``homeserver.yaml``; alternatively, you can use a reverse-proxy, but be sure +to read `Using a reverse proxy with Synapse`_ when doing so. Apart from port 8448 using TLS, both ports are the same in the default configuration. -See https://github.com/matrix-org/synapse/issues/2438 for the recommended -production configuration. - Registering a user ------------------ @@ -602,7 +600,7 @@ domain name. For example, you might want to run your server at ``synapse.example.com``, but have your Matrix user-ids look like ``@user:example.com``. (A SRV record also allows you to change the port from the default 8448. However, if you are thinking of using a reverse-proxy on the -federation port, which is highly not recommended, be sure to read +federation port, which is not recommended, be sure to read `Reverse-proxying the federation port`_ first.) To use a SRV record, first create your SRV record and publish it in DNS. This From e3edca3b5d23e52d4b51afe5fa9fe2da79f09700 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 25 Sep 2017 17:35:39 +0100 Subject: [PATCH 0161/1637] Refactor to speed up incremental syncs --- synapse/handlers/sync.py | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index af1b527840..dd0ec00ae6 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -293,11 +293,6 @@ class SyncHandler(object): timeline_limit = sync_config.filter_collection.timeline_limit() block_all_timeline = sync_config.filter_collection.blocks_all_room_timeline() - # Pull out the current state, as we always want to include those events - # in the timeline if they're there. - current_state_ids = yield self.state.get_current_state_ids(room_id) - current_state_ids = frozenset(current_state_ids.itervalues()) - if recents is None or newly_joined_room or timeline_limit < len(recents): limited = True else: @@ -305,6 +300,15 @@ class SyncHandler(object): if recents: recents = sync_config.filter_collection.filter_room_timeline(recents) + + # We check if there are any state events, if there are then we pass + # all current state events to the filter_events function. This is to + # ensure that we always include current state in the timeline + current_state_ids = frozenset() + if any(e.is_state() for e in recents): + current_state_ids = yield self.state.get_current_state_ids(room_id) + current_state_ids = frozenset(current_state_ids.itervalues()) + recents = yield filter_events_for_client( self.store, sync_config.user.to_string(), @@ -341,6 +345,15 @@ class SyncHandler(object): loaded_recents = sync_config.filter_collection.filter_room_timeline( events ) + + # We check if there are any state events, if there are then we pass + # all current state events to the filter_events function. 
This is to + # ensure that we always include current state in the timeline + current_state_ids = frozenset() + if any(e.is_state() for e in loaded_recents): + current_state_ids = yield self.state.get_current_state_ids(room_id) + current_state_ids = frozenset(current_state_ids.itervalues()) + loaded_recents = yield filter_events_for_client( self.store, sync_config.user.to_string(), From f4c8cd5e85192bb7bf1f979ac6e1a0134766763f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 26 Sep 2017 10:02:48 +0100 Subject: [PATCH 0162/1637] Bump changelog and version --- CHANGES.rst | 8 ++++++++ synapse/__init__.py | 2 +- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index b7abe32519..6291fedb9a 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,11 @@ +Changes in synapse v0.23.0-rc2 (2017-09-26) +=========================================== + +Bug fixes: + +* Fix regression in performance of syncs (PR #2470) + + Changes in synapse v0.23.0-rc1 (2017-09-25) =========================================== diff --git a/synapse/__init__.py b/synapse/__init__.py index 30f78c11d1..ec83e6adb7 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -16,4 +16,4 @@ """ This is a reference implementation of a Matrix home server. """ -__version__ = "0.23.0-rc1" +__version__ = "0.23.0-rc2" From 95298783bb86e13da56e07fa3f73b6e2de053c08 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 26 Sep 2017 11:04:37 +0100 Subject: [PATCH 0163/1637] Add is_publicised to group summary --- synapse/handlers/groups_local.py | 50 ++++++++++++++++++-------------- 1 file changed, 28 insertions(+), 22 deletions(-) diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index b4833f8ef8..14fdf06b58 100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -96,32 +96,38 @@ class GroupsLocalHandler(object): res = yield self.groups_server_handler.get_group_summary( group_id, requester_user_id ) - defer.returnValue(res) + else: + res = yield self.transport_client.get_group_summary( + get_domain_from_id(group_id), group_id, requester_user_id, + ) - res = yield self.transport_client.get_group_summary( - get_domain_from_id(group_id), group_id, requester_user_id, - ) + # Loop through the users and validate the attestations. + chunk = res["users_section"]["users"] + valid_users = [] + for entry in chunk: + g_user_id = entry["user_id"] + attestation = entry.pop("attestation") + try: + yield self.attestations.verify_attestation( + attestation, + group_id=group_id, + user_id=g_user_id, + ) + valid_users.append(entry) + except Exception as e: + logger.info("Failed to verify user is in group: %s", e) - # Loop through the users and validate the attestations. 
- chunk = res["users_section"]["users"] - valid_users = [] - for entry in chunk: - g_user_id = entry["user_id"] - attestation = entry.pop("attestation") - try: - yield self.attestations.verify_attestation( - attestation, - group_id=group_id, - user_id=g_user_id, - ) - valid_users.append(entry) - except Exception as e: - logger.info("Failed to verify user is in group: %s", e) + res["users_section"]["users"] = valid_users - res["users_section"]["users"] = valid_users + res["users_section"]["users"].sort(key=lambda e: e.get("order", 0)) + res["rooms_section"]["rooms"].sort(key=lambda e: e.get("order", 0)) - res["users_section"]["users"].sort(key=lambda e: e.get("order", 0)) - res["rooms_section"]["rooms"].sort(key=lambda e: e.get("order", 0)) + # Add `is_publicised` flag to indicate whether the user has publicised their + # membership of the group on their profile + result = yield self.store.get_publicised_groups_for_user(requester_user_id) + is_publicised = group_id in result + + res.setdefault("user", {})["is_publicised"] = is_publicised defer.returnValue(res) From a8e2a3df32f3584d728021d5feafecf78b0f37d1 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 26 Sep 2017 15:39:13 +0100 Subject: [PATCH 0164/1637] Add unique index to group_rooms table --- synapse/groups/groups_server.py | 2 -- synapse/storage/schema/delta/43/group_server.sql | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 94cf9788bb..699d8a5265 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -466,8 +466,6 @@ class GroupsServerHandler(object): group_id, and_exists=True, and_is_admin=requester_user_id ) - # TODO: Check if room has already been added - is_public = _parse_visibility_from_contents(content) yield self.store.add_room_to_group(group_id, room_id, is_public=is_public) diff --git a/synapse/storage/schema/delta/43/group_server.sql b/synapse/storage/schema/delta/43/group_server.sql index e74554381f..b2333848a0 100644 --- a/synapse/storage/schema/delta/43/group_server.sql +++ b/synapse/storage/schema/delta/43/group_server.sql @@ -52,7 +52,7 @@ CREATE TABLE group_rooms ( is_public BOOLEAN NOT NULL -- whether the room can be seen by everyone ); -CREATE INDEX groups_rooms_g_idx ON group_rooms(group_id, room_id); +CREATE UNIQUE INDEX groups_rooms_g_idx ON group_rooms(group_id, room_id); CREATE INDEX groups_rooms_r_idx ON group_rooms(room_id); From 17b8e2bd02ad0abbd25103b637eb8490f3a53507 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 26 Sep 2017 15:52:41 +0100 Subject: [PATCH 0165/1637] Add remove room API --- synapse/federation/transport/client.py | 12 ++++++++++++ synapse/federation/transport/server.py | 14 +++++++++++++- synapse/groups/groups_server.py | 12 ++++++++++++ synapse/handlers/groups_local.py | 1 + synapse/rest/client/v2_alpha/groups.py | 11 +++++++++++ synapse/storage/group_server.py | 23 +++++++++++++++++++++++ 6 files changed, 72 insertions(+), 1 deletion(-) diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index ce68cc4937..36f6eb75e9 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -525,6 +525,18 @@ class TransportLayerClient(object): ignore_backoff=True, ) + def remove_room_from_group(self, destination, group_id, requester_user_id, room_id): + """Remove a room from a group + """ + path = PREFIX + "/groups/%s/room/%s" % (group_id, room_id,) + + return self.client.delete_json( + 
destination=destination,
+            path=path,
+            args={"requester_user_id": requester_user_id},
+            ignore_backoff=True,
+        )
+
     @log_function
     def get_users_in_group(self, destination, group_id, requester_user_id):
         """Get users in a group
diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
index b5f07c50bf..c7565e0737 100644
--- a/synapse/federation/transport/server.py
+++ b/synapse/federation/transport/server.py
@@ -674,7 +674,7 @@ class FederationGroupsRoomsServlet(BaseFederationServlet):
 
 
 class FederationGroupsAddRoomsServlet(BaseFederationServlet):
-    """Add room to group
+    """Add/remove room from group
     """
     PATH = "/groups/(?P<group_id>[^/]*)/room/(?P<room_id>[^/]*)$"
 
@@ -690,6 +690,18 @@ class FederationGroupsAddRoomsServlet(BaseFederationServlet):
 
         defer.returnValue((200, new_content))
 
+    @defer.inlineCallbacks
+    def on_DELETE(self, origin, content, query, group_id, room_id):
+        requester_user_id = parse_string_from_args(query, "requester_user_id")
+        if get_domain_from_id(requester_user_id) != origin:
+            raise SynapseError(403, "requester_user_id doesn't match origin")
+
+        new_content = yield self.handler.remove_room_from_group(
+            group_id, requester_user_id, room_id,
+        )
+
+        defer.returnValue((200, new_content))
+
 
 class FederationGroupsUsersServlet(BaseFederationServlet):
     """Get the users in a group on behalf of a user
diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py
index 699d8a5265..10bf61d178 100644
--- a/synapse/groups/groups_server.py
+++ b/synapse/groups/groups_server.py
@@ -472,6 +472,18 @@ class GroupsServerHandler(object):
 
         defer.returnValue({})
 
+    @defer.inlineCallbacks
+    def remove_room_from_group(self, group_id, requester_user_id, room_id):
+        """Remove room from group
+        """
+        yield self.check_group_is_ours(
+            group_id, and_exists=True, and_is_admin=requester_user_id
+        )
+
+        yield self.store.remove_room_from_group(group_id, room_id)
+
+        defer.returnValue({})
+
     @defer.inlineCallbacks
     def invite_to_group(self, group_id, user_id, requester_user_id, content):
         """Invite user to group
diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py
index 14fdf06b58..a2bacbfc38 100644
--- a/synapse/handlers/groups_local.py
+++ b/synapse/handlers/groups_local.py
@@ -69,6 +69,7 @@ class GroupsLocalHandler(object):
 
     get_rooms_in_group = _create_rerouter("get_rooms_in_group")
     add_room_to_group = _create_rerouter("add_room_to_group")
+    remove_room_from_group = _create_rerouter("remove_room_from_group")
 
     update_group_summary_room = _create_rerouter("update_group_summary_room")
     delete_group_summary_room = _create_rerouter("delete_group_summary_room")
diff --git a/synapse/rest/client/v2_alpha/groups.py b/synapse/rest/client/v2_alpha/groups.py
index b469058e9d..8f3ce15b02 100644
--- a/synapse/rest/client/v2_alpha/groups.py
+++ b/synapse/rest/client/v2_alpha/groups.py
@@ -423,6 +423,17 @@ class GroupAdminRoomsServlet(RestServlet):
 
         defer.returnValue((200, result))
 
+    @defer.inlineCallbacks
+    def on_DELETE(self, request, group_id, room_id):
+        requester = yield self.auth.get_user_by_req(request)
+        user_id = requester.user.to_string()
+
+        result = yield self.groups_handler.remove_room_from_group(
+            group_id, user_id, room_id,
+        )
+
+        defer.returnValue((200, result))
+
 
 class GroupAdminUsersInviteServlet(RestServlet):
     """Invite a user to the group
diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py
index d0b5ad231a..4fe9172adc 100644
--- a/synapse/storage/group_server.py
+++ b/synapse/storage/group_server.py
@@ -843,6 +843,29 @@
class GroupServerStore(SQLBaseStore): desc="add_room_to_group", ) + def remove_room_from_group(self, group_id, room_id): + def _remove_room_from_group_txn(txn): + self._simple_delete_txn( + txn, + table="group_rooms", + keyvalues={ + "group_id": group_id, + "room_id": room_id, + }, + ) + + self._simple_delete_txn( + txn, + table="group_summary_rooms", + keyvalues={ + "group_id": group_id, + "room_id": room_id, + }, + ) + return self.runInteraction( + "remove_room_from_group", _remove_room_from_group_txn, + ) + def get_publicised_groups_for_user(self, user_id): """Get all groups a user is publicising """ From 4824a33c31c32a055fc5b8ff4d1197c0bd3933c5 Mon Sep 17 00:00:00 2001 From: David Baker Date: Tue, 26 Sep 2017 17:51:26 +0100 Subject: [PATCH 0166/1637] Factor out module loading to a separate place So it can be reused --- synapse/config/password_auth_providers.py | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/synapse/config/password_auth_providers.py b/synapse/config/password_auth_providers.py index 83762d089a..90824cab7f 100644 --- a/synapse/config/password_auth_providers.py +++ b/synapse/config/password_auth_providers.py @@ -15,13 +15,15 @@ from ._base import Config, ConfigError -import importlib +from synapse.util.module_loader import load_module class PasswordAuthProviderConfig(Config): def read_config(self, config): self.password_providers = [] + provider_config = None + # We want to be backwards compatible with the old `ldap_config` # param. ldap_config = config.get("ldap_config", {}) @@ -38,19 +40,15 @@ class PasswordAuthProviderConfig(Config): if provider['module'] == "synapse.util.ldap_auth_provider.LdapAuthProvider": from ldap_auth_provider import LdapAuthProvider provider_class = LdapAuthProvider + try: + provider_config = provider_class.parse_config(provider["config"]) + except Exception as e: + raise ConfigError( + "Failed to parse config for %r: %r" % (provider['module'], e) + ) else: - # We need to import the module, and then pick the class out of - # that, so we split based on the last dot. - module, clz = provider['module'].rsplit(".", 1) - module = importlib.import_module(module) - provider_class = getattr(module, clz) + (provider_class, provider_config) = load_module(provider) - try: - provider_config = provider_class.parse_config(provider["config"]) - except Exception as e: - raise ConfigError( - "Failed to parse config for %r: %r" % (provider['module'], e) - ) self.password_providers.append((provider_class, provider_config)) def default_config(self, **kwargs): From 0b03a9770829247055fe8eaf66c24bb1892a3c50 Mon Sep 17 00:00:00 2001 From: David Baker Date: Tue, 26 Sep 2017 17:56:41 +0100 Subject: [PATCH 0167/1637] Add module_loader.py --- synapse/util/module_loader.py | 41 +++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) create mode 100644 synapse/util/module_loader.py diff --git a/synapse/util/module_loader.py b/synapse/util/module_loader.py new file mode 100644 index 0000000000..b4464790ee --- /dev/null +++ b/synapse/util/module_loader.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- +# Copyright 2017 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
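A side note on the remove_room_from_group storage method just above: both deletes run inside a single runInteraction, so the group_rooms row and its group_summary_rooms row disappear atomically. The same shape with plain sqlite3, where the table and column names come from the diff but everything else is illustrative and assumes the tables already exist:

    import sqlite3

    def remove_room_from_group(conn, group_id, room_id):
        # One transaction for both deletes: we never keep a summary row
        # for a room that is no longer in the group.
        with conn:  # commits on success, rolls back on error
            conn.execute(
                "DELETE FROM group_rooms WHERE group_id = ? AND room_id = ?",
                (group_id, room_id),
            )
            conn.execute(
                "DELETE FROM group_summary_rooms WHERE group_id = ? AND room_id = ?",
                (group_id, room_id),
            )
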
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import importlib + +from synapse.config._base import ConfigError + +def load_module(provider): + """ Loads a module with its config + Take a dict with keys 'module' (the module name) and 'config' + (the config dict). + + Returns + Tuple of (provider class, parsed config object) + """ + # We need to import the module, and then pick the class out of + # that, so we split based on the last dot. + module, clz = provider['module'].rsplit(".", 1) + module = importlib.import_module(module) + provider_class = getattr(module, clz) + + try: + provider_config = provider_class.parse_config(provider["config"]) + except Exception as e: + raise ConfigError( + "Failed to parse config for %r: %r" % (provider['module'], e) + ) + + return (provider_class, provider_config) From 9fd086e506ae3cb3db7f1b1c7317c7602a4d71e3 Mon Sep 17 00:00:00 2001 From: David Baker Date: Tue, 26 Sep 2017 17:59:46 +0100 Subject: [PATCH 0168/1637] unnecessary parens --- synapse/util/module_loader.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/util/module_loader.py b/synapse/util/module_loader.py index b4464790ee..4b51d7a77b 100644 --- a/synapse/util/module_loader.py +++ b/synapse/util/module_loader.py @@ -38,4 +38,4 @@ def load_module(provider): "Failed to parse config for %r: %r" % (provider['module'], e) ) - return (provider_class, provider_config) + return provider_class, provider_config From 6cd5fcd5366cfef4959d107e818d0e20d78aa483 Mon Sep 17 00:00:00 2001 From: David Baker Date: Tue, 26 Sep 2017 19:20:23 +0100 Subject: [PATCH 0169/1637] Make the spam checker a module --- synapse/config/homeserver.py | 4 ++- synapse/events/spamcheck.py | 37 +++++++++++++++------------ synapse/federation/federation_base.py | 5 ++-- synapse/handlers/message.py | 5 ++-- synapse/server.py | 5 ++++ 5 files changed, 33 insertions(+), 23 deletions(-) diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py index b22cacf8dc..3f9d9d5f8b 100644 --- a/synapse/config/homeserver.py +++ b/synapse/config/homeserver.py @@ -34,6 +34,7 @@ from .password_auth_providers import PasswordAuthProviderConfig from .emailconfig import EmailConfig from .workers import WorkerConfig from .push import PushConfig +from .spam_checker import SpamCheckerConfig class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig, @@ -41,7 +42,8 @@ class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig, VoipConfig, RegistrationConfig, MetricsConfig, ApiConfig, AppServiceConfig, KeyConfig, SAML2Config, CasConfig, JWTConfig, PasswordConfig, EmailConfig, - WorkerConfig, PasswordAuthProviderConfig, PushConfig,): + WorkerConfig, PasswordAuthProviderConfig, PushConfig, + SpamCheckerConfig,): pass diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py index 56fa9e556e..7b22b3413a 100644 --- a/synapse/events/spamcheck.py +++ b/synapse/events/spamcheck.py @@ -13,26 +13,29 @@ # See the License for the specific language governing permissions and # limitations under the License. 
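To make the mechanics of the load_module helper above concrete: it splits the dotted path on the last dot, imports the module half, pulls the class out with getattr, and then hands provider["config"] to the class's parse_config. The same import dance applied to a stdlib class, purely so the snippet runs as-is (OrderedDict has no parse_config; it only stands in for the dynamic lookup):

    import importlib

    dotted_path = "collections.OrderedDict"  # stand-in for provider['module']
    module_name, clz = dotted_path.rsplit(".", 1)
    provider_class = getattr(importlib.import_module(module_name), clz)
    print(provider_class)  # <class 'collections.OrderedDict'>
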
+class SpamChecker(object): + def __init__(self, hs): + self.spam_checker = None -def check_event_for_spam(event): - """Checks if a given event is considered "spammy" by this server. + if hs.config.spam_checker is not None: + module, config = hs.config.spam_checker + print("cfg %r", config) + self.spam_checker = module(config=config) - If the server considers an event spammy, then it will be rejected if - sent by a local user. If it is sent by a user on another server, then - users receive a blank event. + def check_event_for_spam(self, event): + """Checks if a given event is considered "spammy" by this server. - Args: - event (synapse.events.EventBase): the event to be checked + If the server considers an event spammy, then it will be rejected if + sent by a local user. If it is sent by a user on another server, then + users receive a blank event. - Returns: - bool: True if the event is spammy. - """ - if not hasattr(event, "content") or "body" not in event.content: - return False + Args: + event (synapse.events.EventBase): the event to be checked - # for example: - # - # if "the third flower is green" in event.content["body"]: - # return True + Returns: + bool: True if the event is spammy. + """ + if self.spam_checker is None: + return False - return False + return self.spam_checker.check_event_for_spam(event) diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index babd9ea078..a0f5d40eb3 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -16,7 +16,6 @@ import logging from synapse.api.errors import SynapseError from synapse.crypto.event_signing import check_event_content_hash -from synapse.events import spamcheck from synapse.events.utils import prune_event from synapse.util import unwrapFirstError, logcontext from twisted.internet import defer @@ -26,7 +25,7 @@ logger = logging.getLogger(__name__) class FederationBase(object): def __init__(self, hs): - pass + self.spam_checker = hs.get_spam_checker() @defer.inlineCallbacks def _check_sigs_and_hash_and_fetch(self, origin, pdus, outlier=False, @@ -144,7 +143,7 @@ class FederationBase(object): ) return redacted - if spamcheck.check_event_for_spam(pdu): + if self.spam_checker.check_event_for_spam(pdu): logger.warn( "Event contains spam, redacting %s: %s", pdu.event_id, pdu.get_pdu_json() diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index da18bf23db..37f0a2772a 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -12,7 +12,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
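For reference, the smallest thing that could be plugged into this SpamChecker wrapper is a class exposing parse_config (used by the module loader) and check_event_for_spam (called per event). A hedged sketch of such a module; the class and its banned-phrase rule are invented for illustration, echoing the example in the comment being removed:

    class ExampleSpamChecker(object):
        def __init__(self, config):
            self.banned_phrase = config.get(
                "banned_phrase", "the third flower is green"
            )

        @classmethod
        def parse_config(cls, config):
            return config

        def check_event_for_spam(self, event):
            # Return True to have the event rejected as spam.
            body = getattr(event, "content", {}).get("body", "")
            return self.banned_phrase in body
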
-from synapse.events import spamcheck from twisted.internet import defer from synapse.api.constants import EventTypes, Membership @@ -58,6 +57,8 @@ class MessageHandler(BaseHandler): self.action_generator = hs.get_action_generator() + self.spam_checker = hs.get_spam_checker() + @defer.inlineCallbacks def purge_history(self, room_id, event_id): event = yield self.store.get_event(event_id) @@ -322,7 +323,7 @@ class MessageHandler(BaseHandler): txn_id=txn_id ) - if spamcheck.check_event_for_spam(event): + if self.spam_checker.check_event_for_spam(event): raise SynapseError( 403, "Spam is not permitted here", Codes.FORBIDDEN ) diff --git a/synapse/server.py b/synapse/server.py index a38e5179e0..4d44af745e 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -31,6 +31,7 @@ from synapse.appservice.api import ApplicationServiceApi from synapse.appservice.scheduler import ApplicationServiceScheduler from synapse.crypto.keyring import Keyring from synapse.events.builder import EventBuilderFactory +from synapse.events.spamcheck import SpamChecker from synapse.federation import initialize_http_replication from synapse.federation.send_queue import FederationRemoteSendQueue from synapse.federation.transport.client import TransportLayerClient @@ -139,6 +140,7 @@ class HomeServer(object): 'read_marker_handler', 'action_generator', 'user_directory_handler', + 'spam_checker', ] def __init__(self, hostname, **kwargs): @@ -309,6 +311,9 @@ class HomeServer(object): def build_user_directory_handler(self): return UserDirectoyHandler(self) + def build_spam_checker(self): + return SpamChecker(self) + def remove_pusher(self, app_id, push_key, user_id): return self.get_pusherpool().remove_pusher(app_id, push_key, user_id) From 8ad5f34908df99804b27bd045fde5b9d5625d784 Mon Sep 17 00:00:00 2001 From: David Baker Date: Tue, 26 Sep 2017 19:21:41 +0100 Subject: [PATCH 0170/1637] pep8 --- synapse/util/module_loader.py | 1 + 1 file changed, 1 insertion(+) diff --git a/synapse/util/module_loader.py b/synapse/util/module_loader.py index 4b51d7a77b..4288312b8a 100644 --- a/synapse/util/module_loader.py +++ b/synapse/util/module_loader.py @@ -17,6 +17,7 @@ import importlib from synapse.config._base import ConfigError + def load_module(provider): """ Loads a module with its config Take a dict with keys 'module' (the module name) and 'config' From 1786b0e768877a608a6f44a6a37cc36e598eda4e Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 27 Sep 2017 10:22:54 +0100 Subject: [PATCH 0171/1637] Forgot the new file again :( --- synapse/config/spam_checker.py | 35 ++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) create mode 100644 synapse/config/spam_checker.py diff --git a/synapse/config/spam_checker.py b/synapse/config/spam_checker.py new file mode 100644 index 0000000000..3fec42bdb0 --- /dev/null +++ b/synapse/config/spam_checker.py @@ -0,0 +1,35 @@ +# -*- coding: utf-8 -*- +# Copyright 2017 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
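The server.py hunk above leans on HomeServer's dependency convention: anything named in its dependency list is built lazily by a matching build_* method and then cached, so hs.get_spam_checker() constructs the checker once on first use. A stripped-down sketch of that caching pattern (not the real HomeServer, just the idea):

    class MiniHomeServer(object):
        DEPENDENCIES = ["spam_checker"]

        def __init__(self):
            self._built = {}

        def get(self, name):
            # Build on first use, then cache, mirroring hs.get_spam_checker().
            if name not in self._built:
                self._built[name] = getattr(self, "build_%s" % name)()
            return self._built[name]

        def build_spam_checker(self):
            return object()  # stand-in for SpamChecker(self)
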
+ +from synapse.util.module_loader import load_module + +from ._base import Config + + +class SpamCheckerConfig(Config): + def read_config(self, config): + self.spam_checker = None + + provider = config.get("spam_checker", None) + if provider is not None: + self.spam_checker = load_module(provider) + + def default_config(self, **kwargs): + return """\ + # spam_checker: + # module: "my_custom_project.SuperSpamChecker" + # config: + # example_option: 'things' + """ From 60c78666abbf82c3adfaa3bb4faf86f867eb18ea Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 27 Sep 2017 10:26:13 +0100 Subject: [PATCH 0172/1637] pep8 --- synapse/events/spamcheck.py | 1 + synapse/util/module_loader.py | 1 + 2 files changed, 2 insertions(+) diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py index 7b22b3413a..a876bcb816 100644 --- a/synapse/events/spamcheck.py +++ b/synapse/events/spamcheck.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. + class SpamChecker(object): def __init__(self, hs): self.spam_checker = None diff --git a/synapse/util/module_loader.py b/synapse/util/module_loader.py index 4b51d7a77b..4288312b8a 100644 --- a/synapse/util/module_loader.py +++ b/synapse/util/module_loader.py @@ -17,6 +17,7 @@ import importlib from synapse.config._base import ConfigError + def load_module(provider): """ Loads a module with its config Take a dict with keys 'module' (the module name) and 'config' From 8c06dd607165c109de23aa41098a8b45e02dcbd8 Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 27 Sep 2017 10:31:14 +0100 Subject: [PATCH 0173/1637] Remove unintentional debugging --- synapse/events/spamcheck.py | 1 - 1 file changed, 1 deletion(-) diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py index a876bcb816..8ddbf2ca38 100644 --- a/synapse/events/spamcheck.py +++ b/synapse/events/spamcheck.py @@ -20,7 +20,6 @@ class SpamChecker(object): if hs.config.spam_checker is not None: module, config = hs.config.spam_checker - print("cfg %r", config) self.spam_checker = module(config=config) def check_event_for_spam(self, event): From ef3a5ae787e2fa25cc753b7c5dc9f31ba3bf4316 Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 27 Sep 2017 11:24:19 +0100 Subject: [PATCH 0174/1637] Don't test is spam_checker not None Sometimes it's a Mock object which is not none but is still not what we're after --- synapse/events/spamcheck.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py index 8ddbf2ca38..e739f105b2 100644 --- a/synapse/events/spamcheck.py +++ b/synapse/events/spamcheck.py @@ -18,8 +18,14 @@ class SpamChecker(object): def __init__(self, hs): self.spam_checker = None - if hs.config.spam_checker is not None: + module = None + config = None + try: module, config = hs.config.spam_checker + except: + pass + + if module is not None: self.spam_checker = module(config=config) def check_event_for_spam(self, event): From adec03395d1c9a8e237a74ea420966bae8ea0002 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 27 Sep 2017 15:01:25 +0100 Subject: [PATCH 0175/1637] Fix bug where /joined_members didn't check user was in room --- synapse/handlers/message.py | 31 +++++++++++++++++++++++++++++++ synapse/rest/client/v1/room.py | 17 +++++++---------- 2 files changed, 38 insertions(+), 10 deletions(-) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 37f0a2772a..f6740544c1 100644 --- 
a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -419,6 +419,37 @@ class MessageHandler(BaseHandler): [serialize_event(c, now) for c in room_state.values()] ) + @defer.inlineCallbacks + def get_joined_members(self, user_id, room_id): + """Get all the joined members in the room and their profile information. + + If the user has left the room return the state events from when they left. + + Args: + user_id(str): The user requesting state events. + room_id(str): The room ID to get all state events from. + Returns: + A dict of user_id to profile info + """ + membership, membership_event_id = yield self._check_in_room_or_world_readable( + room_id, user_id + ) + + if membership == Membership.JOIN: + users_with_profile = yield self.state.get_current_user_in_room(room_id) + else: + raise NotImplementedError( + "Getting joined members after leaving is not implemented" + ) + + defer.returnValue({ + user_id: { + "avatar_url": profile.avatar_url, + "display_name": profile.display_name, + } + for user_id, profile in users_with_profile.iteritems() + }) + @measure_func("_create_new_client_event") @defer.inlineCallbacks def _create_new_client_event(self, builder, requester=None, prev_event_ids=None): diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index cd388770c8..4be0fee38d 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -398,22 +398,19 @@ class JoinedRoomMemberListRestServlet(ClientV1RestServlet): def __init__(self, hs): super(JoinedRoomMemberListRestServlet, self).__init__(hs) - self.state = hs.get_state_handler() + self.message_handler = hs.get_handlers().message_handler @defer.inlineCallbacks def on_GET(self, request, room_id): - yield self.auth.get_user_by_req(request) + requester = yield self.auth.get_user_by_req(request) + user_id = requester.user.to_string() - users_with_profile = yield self.state.get_current_user_in_room(room_id) + users_with_profile = yield self.message_handler.get_joined_members( + user_id, room_id, + ) defer.returnValue((200, { - "joined": { - user_id: { - "avatar_url": profile.avatar_url, - "display_name": profile.display_name, - } - for user_id, profile in users_with_profile.iteritems() - } + "joined": users_with_profile, })) From 8090fd4664de87bad636ace6774dad8c33bd5276 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Sep 2017 10:09:32 +0100 Subject: [PATCH 0176/1637] Fix /joined_members to work with AS users --- synapse/handlers/message.py | 36 +++++++++++++++++++++++----------- synapse/rest/client/v1/room.py | 3 +-- 2 files changed, 26 insertions(+), 13 deletions(-) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index f6740544c1..ca8c6c55bb 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -420,27 +420,41 @@ class MessageHandler(BaseHandler): ) @defer.inlineCallbacks - def get_joined_members(self, user_id, room_id): + def get_joined_members(self, requester, room_id): """Get all the joined members in the room and their profile information. If the user has left the room return the state events from when they left. Args: - user_id(str): The user requesting state events. + requester(Requester): The user requesting state events. room_id(str): The room ID to get all state events from. 
Returns: A dict of user_id to profile info """ - membership, membership_event_id = yield self._check_in_room_or_world_readable( - room_id, user_id - ) - - if membership == Membership.JOIN: - users_with_profile = yield self.state.get_current_user_in_room(room_id) - else: - raise NotImplementedError( - "Getting joined members after leaving is not implemented" + user_id = requester.user.to_string() + if not requester.app_service: + # We check AS auth after fetching the room membership, as it + # requires us to pull out all joined members anyway. + membership, _ = yield self._check_in_room_or_world_readable( + room_id, user_id ) + if membership != Membership.JOIN: + raise NotImplementedError( + "Getting joined members after leaving is not implemented" + ) + + users_with_profile = yield self.state.get_current_user_in_room(room_id) + + # If this is an AS, double check that they are allowed to see the members. + # This can either be because the AS user is in the room or becuase there + # is a user in the room that the AS is "interested in" + if requester.app_service and user_id not in users_with_profile: + for uid in users_with_profile: + if requester.app_service.is_interested_in_user(uid): + break + else: + # Loop fell through, AS has no interested users in room + raise AuthError(403, "Appservice not in room") defer.returnValue({ user_id: { diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 4be0fee38d..6c379d53ac 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -403,10 +403,9 @@ class JoinedRoomMemberListRestServlet(ClientV1RestServlet): @defer.inlineCallbacks def on_GET(self, request, room_id): requester = yield self.auth.get_user_by_req(request) - user_id = requester.user.to_string() users_with_profile = yield self.message_handler.get_joined_members( - user_id, room_id, + requester, room_id, ) defer.returnValue((200, { From 95e02b856b2ac1409dc0d5575ee0b7be55105e9a Mon Sep 17 00:00:00 2001 From: Robert Swain Date: Thu, 28 Sep 2017 12:12:47 +0200 Subject: [PATCH 0177/1637] docker: Initial Dockerfile and docker-compose.yaml --- Dockerfile | 58 ++++++++++++++++++ docker/README.md | 70 ++++++++++++++++++++++ docker/docker-compose.yaml | 39 ++++++++++++ docker/rootfs/etc/service/synapse/finish | 17 ++++++ docker/rootfs/etc/service/synapse/run | 75 ++++++++++++++++++++++++ 5 files changed, 259 insertions(+) create mode 100644 Dockerfile create mode 100644 docker/README.md create mode 100644 docker/docker-compose.yaml create mode 100755 docker/rootfs/etc/service/synapse/finish create mode 100755 docker/rootfs/etc/service/synapse/run diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000000..9b11a143f6 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,58 @@ +# Copyright 2017 Vector Creations Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
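A note on the for/else idiom in the appservice check of get_joined_members above: the else branch runs only if the loop completed without hitting break, i.e. when no room member matched the appservice's interest. In miniature, where is_interested_in_user stands in for the appservice method and a plain RuntimeError for the AuthError raised in the real code:

    def assert_appservice_interested(members, is_interested_in_user):
        for uid in members:
            if is_interested_in_user(uid):
                break
        else:
            # Loop fell through without break: no interesting users in room.
            raise RuntimeError("Appservice not in room")
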
+
+FROM phusion/baseimage:0.9.22
+
+COPY ./ /synapse/source/
+
+RUN apt-get update -y \
+    && DEBIAN_FRONTEND=noninteractive apt-get upgrade -y \
+    && DEBIAN_FRONTEND=noninteractive apt-get install -y \
+    build-essential \
+    libffi-dev \
+    libjpeg-dev \
+    libpq-dev \
+    libssl-dev \
+    libxslt1-dev \
+    python-pip \
+    python-setuptools \
+    python-virtualenv \
+    python2.7-dev \
+    sqlite3 \
+    && virtualenv -p python2.7 /synapse \
+    && . /synapse/bin/activate \
+    && pip install --upgrade pip \
+    && pip install --upgrade setuptools \
+    && pip install --upgrade psycopg2 \
+    && cd /synapse/source \
+    && pip install --upgrade ./ \
+    && cd / \
+    && rm -rf /synapse/source \
+    && apt-get autoremove -y \
+    build-essential \
+    libffi-dev \
+    libjpeg-dev \
+    libpq-dev \
+    libssl-dev \
+    libxslt1-dev \
+    python2.7-dev \
+    && apt-get clean \
+    && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
+
+COPY docker/rootfs/ /
+
+VOLUME /synapse/config/
+VOLUME /synapse/data/
+
+CMD ["/sbin/my_init"]
diff --git a/docker/README.md b/docker/README.md
new file mode 100644
index 0000000000..c9e6fd216d
--- /dev/null
+++ b/docker/README.md
@@ -0,0 +1,70 @@
+# Synapse Docker
+
+## Build
+
+Build the docker image with the `docker build` command from the root of the synapse repository.
+
+```
+docker build -t matrix-org/synapse:v0.22.1 .
+```
+
+The `-t` option sets the image tag. Official images are tagged `matrix-org/synapse:<version>` where `<version>` is the same as the release tag in the synapse git repository.
+
+## Configure
+
+Synapse provides a command for generating homeserver configuration files. These are a good starting point for setting up your own deployment.
+
+The documentation below will refer to a `CONFIG_PATH` shell variable. This is a path to a directory where synapse configuration will be stored. It needs to be mapped into the container as a volume at `/synapse/config/` as can be seen in the example `docker run` command.
+
+Docker container environment variables:
+* `GENERATE_CONFIG` - Set this to any non-empty string, such as `yes`, to trigger generation of configuration files. Existing files in the `CONFIG_PATH` will **not** be overwritten.
+* `POSTGRES_DATABASE` - The database name for the synapse postgres database. [default: `synapse`]
+* `POSTGRES_HOST` - The host of the postgres database if you wish to use postgresql instead of sqlite3. [default: `postgres` which is useful when using a container on the same docker network in a compose file where the postgres service is called `postgres`] **NOTE**: `localhost` and `127.0.0.1` refer to the container itself unless running the container with `host` networking.
+* `POSTGRES_PASSWORD` - The password for the synapse postgres database. **If this is set then postgres will be used instead of sqlite3.** [default: none] **NOTE**: You are highly encouraged to use postgresql! Please use the compose file to make it easier to deploy.
+* `POSTGRES_USER` - The user for the synapse postgres database. [default: `postgres`]
+* `REPORT_STATS` - Whether to send anonymous usage statistics back to the Matrix project which helps us to get funding! Must be `yes` or `no`. [default: `yes`]
+* `SERVER_NAME` - The domain used for the Matrix homeserver. If you intend to run this synapse instance on a public domain, use that domain.
[default: `localhost`] + +``` +CONFIG_PATH=/my/magical/config/path/ +mkdir -p ${CONFIG_PATH} +docker run \ + --rm \ + -e GENERATE_CONFIG=yes \ + -e POSTGRES_PASSWORD=MyVerySecretPassword \ + -e REPORT_STATS=yes \ + -e SERVER_NAME=example.com \ + -v ${CONFIG_PATH}:/synapse/config/ \ + matrix-org/synapse:develop +``` + +This will create a temporary container from the image and use the synapse code for generating configuration files and TLS keys and certificates for the specified `SERVER_NAME` domain. The files are written to `CONFIG_PATH`. + +## Run + +**NOTE**: If you are not using postgresql and are using sqlite3 as your database, you will need to make a directory to store the sqlite3 database file in and then mount this volume into the container at `/synapse/data/`. As it is so easy to use postgresql, when using Docker containers, this is not documented to somewhat discourage it. Choose a `POSTGRES_PASSWORD` instead. + +### Docker Compose + +A `docker-compose.yaml` file is included to ease deployment of the basic synapse and postgres setup. Remember to set a `POSTGRES_PASSWORD` when generating your configuration above. You will need it for running the containers in the composition. + +From the `docker/` subdirectory of the synapse repository: +``` +CONFIG_PATH=/my/magical/config/path/ +POSTGRES_PASSWORD=MyVerySecretPassword \ +docker-compose \ + -p synapse \ + up -d +``` + +### Docker + +Note that the following is just a guideline and you may need to add parameters to the docker run command to account for the network situation with your postgres database. + +``` +docker run \ + -d \ + --name synapse \ + -v ${CONFIG_PATH}:/synapse/config/ \ + matrix-org/synapse:v0.22.1 +``` diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml new file mode 100644 index 0000000000..ff36081a9b --- /dev/null +++ b/docker/docker-compose.yaml @@ -0,0 +1,39 @@ +# Copyright 2017 Vector Creations Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +version: '3' + +services: + postgres: + image: postgres:9.6.5-alpine + environment: + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + POSTGRES_DB: synapse + expose: + - 5432 + restart: unless-stopped + volumes: + - postgres-data:/var/lib/postgresql/data/ + + synapse: + image: matrix-org/synapse:develop + ports: + - 8008:8008 + - 8448:8448 + restart: unless-stopped + volumes: + - ${CONFIG_PATH}:/synapse/config/ + +volumes: + postgres-data: diff --git a/docker/rootfs/etc/service/synapse/finish b/docker/rootfs/etc/service/synapse/finish new file mode 100755 index 0000000000..2aace581a1 --- /dev/null +++ b/docker/rootfs/etc/service/synapse/finish @@ -0,0 +1,17 @@ +#!/bin/bash +# +# Copyright 2017 Vector Creations Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +kill -TERM 1 diff --git a/docker/rootfs/etc/service/synapse/run b/docker/rootfs/etc/service/synapse/run new file mode 100755 index 0000000000..dd797d3ef9 --- /dev/null +++ b/docker/rootfs/etc/service/synapse/run @@ -0,0 +1,75 @@ +#!/bin/bash +# +# Copyright 2017 Vector Creations Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e + +: ${CONFIG_PATH:="/synapse/config"} +: ${POSTGRES_DATABASE:="synapse"} +: ${POSTGRES_HOST:="postgres"} +: ${POSTGRES_USER:="postgres"} +: ${REPORT_STATS:="yes"} +: ${SERVER_NAME:="localhost"} + +DATABASE_CONFIG_PATH="${CONFIG_PATH}/database.yaml" +HOMESERVER_CONFIG_PATH="${CONFIG_PATH}/homeserver.yaml" +SYNAPSE_COMMAND="python -m synapse.app.homeserver" + +. /synapse/bin/activate +cd /synapse + +if [[ -n "${GENERATE_CONFIG}" ]]; then + ${SYNAPSE_COMMAND} \ + --server-name ${SERVER_NAME} \ + --config-path ${HOMESERVER_CONFIG_PATH} \ + --generate-config \ + --report-stats=${REPORT_STATS} + + if [[ -f "${DATABASE_CONFIG_PATH}" ]]; then + echo "Config file '${DATABASE_CONFIG_PATH}' already exists. Remove it if you want it to be generated." + else + echo "Generating ${DATABASE_CONFIG_PATH}..." 
+ if [[ -n "${POSTGRES_PASSWORD}" ]]; then + (cat > ${DATABASE_CONFIG_PATH}) < ${DATABASE_CONFIG_PATH}) < Date: Thu, 28 Sep 2017 12:18:06 +0100 Subject: [PATCH 0178/1637] Delete expired url cache data --- synapse/rest/media/v1/filepath.py | 43 ++++++++- synapse/rest/media/v1/preview_url_resource.py | 90 ++++++++++++++++++- synapse/storage/media_repository.py | 61 +++++++++++++ synapse/storage/prepare_database.py | 2 +- .../schema/delta/44/expire_url_cache.sql | 17 ++++ 5 files changed, 208 insertions(+), 5 deletions(-) create mode 100644 synapse/storage/schema/delta/44/expire_url_cache.sql diff --git a/synapse/rest/media/v1/filepath.py b/synapse/rest/media/v1/filepath.py index d92b7ff337..c5d43209f9 100644 --- a/synapse/rest/media/v1/filepath.py +++ b/synapse/rest/media/v1/filepath.py @@ -73,19 +73,58 @@ class MediaFilePaths(object): ) def url_cache_filepath(self, media_id): + # Media id is of the form + # E.g.: 2017-09-28-fsdRDt24DS234dsf return os.path.join( self.base_path, "url_cache", - media_id[0:2], media_id[2:4], media_id[4:] + media_id[:10], media_id[11:] ) + def url_cache_filepath_dirs_to_delete(self, media_id): + "The dirs to try and remove if we delete the media_id file" + return [ + os.path.join( + self.base_path, "url_cache", + media_id[:10], + ), + ] + def url_cache_thumbnail(self, media_id, width, height, content_type, method): + # Media id is of the form + # E.g.: 2017-09-28-fsdRDt24DS234dsf + top_level_type, sub_type = content_type.split("/") file_name = "%i-%i-%s-%s-%s" % ( width, height, top_level_type, sub_type, method ) + return os.path.join( self.base_path, "url_cache_thumbnails", - media_id[0:2], media_id[2:4], media_id[4:], + media_id[:10], media_id[11:], file_name ) + + def url_cache_thumbnail_directory(self, media_id): + # Media id is of the form + # E.g.: 2017-09-28-fsdRDt24DS234dsf + + return os.path.join( + self.base_path, "url_cache_thumbnails", + media_id[:10], media_id[11:], + ) + + def url_cache_thumbnail_dirs_to_delete(self, media_id): + "The dirs to try and remove if we delete the media_id thumbnails" + # Media id is of the form + # E.g.: 2017-09-28-fsdRDt24DS234dsf + return [ + os.path.join( + self.base_path, "url_cache_thumbnails", + media_id[:10], media_id[11:], + ), + os.path.join( + self.base_path, "url_cache_thumbnails", + media_id[:10], + ), + ] diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index b81a336c5d..c5ba83ddfd 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -36,6 +36,9 @@ import cgi import ujson as json import urlparse import itertools +import datetime +import errno +import shutil import logging logger = logging.getLogger(__name__) @@ -70,6 +73,10 @@ class PreviewUrlResource(Resource): self.downloads = {} + self._cleaner_loop = self.clock.looping_call( + self._expire_url_cache_data, 30 * 10000 + ) + def render_GET(self, request): self._async_render_GET(request) return NOT_DONE_YET @@ -253,8 +260,7 @@ class PreviewUrlResource(Resource): # we're most likely being explicitly triggered by a human rather than a # bot, so are we really a robot? 
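The filepath changes above assume url-cache media ids that start with an ISO date, which is exactly what the preview resource below starts generating. A short sketch of that id scheme and the resulting path bucketing, with random_string replaced by a stdlib equivalent and the helper names invented:

    import datetime
    import os
    import random
    import string

    def new_url_cache_media_id():
        # e.g. "2017-09-28_fsdRDt24DS234dsf": ISO date, then a random tail.
        tail = "".join(random.choice(string.ascii_letters) for _ in range(16))
        return datetime.date.today().isoformat() + "_" + tail

    def url_cache_filepath(base_path, media_id):
        # media_id[:10] is the date bucket; [11:] skips the separator, so a
        # whole day of cache files can be cleaned up directory by directory.
        return os.path.join(base_path, "url_cache", media_id[:10], media_id[11:])
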
- # XXX: horrible duplication with base_resource's _download_remote_file() - file_id = random_string(24) + file_id = datetime.date.today().isoformat() + '_' + random_string(16) fname = self.filepaths.url_cache_filepath(file_id) self.media_repo._makedirs(fname) @@ -328,6 +334,86 @@ class PreviewUrlResource(Resource): "etag": headers["ETag"][0] if "ETag" in headers else None, }) + @defer.inlineCallbacks + def _expire_url_cache_data(self): + """Clean up expired url cache content, media and thumbnails. + """ + now = self.clock.time_msec() + + # First we delete expired url cache entries + media_ids = yield self.store.get_expired_url_cache(now) + + removed_media = [] + for media_id in media_ids: + fname = self.filepaths.url_cache_filepath(media_id) + try: + os.remove(fname) + except OSError as e: + # If the path doesn't exist, meh + if e.errno != errno.ENOENT: + logger.warn("Failed to remove media: %r: %s", media_id, e) + continue + + removed_media.append(media_id) + + try: + dirs = self.filepaths.url_cache_filepath_dirs_to_delete(media_id) + for dir in dirs: + os.rmdir(dir) + except: + pass + + yield self.store.delete_url_cache(removed_media) + + logger.info("Deleted %d entries from url cache", len(removed_media)) + + # Now we delete old images associated with the url cache. + # These may be cached for a bit on the client (i.e., they + # may have a room open with a preview url thing open). + # So we wait a couple of days before deleting, just in case. + expire_before = now - 2 * 24 * 60 * 60 * 1000 + yield self.store.get_url_cache_media_before(expire_before) + + removed_media = [] + for media_id in media_ids: + fname = self.filepaths.url_cache_filepath(media_id) + try: + os.remove(fname) + except OSError as e: + # If the path doesn't exist, meh + if e.errno != errno.ENOENT: + logger.warn("Failed to remove media: %r: %s", media_id, e) + continue + + try: + dirs = self.filepaths.url_cache_filepath_dirs_to_delete(media_id) + for dir in dirs: + os.rmdir(dir) + except: + pass + + thumbnail_dir = self.filepaths.url_cache_thumbnail_directory(media_id) + try: + shutil.rmtree(thumbnail_dir) + except OSError as e: + # If the path doesn't exist, meh + if e.errno != errno.ENOENT: + logger.warn("Failed to remove media: %r: %s", media_id, e) + continue + + removed_media.append(media_id) + + try: + dirs = self.filepaths.url_cache_thumbnail_dirs_to_delete(media_id) + for dir in dirs: + os.rmdir(dir) + except: + pass + + yield self.store.delete_url_cache_media(removed_media) + + logger.info("Deleted %d media from url cache", len(removed_media)) + def decode_and_calc_og(body, media_uri, request_encoding=None): from lxml import etree diff --git a/synapse/storage/media_repository.py b/synapse/storage/media_repository.py index 82bb61b811..5cca14ccb2 100644 --- a/synapse/storage/media_repository.py +++ b/synapse/storage/media_repository.py @@ -238,3 +238,64 @@ class MediaRepositoryStore(SQLBaseStore): }, ) return self.runInteraction("delete_remote_media", delete_remote_media_txn) + + def get_expired_url_cache(self, now_ts): + sql = ( + "SELECT media_id FROM local_media_repository_url_cache" + " WHERE download_ts + expires < ?" + " ORDER BY download_ts + expires ASC" + " LIMIT 100" + ) + + def _get_expired_url_cache_txn(txn): + txn.execute(sql, (now_ts,)) + return [row[0] for row in txn] + + return self.runInteraction("get_expired_url_cache", _get_expired_url_cache_txn) + + def delete_url_cache(self, media_ids): + sql = ( + "DELETE FROM local_media_repository_url_cache" + " WHERE media_id = ?" 
+ ) + + def _delete_url_cache_txn(txn): + txn.executemany(sql, [(media_id) for media_id in media_ids]) + + return self.runInteraction("delete_url_cache", _delete_url_cache_txn) + + def get_url_cache_media_before(self, before_ts): + sql = ( + "SELECT media_id FROM local_media_repository" + " WHERE created_ts < ?" + " ORDER BY created_ts ASC" + " LIMIT 100" + ) + + def _get_url_cache_media_before_txn(txn): + txn.execute(sql, (before_ts,)) + return [row[0] for row in txn] + + return self.runInteraction( + "get_url_cache_media_before", _get_url_cache_media_before_txn, + ) + + def delete_url_cache_media(self, media_ids): + def _delete_url_cache_media_txn(txn): + sql = ( + "DELETE FROM local_media_repository" + " WHERE media_id = ?" + ) + + txn.executemany(sql, [(media_id) for media_id in media_ids]) + + sql = ( + "DELETE FROM local_media_repository_thumbnails" + " WHERE media_id = ?" + ) + + txn.executemany(sql, [(media_id) for media_id in media_ids]) + + return self.runInteraction( + "delete_url_cache_media", _delete_url_cache_media_txn, + ) diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index 72b670b83b..a0af8456f5 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -25,7 +25,7 @@ logger = logging.getLogger(__name__) # Remember to update this number every time a change is made to database # schema files, so the users will be informed on server restarts. -SCHEMA_VERSION = 43 +SCHEMA_VERSION = 44 dir_path = os.path.abspath(os.path.dirname(__file__)) diff --git a/synapse/storage/schema/delta/44/expire_url_cache.sql b/synapse/storage/schema/delta/44/expire_url_cache.sql new file mode 100644 index 0000000000..96202bd2a6 --- /dev/null +++ b/synapse/storage/schema/delta/44/expire_url_cache.sql @@ -0,0 +1,17 @@ +/* Copyright 2017 New Vector Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +CREATE INDEX local_media_repository_url_idx ON local_media_repository(created_ts) WHERE url_cache IS NOT NULL; +CREATE INDEX local_media_repository_url_cache_expires_idx ON local_media_repository_url_cache(download_ts + expires); From 77f1d24de3c696f52bc1ba6d0f61e82f03a9de7a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Sep 2017 12:23:15 +0100 Subject: [PATCH 0179/1637] More brackets --- synapse/storage/schema/delta/44/expire_url_cache.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/schema/delta/44/expire_url_cache.sql b/synapse/storage/schema/delta/44/expire_url_cache.sql index 96202bd2a6..997e790b6d 100644 --- a/synapse/storage/schema/delta/44/expire_url_cache.sql +++ b/synapse/storage/schema/delta/44/expire_url_cache.sql @@ -14,4 +14,4 @@ */ CREATE INDEX local_media_repository_url_idx ON local_media_repository(created_ts) WHERE url_cache IS NOT NULL; -CREATE INDEX local_media_repository_url_cache_expires_idx ON local_media_repository_url_cache(download_ts + expires); +CREATE INDEX local_media_repository_url_cache_expires_idx ON local_media_repository_url_cache((download_ts + expires)); From ae79764fe55ab15156b4f28658326bd2c9c0b937 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Sep 2017 12:37:53 +0100 Subject: [PATCH 0180/1637] Change expires column to expires_ts --- synapse/rest/media/v1/preview_url_resource.py | 4 ++-- synapse/storage/media_repository.py | 14 ++++++------- .../schema/delta/44/expire_url_cache.sql | 21 ++++++++++++++++++- 3 files changed, 29 insertions(+), 10 deletions(-) diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index c5ba83ddfd..6f896ffb53 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -137,7 +137,7 @@ class PreviewUrlResource(Resource): cache_result = yield self.store.get_url_cache(url, ts) if ( cache_result and - cache_result["download_ts"] + cache_result["expires"] > ts and + cache_result["expires_ts"] > ts and cache_result["response_code"] / 100 == 2 ): respond_with_json_bytes( @@ -246,7 +246,7 @@ class PreviewUrlResource(Resource): url, media_info["response_code"], media_info["etag"], - media_info["expires"], + media_info["expires"] + media_info["created_ts"], json.dumps(og), media_info["filesystem_id"], media_info["created_ts"], diff --git a/synapse/storage/media_repository.py b/synapse/storage/media_repository.py index 5cca14ccb2..b8a0dd0762 100644 --- a/synapse/storage/media_repository.py +++ b/synapse/storage/media_repository.py @@ -62,7 +62,7 @@ class MediaRepositoryStore(SQLBaseStore): def get_url_cache_txn(txn): # get the most recently cached result (relative to the given ts) sql = ( - "SELECT response_code, etag, expires, og, media_id, download_ts" + "SELECT response_code, etag, expires_ts, og, media_id, download_ts" " FROM local_media_repository_url_cache" " WHERE url = ? AND download_ts <= ?" " ORDER BY download_ts DESC LIMIT 1" @@ -74,7 +74,7 @@ class MediaRepositoryStore(SQLBaseStore): # ...or if we've requested a timestamp older than the oldest # copy in the cache, return the oldest copy (if any) sql = ( - "SELECT response_code, etag, expires, og, media_id, download_ts" + "SELECT response_code, etag, expires_ts, og, media_id, download_ts" " FROM local_media_repository_url_cache" " WHERE url = ? AND download_ts > ?" 
" ORDER BY download_ts ASC LIMIT 1" @@ -86,14 +86,14 @@ class MediaRepositoryStore(SQLBaseStore): return None return dict(zip(( - 'response_code', 'etag', 'expires', 'og', 'media_id', 'download_ts' + 'response_code', 'etag', 'expires_ts', 'og', 'media_id', 'download_ts' ), row)) return self.runInteraction( "get_url_cache", get_url_cache_txn ) - def store_url_cache(self, url, response_code, etag, expires, og, media_id, + def store_url_cache(self, url, response_code, etag, expires_ts, og, media_id, download_ts): return self._simple_insert( "local_media_repository_url_cache", @@ -101,7 +101,7 @@ class MediaRepositoryStore(SQLBaseStore): "url": url, "response_code": response_code, "etag": etag, - "expires": expires, + "expires_ts": expires_ts, "og": og, "media_id": media_id, "download_ts": download_ts, @@ -242,8 +242,8 @@ class MediaRepositoryStore(SQLBaseStore): def get_expired_url_cache(self, now_ts): sql = ( "SELECT media_id FROM local_media_repository_url_cache" - " WHERE download_ts + expires < ?" - " ORDER BY download_ts + expires ASC" + " WHERE expires_ts < ?" + " ORDER BY expires_ts ASC" " LIMIT 100" ) diff --git a/synapse/storage/schema/delta/44/expire_url_cache.sql b/synapse/storage/schema/delta/44/expire_url_cache.sql index 997e790b6d..9475d53e84 100644 --- a/synapse/storage/schema/delta/44/expire_url_cache.sql +++ b/synapse/storage/schema/delta/44/expire_url_cache.sql @@ -14,4 +14,23 @@ */ CREATE INDEX local_media_repository_url_idx ON local_media_repository(created_ts) WHERE url_cache IS NOT NULL; -CREATE INDEX local_media_repository_url_cache_expires_idx ON local_media_repository_url_cache((download_ts + expires)); + +-- we need to change `expires` to `expires_ts` so that we can index on it. SQLite doesn't support +-- indices on expressions until 3.9. +CREATE TABLE local_media_repository_url_cache_new( + url TEXT, + response_code INTEGER, + etag TEXT, + expires_ts BIGINT, + og TEXT, + media_id TEXT, + download_ts BIGINT +); + +INSERT INTO local_media_repository_url_cache_new + SELECT url, response_code, etag, expires + download_ts, og, media_id, download_ts FROM local_media_repository_url_cache; + +DROP TABLE local_media_repository_url_cache; +ALTER TABLE local_media_repository_url_cache_new RENAME TO local_media_repository_url_cache; + +CREATE INDEX local_media_repository_url_cache_expires_idx ON local_media_repository_url_cache(expires_ts); From 7a44c01d894d85a0eb829b4a82d1aeaff9a39ec9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Sep 2017 12:46:04 +0100 Subject: [PATCH 0181/1637] Fix typo --- synapse/storage/media_repository.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/synapse/storage/media_repository.py b/synapse/storage/media_repository.py index b8a0dd0762..5e39daa210 100644 --- a/synapse/storage/media_repository.py +++ b/synapse/storage/media_repository.py @@ -260,7 +260,7 @@ class MediaRepositoryStore(SQLBaseStore): ) def _delete_url_cache_txn(txn): - txn.executemany(sql, [(media_id) for media_id in media_ids]) + txn.executemany(sql, [(media_id,) for media_id in media_ids]) return self.runInteraction("delete_url_cache", _delete_url_cache_txn) @@ -287,14 +287,14 @@ class MediaRepositoryStore(SQLBaseStore): " WHERE media_id = ?" ) - txn.executemany(sql, [(media_id) for media_id in media_ids]) + txn.executemany(sql, [(media_id,) for media_id in media_ids]) sql = ( "DELETE FROM local_media_repository_thumbnails" " WHERE media_id = ?" 
 )
-            txn.executemany(sql, [(media_id) for media_id in media_ids])
+            txn.executemany(sql, [(media_id,) for media_id in media_ids])

         return self.runInteraction(
             "delete_url_cache_media", _delete_url_cache_media_txn,

From ace807908602cb955fc7a2cae63dc6e64bf90cc5 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 28 Sep 2017 12:52:51 +0100
Subject: [PATCH 0182/1637] Support new and old style media id formats

---
 synapse/rest/media/v1/filepath.py | 112 +++++++++++++++++++++---------
 1 file changed, 81 insertions(+), 31 deletions(-)

diff --git a/synapse/rest/media/v1/filepath.py b/synapse/rest/media/v1/filepath.py
index c5d43209f9..d5cec10127 100644
--- a/synapse/rest/media/v1/filepath.py
+++ b/synapse/rest/media/v1/filepath.py
@@ -14,6 +14,9 @@
 # limitations under the License.

 import os
+import re
+
+NEW_FORMAT_ID_RE = re.compile(r"^\d\d\d\d-\d\d-\d\d")


 class MediaFilePaths(object):
@@ -73,21 +76,39 @@ class MediaFilePaths(object):
         )

     def url_cache_filepath(self, media_id):
-        # Media id is of the form <DATE><RANDOM_STRING>
-        # E.g.: 2017-09-28-fsdRDt24DS234dsf
-        return os.path.join(
-            self.base_path, "url_cache",
-            media_id[:10], media_id[11:]
-        )
+        if NEW_FORMAT_ID_RE.match(media_id):
+            # Media id is of the form <DATE><RANDOM_STRING>
+            # E.g.: 2017-09-28-fsdRDt24DS234dsf
+            return os.path.join(
+                self.base_path, "url_cache",
+                media_id[:10], media_id[11:]
+            )
+        else:
+            return os.path.join(
+                self.base_path, "url_cache",
+                media_id[0:2], media_id[2:4], media_id[4:],
+            )

     def url_cache_filepath_dirs_to_delete(self, media_id):
         "The dirs to try and remove if we delete the media_id file"
-        return [
-            os.path.join(
-                self.base_path, "url_cache",
-                media_id[:10],
-            ),
-        ]
+        if NEW_FORMAT_ID_RE.match(media_id):
+            return [
+                os.path.join(
+                    self.base_path, "url_cache",
+                    media_id[:10],
+                ),
+            ]
+        else:
+            return [
+                os.path.join(
+                    self.base_path, "url_cache",
+                    media_id[0:2], media_id[2:4],
+                ),
+                os.path.join(
+                    self.base_path, "url_cache",
+                    media_id[0:2],
+                ),
+            ]

     def url_cache_thumbnail(self, media_id, width, height, content_type,
                             method):
@@ -99,32 +120,61 @@ class MediaFilePaths(object):
             width, height, top_level_type, sub_type, method
         )

-        return os.path.join(
-            self.base_path, "url_cache_thumbnails",
-            media_id[:10], media_id[11:],
-            file_name
-        )
+        if NEW_FORMAT_ID_RE.match(media_id):
+            return os.path.join(
+                self.base_path, "url_cache_thumbnails",
+                media_id[:10], media_id[11:],
+                file_name
+            )
+        else:
+            return os.path.join(
+                self.base_path, "url_cache_thumbnails",
+                media_id[0:2], media_id[2:4], media_id[4:],
+                file_name
+            )

     def url_cache_thumbnail_directory(self, media_id):
         # Media id is of the form <DATE><RANDOM_STRING>
         # E.g.: 2017-09-28-fsdRDt24DS234dsf
-        return os.path.join(
-            self.base_path, "url_cache_thumbnails",
-            media_id[:10], media_id[11:],
-        )
+        if NEW_FORMAT_ID_RE.match(media_id):
+            return os.path.join(
+                self.base_path, "url_cache_thumbnails",
+                media_id[:10], media_id[11:],
+            )
+        else:
+            return os.path.join(
+                self.base_path, "url_cache_thumbnails",
+                media_id[0:2], media_id[2:4], media_id[4:],
+            )

     def url_cache_thumbnail_dirs_to_delete(self, media_id):
         "The dirs to try and remove if we delete the media_id thumbnails"
         # Media id is of the form <DATE><RANDOM_STRING>
         # E.g.: 2017-09-28-fsdRDt24DS234dsf
-        return [
-            os.path.join(
-                self.base_path, "url_cache_thumbnails",
-                media_id[:10], media_id[11:],
-            ),
-            os.path.join(
-                self.base_path, "url_cache_thumbnails",
-                media_id[:10],
-            ),
-        ]
+        if NEW_FORMAT_ID_RE.match(media_id):
+            return [
+                os.path.join(
+                    self.base_path, "url_cache_thumbnails",
+                    media_id[:10], media_id[11:],
+                ),
+                os.path.join(
+                    self.base_path, "url_cache_thumbnails",
+                    media_id[:10],
+                ),
+            ]
+        else:
+            return [
+                os.path.join(
+                    self.base_path, "url_cache_thumbnails",
+                    media_id[0:2], media_id[2:4], media_id[4:],
+                ),
+                os.path.join(
+                    self.base_path, "url_cache_thumbnails",
+                    media_id[0:2], media_id[2:4],
+                ),
+                os.path.join(
+                    self.base_path, "url_cache_thumbnails",
+                    media_id[0:2],
+                ),
+            ]

From 5f501ec7e2645abe232bd6bab407ac863e3250c2 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 28 Sep 2017 12:59:01 +0100
Subject: [PATCH 0183/1637] Fix typo in url cache expiry timer

---
 synapse/rest/media/v1/preview_url_resource.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index 6f896ffb53..1616809e8f 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -74,7 +74,7 @@ class PreviewUrlResource(Resource):
         self.downloads = {}

         self._cleaner_loop = self.clock.looping_call(
-            self._expire_url_cache_data, 30 * 10000
+            self._expire_url_cache_data, 30 * 1000
         )

     def render_GET(self, request):

From 93247a424a5068b088567fa98b6990e47608b7cb Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 28 Sep 2017 13:48:14 +0100
Subject: [PATCH 0184/1637] Only pull out local media that were for url cache

---
 synapse/storage/media_repository.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/synapse/storage/media_repository.py b/synapse/storage/media_repository.py
index 5e39daa210..1f2eab98e3 100644
--- a/synapse/storage/media_repository.py
+++ b/synapse/storage/media_repository.py
@@ -267,7 +267,7 @@ class MediaRepositoryStore(SQLBaseStore):
     def get_url_cache_media_before(self, before_ts):
         sql = (
             "SELECT media_id FROM local_media_repository"
-            " WHERE created_ts < ?"
+            " WHERE created_ts < ? AND url_cache IS NOT NULL"
             " ORDER BY created_ts ASC"
             " LIMIT 100"
         )

From e1e7d76cf16858d998884f19b141f90a0415d297 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 28 Sep 2017 13:55:29 +0100
Subject: [PATCH 0185/1637] Actually assign result to variable

---
 synapse/rest/media/v1/preview_url_resource.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index 1616809e8f..0123369a7f 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -372,7 +372,7 @@ class PreviewUrlResource(Resource):
         # may have a room open with a preview url thing open).
         # So we wait a couple of days before deleting, just in case.
         expire_before = now - 2 * 24 * 60 * 60 * 1000
-        yield self.store.get_url_cache_media_before(expire_before)
+        media_ids = yield self.store.get_url_cache_media_before(expire_before)

         removed_media = []
         for media_id in media_ids:

From 7cc483aa0ef9e51bd3839768e44b449cf6d24136 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 28 Sep 2017 13:56:53 +0100
Subject: [PATCH 0186/1637] Clear up expired url cache every 10s

---
 synapse/rest/media/v1/preview_url_resource.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index 0123369a7f..2300c263e0 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -74,7 +74,7 @@ class PreviewUrlResource(Resource):
         self.downloads = {}

         self._cleaner_loop = self.clock.looping_call(
-            self._expire_url_cache_data, 30 * 1000
+            self._expire_url_cache_data, 10 * 1000
         )

     def render_GET(self, request):

From 4dc07e93a85f0f6e09a6763a7833ef935be1c417 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 28 Sep 2017 14:10:33 +0100
Subject: [PATCH 0187/1637] Add old indices

---
 synapse/storage/schema/delta/44/expire_url_cache.sql | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/synapse/storage/schema/delta/44/expire_url_cache.sql b/synapse/storage/schema/delta/44/expire_url_cache.sql
index 9475d53e84..e2b775f038 100644
--- a/synapse/storage/schema/delta/44/expire_url_cache.sql
+++ b/synapse/storage/schema/delta/44/expire_url_cache.sql
@@ -34,3 +34,5 @@ DROP TABLE local_media_repository_url_cache;
 ALTER TABLE local_media_repository_url_cache_new RENAME TO local_media_repository_url_cache;

 CREATE INDEX local_media_repository_url_cache_expires_idx ON local_media_repository_url_cache(expires_ts);
+CREATE INDEX local_media_repository_url_cache_by_url_download_ts ON local_media_repository_url_cache(url, download_ts);
+CREATE INDEX local_media_repository_url_cache_media_idx ON local_media_repository_url_cache(media_id);

From 768f00dedbee83dd6bfb7c37bfadc511f7aeb10e Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 28 Sep 2017 14:27:27 +0100
Subject: [PATCH 0188/1637] Up the limits on number of url cache entries to
 delete at one time

---
 synapse/storage/media_repository.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/synapse/storage/media_repository.py b/synapse/storage/media_repository.py
index 1f2eab98e3..7110a71279 100644
--- a/synapse/storage/media_repository.py
+++ b/synapse/storage/media_repository.py
@@ -244,7 +244,7 @@ class MediaRepositoryStore(SQLBaseStore):
             "SELECT media_id FROM local_media_repository_url_cache"
             " WHERE expires_ts < ?"
             " ORDER BY expires_ts ASC"
-            " LIMIT 100"
+            " LIMIT 500"
         )

         def _get_expired_url_cache_txn(txn):
@@ -269,7 +269,7 @@ class MediaRepositoryStore(SQLBaseStore):
             "SELECT media_id FROM local_media_repository"
             " WHERE created_ts < ? AND url_cache IS NOT NULL"
             " ORDER BY created_ts ASC"
-            " LIMIT 100"
+            " LIMIT 500"
        )

         def _get_url_cache_media_before_txn(txn):

From 75e67b9ee4526bc8e5ffd9251ad0370604db13cb Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Thu, 28 Sep 2017 15:24:00 +0100
Subject: [PATCH 0189/1637] Handle SERVFAILs when doing AAAA lookups for
 federation (#2477)

... to cope with people with broken dnssec setups, mostly

---
 synapse/http/endpoint.py | 22 +++++++++++++++++-----
 1 file changed, 17 insertions(+), 5 deletions(-)

diff --git a/synapse/http/endpoint.py b/synapse/http/endpoint.py
index 241b17f2cb..a97532162f 100644
--- a/synapse/http/endpoint.py
+++ b/synapse/http/endpoint.py
@@ -354,16 +354,28 @@ def _get_hosts_for_srv_record(dns_client, host):

         return res[0]

-    def eb(res):
-        res.trap(DNSNameError)
-        return []
+    def eb(res, record_type):
+        if res.check(DNSNameError):
+            return []
+        logger.warn("Error looking up %s for %s: %s",
+                    record_type, host, res, res.value)
+        return res

     # no logcontexts here, so we can safely fire these off and gatherResults
     d1 = dns_client.lookupAddress(host).addCallbacks(cb, eb)
     d2 = dns_client.lookupIPV6Address(host).addCallbacks(cb, eb)
-    results = yield defer.gatherResults([d1, d2], consumeErrors=True)
+    results = yield defer.DeferredList(
+        [d1, d2], consumeErrors=True)
+
+    # if all of the lookups failed, raise an exception rather than blowing out
+    # the cache with an empty result.
+    if results and all(s == defer.FAILURE for (s, _) in results):
+        defer.returnValue(results[0][1])
+
+    for (success, result) in results:
+        if success == defer.FAILURE:
+            continue

-    for result in results:
         for answer in result:
             if not answer.payload:
                 continue

From e43de3ae4b33fb2fad7a4db042f413ecd7448545 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Thu, 28 Sep 2017 13:44:47 +0100
Subject: [PATCH 0190/1637] Improve logging of failures in
 matrixfederationclient

* don't log exception types twice
* not all exceptions have a meaningful 'message'. Use the repr rather
  than attempting to build a string ourselves.

---
 synapse/http/matrixfederationclient.py | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)

diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py
index 747a791f83..6fc3a41c29 100644
--- a/synapse/http/matrixfederationclient.py
+++ b/synapse/http/matrixfederationclient.py
@@ -204,18 +204,15 @@ class MatrixFederationHttpClient(object):
                     raise

                 logger.warn(
-                    "{%s} Sending request failed to %s: %s %s: %s - %s",
+                    "{%s} Sending request failed to %s: %s %s: %s",
                     txn_id,
                     destination,
                     method,
                     url_bytes,
-                    type(e).__name__,
                     _flatten_response_never_received(e),
                 )

-                log_result = "%s - %s" % (
-                    type(e).__name__, _flatten_response_never_received(e),
-                )
+                log_result = _flatten_response_never_received(e)

                 if retries_left and not timeout:
                     if long_retries:
@@ -578,12 +575,14 @@ class _JsonProducer(object):

 def _flatten_response_never_received(e):
     if hasattr(e, "reasons"):
-        return ", ".join(
+        reasons = ", ".join(
             _flatten_response_never_received(f.value)
             for f in e.reasons
         )
+
+        return "%s:[%s]" % (type(e).__name__, reasons)
     else:
-        return "%s: %s" % (type(e).__name__, e.message,)
+        return repr(e)

 def check_content_type_is_json(headers):

From d5694ac5fa3266a777fa171f33bebc0d7477c12a Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 28 Sep 2017 16:08:08 +0100
Subject: [PATCH 0191/1637] Only log if we've removed media

---
 synapse/rest/media/v1/preview_url_resource.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index 2300c263e0..895b480d5c 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -365,7 +365,8 @@ class PreviewUrlResource(Resource):

         yield self.store.delete_url_cache(removed_media)

-        logger.info("Deleted %d entries from url cache", len(removed_media))
+        if removed_media:
+            logger.info("Deleted %d entries from url cache", len(removed_media))

         # Now we delete old images associated with the url cache.
         # These may be cached for a bit on the client (i.e., they
@@ -412,7 +413,8 @@ class PreviewUrlResource(Resource):

         yield self.store.delete_url_cache_media(removed_media)

-        logger.info("Deleted %d media from url cache", len(removed_media))
+        if removed_media:
+            logger.info("Deleted %d media from url cache", len(removed_media))

 def decode_and_calc_og(body, media_uri, request_encoding=None):

From 24d162814bc8c9ba05bfecac04e7218baebf2859 Mon Sep 17 00:00:00 2001
From: Robert Swain
Date: Fri, 29 Sep 2017 11:40:15 +0200
Subject: [PATCH 0192/1637] docker: s/matrix-org/matrixdotorg/g

---
 docker/README.md           | 8 ++++----
 docker/docker-compose.yaml | 2 +-
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/docker/README.md b/docker/README.md
index c9e6fd216d..c15517d0e0 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -5,10 +5,10 @@ Build the docker image with the `docker build` command from the root of the synapse repository.

 ```
-docker build -t matrix-org/synapse:v0.22.1 .
+docker build -t matrixdotorg/synapse:v0.22.1 .
 ```

-The `-t` option sets the image tag. Official images are tagged `matrix-org/synapse:<version>` where `<version>` is the same as the release tag in the synapse git repository.
+The `-t` option sets the image tag. Official images are tagged `matrixdotorg/synapse:<version>` where `<version>` is the same as the release tag in the synapse git repository.

 ## Configure

@@ -35,7 +35,7 @@ docker run \
     -e REPORT_STATS=yes \
     -e SERVER_NAME=example.com \
     -v ${CONFIG_PATH}:/synapse/config/ \
-    matrix-org/synapse:develop
+    matrixdotorg/synapse:v0.22.1
 ```

 This will create a temporary container from the image and use the synapse code for generating configuration files and TLS keys and certificates for the specified `SERVER_NAME` domain. The files are written to `CONFIG_PATH`.

@@ -66,5 +66,5 @@ docker run \
     -d \
     --name synapse \
     -v ${CONFIG_PATH}:/synapse/config/ \
-    matrix-org/synapse:v0.22.1
+    matrixdotorg/synapse:v0.22.1
 ```

diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml
index ff36081a9b..73cc29f8fd 100644
--- a/docker/docker-compose.yaml
+++ b/docker/docker-compose.yaml
@@ -27,7 +27,7 @@ services:
     - postgres-data:/var/lib/postgresql/data/

   synapse:
-    image: matrix-org/synapse:develop
+    image: matrixdotorg/synapse:v0.22.1
     ports:
     - 8008:8008
     - 8448:8448

From cafb8de132999507e9b05c751fbb32d199e7de50 Mon Sep 17 00:00:00 2001
From: Jeremy Cline
Date: Sat, 30 Sep 2017 11:22:37 -0400
Subject: [PATCH 0193/1637] Unfreeze event before serializing with ujson

In newer versions of https://github.com/esnme/ultrajson, ujson does not
serialize frozendicts (introduced in esnme/ultrajson@53f85b1). Although
the PyPI version is still 1.35, Fedora ships with a build from commit
esnme/ultrajson@2f1d487. This causes the serialization to fail if the
distribution-provided package is used.

This runs the event through the unfreeze utility before serializing it.
Thanks to @ignatenkobrain for tracking down the root cause.

fixes #2351

Signed-off-by: Jeremy Cline

---
 synapse/handlers/message.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index be4f123c54..fe9d8848bc 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -26,6 +26,7 @@ from synapse.types import (
 from synapse.util.async import run_on_reactor, ReadWriteLock, Limiter
 from synapse.util.logcontext import preserve_fn
 from synapse.util.metrics import measure_func
+from synapse.util.frozenutils import unfreeze
 from synapse.visibility import filter_events_for_client

 from ._base import BaseHandler
@@ -503,7 +504,7 @@ class MessageHandler(BaseHandler):

         # Ensure that we can round trip before trying to persist in db
         try:
-            dump = ujson.dumps(event.content)
+            dump = ujson.dumps(unfreeze(event.content))
             ujson.loads(dump)
         except:
             logger.exception("Failed to encode content: %r", event.content)

From 7fc1aad195b01c4ffe990fc705ff61d128dc0190 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Mon, 2 Oct 2017 00:53:32 +0100
Subject: [PATCH 0194/1637] Drop search values with nul characters

https://github.com/matrix-org/synapse/issues/2187 contains a report of a
port failing due to nul characters somewhere in the search table. Let's
try dropping the offending rows.

---
 scripts/synapse_port_db | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db
index bc167b59af..dc7fe940e8 100755
--- a/scripts/synapse_port_db
+++ b/scripts/synapse_port_db
@@ -376,10 +376,13 @@ class Porter(object):
                 " VALUES (?,?,?,?,to_tsvector('english', ?),?,?)"
             )

-            rows_dict = [
-                dict(zip(headers, row))
-                for row in rows
-            ]
+            rows_dict = []
+            for row in rows:
+                d = dict(zip(headers, row))
+                if "\0" in d['value']:
+                    logger.warn('dropping search row %s', d)
+                else:
+                    rows_dict.append(d)

             txn.executemany(sql, [
                 (

From e4a709eda3a21de41a2e6921674bb65b89f212a2 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 2 Oct 2017 13:51:38 +0100
Subject: [PATCH 0195/1637] Bump version and change log

---
 CHANGES.rst         | 6 ++++++
 synapse/__init__.py | 2 +-
 2 files changed, 7 insertions(+), 1 deletion(-)

diff --git a/CHANGES.rst b/CHANGES.rst
index 6291fedb9a..4be6604ddd 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -1,3 +1,9 @@
+Changes in synapse v0.23.0 (2017-10-02)
+=======================================
+
+No changes since v0.23.0-rc2
+
+
 Changes in synapse v0.23.0-rc2 (2017-09-26)
 ===========================================

diff --git a/synapse/__init__.py b/synapse/__init__.py
index ec83e6adb7..97d6c4094d 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -16,4 +16,4 @@
 """ This is a reference implementation of a Matrix home server.
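A small runnable sketch of the round-trip problem the ujson patch above works around, assuming synapse's `freeze`/`unfreeze` helpers from `synapse.util.frozenutils` and an affected ujson build:

    import ujson
    from synapse.util.frozenutils import freeze, unfreeze

    content = freeze({"msgtype": "m.text", "body": "hello"})

    # On affected ujson builds, dumping the frozendict directly fails,
    # so thaw it back into plain dicts before serializing:
    dump = ujson.dumps(unfreeze(content))
    ujson.loads(dump)  # now round-trips cleanly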
""" -__version__ = "0.23.0-rc2" +__version__ = "0.23.0" From 3fed5bb25f92586a6467494d4673627abcd25665 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 2 Oct 2017 17:59:34 +0100 Subject: [PATCH 0196/1637] Move quit_with_error --- synapse/app/_base.py | 10 ++++++++++ synapse/app/homeserver.py | 11 +---------- 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/synapse/app/_base.py b/synapse/app/_base.py index cd0e815919..e1ff8f9b7c 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -97,3 +97,13 @@ def start_reactor( daemon.start() else: run() + + +def quit_with_error(error_string): + message_lines = error_string.split("\n") + line_length = max([len(l) for l in message_lines if len(l) < 80]) + 2 + sys.stderr.write("*" * line_length + '\n') + for line in message_lines: + sys.stderr.write(" %s\n" % (line.rstrip(),)) + sys.stderr.write("*" * line_length + '\n') + sys.exit(1) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 84ad8f04a0..3adf72e141 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -25,6 +25,7 @@ from synapse.api.urls import CONTENT_REPO_PREFIX, FEDERATION_PREFIX, \ LEGACY_MEDIA_PREFIX, MEDIA_PREFIX, SERVER_KEY_PREFIX, SERVER_KEY_V2_PREFIX, \ STATIC_PREFIX, WEB_CLIENT_PREFIX from synapse.app import _base +from synapse.app._base import quit_with_error from synapse.config._base import ConfigError from synapse.config.homeserver import HomeServerConfig from synapse.crypto import context_factory @@ -249,16 +250,6 @@ class SynapseHomeServer(HomeServer): return db_conn -def quit_with_error(error_string): - message_lines = error_string.split("\n") - line_length = max([len(l) for l in message_lines if len(l) < 80]) + 2 - sys.stderr.write("*" * line_length + '\n') - for line in message_lines: - sys.stderr.write(" %s\n" % (line.rstrip(),)) - sys.stderr.write("*" * line_length + '\n') - sys.exit(1) - - def setup(config_options): """ Args: From ea87cb1ba5f0a2614043be6f4499cfe842b9b8eb Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 2 Oct 2017 18:03:59 +0100 Subject: [PATCH 0197/1637] Make 'affinity' package optional --- synapse/app/_base.py | 15 ++++++++++++++- synapse/python_dependencies.py | 4 +++- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/synapse/app/_base.py b/synapse/app/_base.py index e1ff8f9b7c..cf4730730d 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -12,10 +12,16 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+
 import gc
 import logging
+import sys
+
+try:
+    import affinity
+except:
+    affinity = None

-import affinity
 from daemonize import Daemonize
 from synapse.util import PreserveLoggingContext
 from synapse.util.rlimit import change_resource_limit
@@ -78,6 +84,13 @@ def start_reactor(
         with PreserveLoggingContext():
             logger.info("Running")
             if cpu_affinity is not None:
+                if not affinity:
+                    quit_with_error(
+                        "Missing package 'affinity' required for cpu_affinity\n"
+                        "option\n\n"
+                        "Install by running:\n\n"
+                        "   pip install affinity\n\n"
+                    )
                 logger.info("Setting CPU affinity to %s" % cpu_affinity)
                 affinity.set_process_affinity_mask(0, cpu_affinity)
             change_resource_limit(soft_file_limit)

diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py
index 630e92c90e..7052333c19 100644
--- a/synapse/python_dependencies.py
+++ b/synapse/python_dependencies.py
@@ -40,7 +40,6 @@ REQUIREMENTS = {
     "pymacaroons-pynacl": ["pymacaroons"],
     "msgpack-python>=0.3.0": ["msgpack"],
     "phonenumbers>=8.2.0": ["phonenumbers"],
-    "affinity": ["affinity"],
 }
 CONDITIONAL_REQUIREMENTS = {
     "web_client": {
@@ -59,6 +58,9 @@ CONDITIONAL_REQUIREMENTS = {
     "psutil": {
         "psutil>=2.0.0": ["psutil>=2.0.0"],
     },
+    "affinity": {
+        "affinity": ["affinity"],
+    },
 }

From 6c1bb1601e43c89637ae5bd8720c255646ca8141 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 2 Oct 2017 18:05:17 +0100
Subject: [PATCH 0198/1637] Bump version and changelog

---
 CHANGES.rst         | 8 ++++++++
 synapse/__init__.py | 2 +-
 2 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/CHANGES.rst b/CHANGES.rst
index 4be6604ddd..f1529e79bd 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -1,3 +1,11 @@
+Changes in synapse v0.23.1 (2017-10-02)
+=======================================
+
+Changes:
+
+* Make 'affinity' package optional, as it is not supported on some platforms
+
+
 Changes in synapse v0.23.0 (2017-10-02)
 =======================================

diff --git a/synapse/__init__.py b/synapse/__init__.py
index 97d6c4094d..bee4aba625 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -16,4 +16,4 @@
 """ This is a reference implementation of a Matrix home server.
 """
-__version__ = "0.23.0"
+__version__ = "0.23.1"

From 30848c0fcd34aaf0b2db7b65c91648ae49c480a2 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 3 Oct 2017 11:09:51 +0100
Subject: [PATCH 0199/1637] Ignore incoming events for rooms that we have left

When synapse receives an event for a room it's not in over federation, it
double checks with the remote server to see if it is in fact in the room.
This is done so that if the server has forgotten about the room (usually
as a result of the database being dropped) it can recover from it.

However, in the presence of state resets in large rooms, this can cause a
lot of work for servers that have legitimately left. As a hacky solution
that supports both cases we drop incoming events for rooms that we have
explicitly left.

This means that we no longer support the case of servers having forgotten
that they've rejoined a room, but that is sufficiently rare that we're not
going to support it for now.
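Condensed to its decision logic, the change the following diff makes looks roughly like this (a sketch only; the real code below is Twisted `inlineCallbacks` style, and the helper name here is invented):

    def decide_pdu_handling(is_in_room, was_in_room):
        """What to do with a PDU for a room we may have left (sketch)."""
        if is_in_room:
            return "process"   # normal case: we are joined
        if was_in_room:
            return "drop"      # we left: ignore state-reset/stale traffic
        return "recheck"       # maybe our DB was vaped: ask the remote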
---
 synapse/handlers/federation.py | 23 +++++++++++++++++++++
 synapse/storage/roommember.py  | 32 ++++++++++++++++++++++++++++++++
 2 files changed, 55 insertions(+)

diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 18f87cad67..b160ff1684 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -125,6 +125,29 @@ class FederationHandler(BaseHandler):
             self.room_queues[pdu.room_id].append((pdu, origin))
             return

+        # If we're no longer in the room just ditch the event entirely. This
+        # is probably an old server that has come back and thinks we're still
+        # in the room.
+        #
+        # If we were never in the room then maybe our database got vaped and
+        # we should check if we *are* in fact in the room. If we are then we
+        # can magically rejoin the room.
+        is_in_room = yield self.auth.check_host_in_room(
+            pdu.room_id,
+            self.server_name
+        )
+        if not is_in_room:
+            was_in_room = yield self.store.was_host_joined(
+                pdu.room_id, self.server_name,
+
+            )
+            if was_in_room:
+                logger.info(
+                    "Ignoring PDU %s for room %s from %s as we've left the room!",
+                    pdu.event_id, pdu.room_id, origin,
+                )
+                return
+
         state = None

         auth_chain = []

diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py
index 457ca288d0..cb0791e591 100644
--- a/synapse/storage/roommember.py
+++ b/synapse/storage/roommember.py
@@ -533,6 +533,38 @@ class RoomMemberStore(SQLBaseStore):

         defer.returnValue(True)

+    @cachedInlineCallbacks()
+    def was_host_joined(self, room_id, host):
+        """Check whether the server is or ever was in the room.
+        """
+        if '%' in host or '_' in host:
+            raise Exception("Invalid host name")
+
+        sql = """
+            SELECT user_id FROM room_memberships
+            WHERE room_id = ?
+                AND user_id LIKE ?
+                AND membership = 'join'
+            LIMIT 1
+        """
+
+        # We do need to be careful to ensure that host doesn't have any wild cards
+        # in it, but we checked above for known ones and we'll check below that
+        # the returned user actually has the correct domain.
+        like_clause = "%:" + host
+
+        rows = yield self._execute("was_host_joined", None, sql, room_id, like_clause)
+
+        if not rows:
+            defer.returnValue(False)
+
+        user_id = rows[0][0]
+        if get_domain_from_id(user_id) != host:
+            # This can only happen if the host name has something funky in it
+            raise Exception("Invalid host name")
+
+        defer.returnValue(True)
+
     def get_joined_hosts(self, room_id, state_entry):
         state_group = state_entry.state_group
         if not state_group:

From f2da6df568178aa542209f2b9413f4fcff5484fd Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 3 Oct 2017 11:31:06 +0100
Subject: [PATCH 0200/1637] Remove spurious line feed

---
 synapse/handlers/federation.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index b160ff1684..7456b23005 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -139,7 +139,6 @@ class FederationHandler(BaseHandler):
         if not is_in_room:
             was_in_room = yield self.store.was_host_joined(
                 pdu.room_id, self.server_name,
-
             )
             if was_in_room:
                 logger.info(

From 84716d267c6d93cfe759e8da336efb3136dc1560 Mon Sep 17 00:00:00 2001
From: David Baker
Date: Tue, 3 Oct 2017 13:53:09 +0100
Subject: [PATCH 0201/1637] Allow spam checker to reject invites too

---
 synapse/handlers/federation.py  |  4 ++++
 synapse/handlers/room_member.py | 20 ++++++++++++++------
 2 files changed, 18 insertions(+), 6 deletions(-)

diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 18f87cad67..32078fde3c 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -77,6 +77,7 @@ class FederationHandler(BaseHandler):
         self.action_generator = hs.get_action_generator()
         self.is_mine_id = hs.is_mine_id
         self.pusher_pool = hs.get_pusherpool()
+        self.spam_checker = hs.get_spam_checker()

         self.replication_layer.set_handler(self)

@@ -1077,6 +1078,9 @@ class FederationHandler(BaseHandler):
         if self.hs.config.block_non_admin_invites:
             raise SynapseError(403, "This server does not accept room invites")

+        if not self.spam_checker.user_may_invite(requester.user):
+            raise SynapseError(403, "This user is not permitted to send invites to this server")
+
         membership = event.content.get("membership")
         if event.type != EventTypes.Member or membership != Membership.INVITE:
             raise SynapseError(400, "The event was not an m.room.member invite event")

diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 9a498c2d3e..61b0140e69 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -48,6 +48,7 @@ class RoomMemberHandler(BaseHandler):

         self.member_linearizer = Linearizer(name="member")
         self.clock = hs.get_clock()
+        self.spam_checker = hs.get_spam_checker()

         self.distributor = hs.get_distributor()
         self.distributor.declare("user_joined_room")
@@ -210,12 +211,19 @@ class RoomMemberHandler(BaseHandler):
             if is_blocked:
                 raise SynapseError(403, "This room has been blocked on this server")

-            if (effective_membership_state == "invite" and
-                    self.hs.config.block_non_admin_invites):
-                is_requester_admin = yield self.auth.is_server_admin(
-                    requester.user,
-                )
-                if not is_requester_admin:
+            if effective_membership_state == "invite":
+                block_invite = False
+                if self.hs.config.block_non_admin_invites:
+                    is_requester_admin = yield self.auth.is_server_admin(
+                        requester.user,
+                    )
+                    if not is_requester_admin:
+                        block_invite = True
+
+                if not self.spam_checker.user_may_invite(requester.user):
+                    block_invite = True
+
+                if block_invite:
                     raise SynapseError(
                         403, "Invites have been disabled on this server",
                     )

From 2a7ed700d51f0a81f563298c78cd4566994ddbab Mon Sep 17 00:00:00 2001
From: David Baker
Date: Tue, 3 Oct 2017 14:04:10 +0100
Subject: [PATCH 0202/1637] Fix param name & lint

---
 synapse/handlers/federation.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 32078fde3c..8571350cc8 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -1078,8 +1078,10 @@ class FederationHandler(BaseHandler):
         if self.hs.config.block_non_admin_invites:
             raise SynapseError(403, "This server does not accept room invites")

-        if not self.spam_checker.user_may_invite(requester.user):
-            raise SynapseError(403, "This user is not permitted to send invites to this server")
+        if not self.spam_checker.user_may_invite(event.sender):
+            raise SynapseError(
+                403, "This user is not permitted to send invites to this server"
+            )

         membership = event.content.get("membership")

From e4ab96021e84ad9cccb2c3e0dea6347cce4e6149 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 3 Oct 2017 14:10:21 +0100
Subject: [PATCH 0203/1637] Update comments

---
 synapse/handlers/federation.py | 2 +-
 synapse/storage/roommember.py  | 7 +++++++
 2 files changed, 8 insertions(+), 1 deletion(-)

diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 7456b23005..77dd0ae1e2 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -127,7 +127,7 @@ class FederationHandler(BaseHandler):

         # If we're no longer in the room just ditch the event entirely. This
         # is probably an old server that has come back and thinks we're still
-        # in the room.
+        # in the room (or we've been rejoined to the room by a state reset).
         #
         # If we were never in the room then maybe our database got vaped and
         # we should check if we *are* in fact in the room. If we are then we

diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py
index cb0791e591..63f6115ba9 100644
--- a/synapse/storage/roommember.py
+++ b/synapse/storage/roommember.py
@@ -536,6 +536,13 @@ class RoomMemberStore(SQLBaseStore):
     @cachedInlineCallbacks()
     def was_host_joined(self, room_id, host):
         """Check whether the server is or ever was in the room.
+
+        Args:
+            room_id (str)
+            host (str)
+
+        Returns:
+            bool: whether the host is/was in the room or not
         """
         if '%' in host or '_' in host:
             raise Exception("Invalid host name")

From 11d62f43c9a28de7efd00a534cfbf05f254bfc3e Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 3 Oct 2017 14:12:28 +0100
Subject: [PATCH 0204/1637] Invalidate cache

---
 synapse/storage/events.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index 7002b3752e..4f0b43c36d 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -784,6 +784,9 @@ class EventsStore(SQLBaseStore):
                 self._invalidate_cache_and_stream(
                     txn, self.is_host_joined, (room_id, host)
                 )
+                self._invalidate_cache_and_stream(
+                    txn, self.was_host_joined, (room_id, host)
+                )

             self._invalidate_cache_and_stream(
                 txn, self.get_users_in_room, (room_id,)

From 41fd9989a28cfd6cc0b401677be61270f3959cfa Mon Sep 17 00:00:00 2001
From: David Baker
Date: Tue, 3 Oct 2017 14:17:44 +0100
Subject: [PATCH 0205/1637] Skip spam check for admin users

---
 synapse/handlers/room_member.py | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 61b0140e69..e88ba0e3a6 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -213,16 +213,16 @@ class RoomMemberHandler(BaseHandler):

             if effective_membership_state == "invite":
                 block_invite = False
-                if self.hs.config.block_non_admin_invites:
-                    is_requester_admin = yield self.auth.is_server_admin(
-                        requester.user,
-                    )
-                    if not is_requester_admin:
+                is_requester_admin = yield self.auth.is_server_admin(
+                    requester.user,
+                )
+                if not is_requester_admin:
+                    if (
+                        self.hs.config.block_non_admin_invites or
+                        not self.spam_checker.user_may_invite(requester.user)
+                    ):
                         block_invite = True

-                if not self.spam_checker.user_may_invite(requester.user):
-                    block_invite = True
-
                 if block_invite:
                     raise SynapseError(
                         403, "Invites have been disabled on this server",
                     )

From 537088e7dceff8af4b283e11e46d7df7e2f38065 Mon Sep 17 00:00:00 2001
From: David Baker
Date: Tue, 3 Oct 2017 14:28:12 +0100
Subject: [PATCH 0206/1637] Actually write wrapper function

---
 synapse/events/spamcheck.py | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py
index e739f105b2..605261f4b5 100644
--- a/synapse/events/spamcheck.py
+++ b/synapse/events/spamcheck.py
@@ -45,3 +45,19 @@ class SpamChecker(object):
             return False

         return self.spam_checker.check_event_for_spam(event)
+
+    def user_may_invite(self, userid):
+        """Checks if a given user may send an invite
+
+        If this method returns false, the invite will be rejected.
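For context, the wrapper being added here (completed in the next hunk) delegates to a pluggable module. A hypothetical module satisfying that interface might look like the sketch below; the class name and blocklist are invented for illustration, and later patches in this series widen `user_may_invite` to take the room and the invitee as well:

    class ExampleSpamChecker(object):
        """A hypothetical spam checker module (illustration only)."""

        def __init__(self, config):
            self.blocked_senders = set(config.get("blocked_senders", []))

        def check_event_for_spam(self, event):
            # Nothing is treated as spam in this toy example.
            return False

        def user_may_invite(self, userid):
            # Refuse invites from explicitly blocked senders.
            return userid not in self.blocked_senders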
+
+        Args:
+            userid (string): The sender's user ID
+
+        Returns:
+            bool: True if the user may send an invite, otherwise False
+        """
+        if self.spam_checker is None:
+            return True
+
+        return self.spam_checker.user_may_invite(userid)

From bd769a81e12ea710accdebbaa296db1c1a625f75 Mon Sep 17 00:00:00 2001
From: David Baker
Date: Tue, 3 Oct 2017 15:16:40 +0100
Subject: [PATCH 0207/1637] better logging

---
 synapse/handlers/room_member.py | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index e88ba0e3a6..76e46d93fe 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -217,10 +217,15 @@ class RoomMemberHandler(BaseHandler):
                     requester.user,
                 )
                 if not is_requester_admin:
-                    if (
-                        self.hs.config.block_non_admin_invites or
-                        not self.spam_checker.user_may_invite(requester.user)
-                    ):
+                    if self.hs.config.block_non_admin_invites:
+                        logger.debug(
+                            "Blocking invite: user is not admin and non-admin "
+                            "invites disabled"
+                        )
+                        block_invite = True
+
+                    if not self.spam_checker.user_may_invite(requester.user):
+                        logger.debug("Blocking invite due to spam checker")
                         block_invite = True

                 if block_invite:

From c46a0d7eb4704c6532a611040a591633dac02b1a Mon Sep 17 00:00:00 2001
From: David Baker
Date: Tue, 3 Oct 2017 15:20:14 +0100
Subject: [PATCH 0208/1637] this shouldn't be debug

---
 synapse/handlers/room_member.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 76e46d93fe..77e5b95e8a 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -218,14 +218,14 @@ class RoomMemberHandler(BaseHandler):
                 )
                 if not is_requester_admin:
                     if self.hs.config.block_non_admin_invites:
-                        logger.debug(
+                        logger.info(
                             "Blocking invite: user is not admin and non-admin "
                             "invites disabled"
                         )
                         block_invite = True

                     if not self.spam_checker.user_may_invite(requester.user):
-                        logger.debug("Blocking invite due to spam checker")
+                        logger.info("Blocking invite due to spam checker")
                         block_invite = True

                 if block_invite:

From c2c188b699e555376912dfea49c42b02c4168270 Mon Sep 17 00:00:00 2001
From: David Baker
Date: Tue, 3 Oct 2017 15:46:19 +0100
Subject: [PATCH 0209/1637] Federation was passing strings anyway so pass
 string everywhere

---
 synapse/handlers/room_member.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 77e5b95e8a..a33a8ad42b 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -224,7 +224,7 @@ class RoomMemberHandler(BaseHandler):
                         )
                         block_invite = True

-                    if not self.spam_checker.user_may_invite(requester.user):
+                    if not self.spam_checker.user_may_invite(requester.user.to_string()):
                         logger.info("Blocking invite due to spam checker")
                         block_invite = True

From 1e375468de914fdefc7c0b4b65217c4ec95784a4 Mon Sep 17 00:00:00 2001
From: David Baker
Date: Tue, 3 Oct 2017 17:13:14 +0100
Subject: [PATCH 0210/1637] pass room id too

---
 synapse/events/spamcheck.py     | 4 ++--
 synapse/handlers/federation.py  | 2 +-
 synapse/handlers/room_member.py | 4 +++-
 3 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py
index 605261f4b5..fe2d22a6f2 100644
--- a/synapse/events/spamcheck.py
+++ b/synapse/events/spamcheck.py
@@ -46,7 +46,7 @@ class SpamChecker(object):

         return self.spam_checker.check_event_for_spam(event)

-    def user_may_invite(self, userid):
+    def user_may_invite(self, userid, roomid):
         """Checks if a given user may send an invite

         If this method returns false, the invite will be rejected.
@@ -60,4 +60,4 @@ class SpamChecker(object):
         if self.spam_checker is None:
             return True

-        return self.spam_checker.user_may_invite(userid)
+        return self.spam_checker.user_may_invite(userid, roomid)

diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 8571350cc8..737fe518ef 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -1078,7 +1078,7 @@ class FederationHandler(BaseHandler):
         if self.hs.config.block_non_admin_invites:
             raise SynapseError(403, "This server does not accept room invites")

-        if not self.spam_checker.user_may_invite(event.sender):
+        if not self.spam_checker.user_may_invite(event.sender, event.room_id):
             raise SynapseError(
                 403, "This user is not permitted to send invites to this server"
             )

diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index a33a8ad42b..37985fa1f9 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -224,7 +224,9 @@ class RoomMemberHandler(BaseHandler):
                         )
                         block_invite = True

-                    if not self.spam_checker.user_may_invite(requester.user.to_string()):
+                    if not self.spam_checker.user_may_invite(
+                        requester.user.to_string(), room_id,
+                    ):
                         logger.info("Blocking invite due to spam checker")
                         block_invite = True

From 1e2ac543516baaec06d4e0ebdd2dbbe003e1f73b Mon Sep 17 00:00:00 2001
From: David Baker
Date: Tue, 3 Oct 2017 17:41:38 +0100
Subject: [PATCH 0211/1637] s/roomid/room_id/

---
 synapse/events/spamcheck.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py
index fe2d22a6f2..8b01c091e9 100644
--- a/synapse/events/spamcheck.py
+++ b/synapse/events/spamcheck.py
@@ -46,7 +46,7 @@ class SpamChecker(object):

         return self.spam_checker.check_event_for_spam(event)

-    def user_may_invite(self, userid, roomid):
+    def user_may_invite(self, userid, room_id):
         """Checks if a given user may send an invite

         If this method returns false, the invite will be rejected.
@@ -60,4 +60,4 @@ class SpamChecker(object):
         if self.spam_checker is None:
             return True

-        return self.spam_checker.user_may_invite(userid, roomid)
+        return self.spam_checker.user_may_invite(userid, room_id)

From 197c14dbcfa9bc5bb281833a91ee035cb154216d Mon Sep 17 00:00:00 2001
From: David Baker
Date: Wed, 4 Oct 2017 10:47:54 +0100
Subject: [PATCH 0212/1637] Add room creation checks to spam checker

Lets the spam checker deny attempts to create rooms and add aliases to
them.

---
 synapse/events/spamcheck.py   | 32 ++++++++++++++++++++++++++++++++
 synapse/handlers/directory.py |  7 +++++++
 synapse/handlers/room.py      |  8 ++++++++
 3 files changed, 47 insertions(+)

diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py
index 8b01c091e9..7cb3468df4 100644
--- a/synapse/events/spamcheck.py
+++ b/synapse/events/spamcheck.py
@@ -61,3 +61,35 @@ class SpamChecker(object):
             return True

         return self.spam_checker.user_may_invite(userid, room_id)
+
+    def user_may_create_room(self, userid):
+        """Checks if a given user may create a room
+
+        If this method returns false, the creation request will be rejected.
+
+        Args:
+            userid (string): The sender's user ID
+
+        Returns:
+            bool: True if the user may create a room, otherwise False
+        """
+        if self.spam_checker is None:
+            return True
+
+        return self.spam_checker.user_may_create_room(userid)
+
+    def user_may_create_room_alias(self, userid, room_alias):
+        """Checks if a given user may create a room alias
+
+        If this method returns false, the association request will be rejected.
+
+        Args:
+            userid (string): The sender's user ID
+
+        Returns:
+            bool: True if the user may create a room alias, otherwise False
+        """
+        if self.spam_checker is None:
+            return True
+
+        return self.spam_checker.user_may_create_room_alias(userid, room_alias)

diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py
index 943554ce98..ed18bb20bb 100644
--- a/synapse/handlers/directory.py
+++ b/synapse/handlers/directory.py
@@ -40,6 +40,8 @@ class DirectoryHandler(BaseHandler):
             "directory", self.on_directory_query
         )

+        self.spam_checker = hs.get_spam_checker()
+
     @defer.inlineCallbacks
     def _create_association(self, room_alias, room_id, servers=None, creator=None):
         # general association creation for both human users and app services
@@ -73,6 +75,11 @@ class DirectoryHandler(BaseHandler):
         # association creation for human users
         # TODO(erikj): Do user auth.

+        if not self.spam_checker.user_may_create_room_alias(user_id, room_alias):
+            raise SynapseError(
+                403, "This user is not permitted to create this alias",
+            )
+
         can_create = yield self.can_modify_alias(
             room_alias,
             user_id=user_id

diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 5698d28088..f909ea04f0 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -60,6 +60,11 @@ class RoomCreationHandler(BaseHandler):
         },
     }

+    def __init__(self, hs):
+        super(RoomCreationHandler, self).__init__(hs)
+
+        self.spam_checker = hs.get_spam_checker()
+
     @defer.inlineCallbacks
     def create_room(self, requester, config, ratelimit=True):
         """ Creates a new room.
@@ -75,6 +80,9 @@ class RoomCreationHandler(BaseHandler):
         """
         user_id = requester.user.to_string()

+        if not self.spam_checker.user_may_create_room(user_id):
+                raise SynapseError(403, "You are not permitted to create rooms")
+
         if ratelimit:
             yield self.ratelimit(requester)

From 78d4ced82941ba249b7b16ea72684ade69c6a0d2 Mon Sep 17 00:00:00 2001
From: David Baker
Date: Wed, 4 Oct 2017 12:44:27 +0100
Subject: [PATCH 0213/1637] un-double indent

---
 synapse/handlers/room.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index f909ea04f0..535ba9517c 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -81,7 +81,7 @@ class RoomCreationHandler(BaseHandler):
         user_id = requester.user.to_string()

         if not self.spam_checker.user_may_create_room(user_id):
-                raise SynapseError(403, "You are not permitted to create rooms")
+            raise SynapseError(403, "You are not permitted to create rooms")

         if ratelimit:
             yield self.ratelimit(requester)

From d8ce68b09b0966330b4da720eeb41719c7c61be6 Mon Sep 17 00:00:00 2001
From: David Baker
Date: Wed, 4 Oct 2017 14:29:33 +0100
Subject: [PATCH 0214/1637] spam check room publishing

---
 synapse/events/spamcheck.py   | 18 ++++++++++++++++++
 synapse/handlers/directory.py |  8 ++++++++
 2 files changed, 26 insertions(+)

diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py
index 7cb3468df4..595b1760f8 100644
--- a/synapse/events/spamcheck.py
+++ b/synapse/events/spamcheck.py
@@ -85,6 +85,7 @@ class SpamChecker(object):

         Args:
             userid (string): The sender's user ID
+            room_alias (string): The alias to be created

         Returns:
             bool: True if the user may create a room alias, otherwise False
@@ -93,3 +94,20 @@ class SpamChecker(object):
             return True

         return self.spam_checker.user_may_create_room_alias(userid, room_alias)
+
+    def user_may_publish_room(self, userid, room_id):
+        """Checks if a given user may publish a room to the directory
+
+        If this method returns false, the publish request will be rejected.
+
+        Args:
+            userid (string): The sender's user ID
+            room_id (string): The ID of the room that would be published
+
+        Returns:
+            bool: True if the user may publish the room, otherwise False
+        """
+        if self.spam_checker is None:
+            return True
+
+        return self.spam_checker.user_may_publish_room(userid, room_id)

diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py
index ed18bb20bb..a0464ae5c0 100644
--- a/synapse/handlers/directory.py
+++ b/synapse/handlers/directory.py
@@ -334,6 +334,14 @@ class DirectoryHandler(BaseHandler):
             room_id (str)
             visibility (str): "public" or "private"
         """
+        if not self.spam_checker.user_may_publish_room(
+            requester.user.to_string(), room_id
+        ):
+            raise AuthError(
+                403,
+                "This user is not permitted to publish rooms to the room list"
+            )
+
         if requester.is_guest:
             raise AuthError(403, "Guests cannot edit the published room list")

From eaaa837e002fa068f23b04b140d538e91ccc2eab Mon Sep 17 00:00:00 2001
From: David Baker
Date: Thu, 5 Oct 2017 11:33:30 +0100
Subject: [PATCH 0215/1637] Fix notif kws that start/end with non-word chars

Only prepend / append word boundary characters if the search expression
starts or ends with a word character, otherwise they don't work because
there's no word boundary between whitespace and a non-word char.
--- synapse/push/push_rule_evaluator.py | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/synapse/push/push_rule_evaluator.py b/synapse/push/push_rule_evaluator.py index 172c27c137..5a34d60abb 100644 --- a/synapse/push/push_rule_evaluator.py +++ b/synapse/push/push_rule_evaluator.py @@ -26,6 +26,8 @@ logger = logging.getLogger(__name__) GLOB_REGEX = re.compile(r'\\\[(\\\!|)(.*)\\\]') IS_GLOB = re.compile(r'[\?\*\[\]]') INEQUALITY_EXPR = re.compile("^([=<>]*)([0-9]*)$") +STARTS_WITH_WORD_CHAR_REGEX = re.compile(r"^\w") +ENDS_WITH_WORD_CHAR_REGEX = re.compile(r"\w$") def _room_member_count(ev, condition, room_member_count): @@ -183,7 +185,7 @@ def _glob_to_re(glob, word_boundary): r, ) if word_boundary: - r = r"\b%s\b" % (r,) + r = _re_word_boundary(r) return re.compile(r, flags=re.IGNORECASE) else: @@ -192,13 +194,30 @@ def _glob_to_re(glob, word_boundary): return re.compile(r, flags=re.IGNORECASE) elif word_boundary: r = re.escape(glob) - r = r"\b%s\b" % (r,) + r = _re_word_boundary(r) return re.compile(r, flags=re.IGNORECASE) else: r = "^" + re.escape(glob) + "$" return re.compile(r, flags=re.IGNORECASE) +def _re_word_boundary(r): + """ + Adds word boundary characters to the start and end of an + expression to require that the match occur as a whole word, + but do so respecting the fact that strings starting or ending + with non-word characters will change word boundaries. + """ + # Matching a regex string aginst a regex, since by definition + # \b is the boundary between a \w and a \W, so match \w at the + # start or end of the expression (although this will miss, eg. + # "[dl]og") + if STARTS_WITH_WORD_CHAR_REGEX.search(r): + r = r"\b%s" % (r,) + if ENDS_WITH_WORD_CHAR_REGEX.search(r): + r = r"%s\b" % (r,) + return r + def _flatten_dict(d, prefix=[], result=None): if result is None: From cbe3c3fdd49b87a452a9a9a229abfdf8dbe45922 Mon Sep 17 00:00:00 2001 From: David Baker Date: Thu, 5 Oct 2017 11:43:10 +0100 Subject: [PATCH 0216/1637] pep8 --- synapse/push/push_rule_evaluator.py | 1 + 1 file changed, 1 insertion(+) diff --git a/synapse/push/push_rule_evaluator.py b/synapse/push/push_rule_evaluator.py index 5a34d60abb..b78f2d90d7 100644 --- a/synapse/push/push_rule_evaluator.py +++ b/synapse/push/push_rule_evaluator.py @@ -201,6 +201,7 @@ def _glob_to_re(glob, word_boundary): r = "^" + re.escape(glob) + "$" return re.compile(r, flags=re.IGNORECASE) + def _re_word_boundary(r): """ Adds word boundary characters to the start and end of an From eaaa837e002fa068f23b04b140d538e91ccc2eab Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 5 Oct 2017 11:43:22 +0100 Subject: [PATCH 0217/1637] Don't corrupt cache --- synapse/groups/groups_server.py | 1 + 1 file changed, 1 insertion(+) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 10bf61d178..991cc12cce 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -109,6 +109,7 @@ class GroupsServerHandler(object): room_id, len(joined_users), with_alias=False, allow_private=True, ) + entry = dict(entry) # so we don't change whats cached entry.pop("room_id", None) room_entry["profile"] = entry From 0c8da8b519fbd8bca984117e354fe57c3a76e154 Mon Sep 17 00:00:00 2001 From: David Baker Date: Thu, 5 Oct 2017 11:57:43 +0100 Subject: [PATCH 0218/1637] Use better method for word boundary searching From https://github.com/matrix-org/matrix-js-sdk/commit/ebc95667b8a5777d13e5d3c679972bedae022fd5 --- synapse/push/push_rule_evaluator.py | 14 +++----------- 1 
 file changed, 3 insertions(+), 11 deletions(-)

diff --git a/synapse/push/push_rule_evaluator.py b/synapse/push/push_rule_evaluator.py
index b78f2d90d7..65f9a63fd8 100644
--- a/synapse/push/push_rule_evaluator.py
+++ b/synapse/push/push_rule_evaluator.py
@@ -26,8 +26,6 @@ logger = logging.getLogger(__name__)
 GLOB_REGEX = re.compile(r'\\\[(\\\!|)(.*)\\\]')
 IS_GLOB = re.compile(r'[\?\*\[\]]')
 INEQUALITY_EXPR = re.compile("^([=<>]*)([0-9]*)$")
-STARTS_WITH_WORD_CHAR_REGEX = re.compile(r"^\w")
-ENDS_WITH_WORD_CHAR_REGEX = re.compile(r"\w$")

 def _room_member_count(ev, condition, room_member_count):
@@ -209,15 +207,9 @@ def _re_word_boundary(r):
     but do so respecting the fact that strings starting or ending
     with non-word characters will change word boundaries.
     """
-    # Matching a regex string aginst a regex, since by definition
-    # \b is the boundary between a \w and a \W, so match \w at the
-    # start or end of the expression (although this will miss, eg.
-    # "[dl]og")
-    if STARTS_WITH_WORD_CHAR_REGEX.search(r):
-        r = r"\b%s" % (r,)
-    if ENDS_WITH_WORD_CHAR_REGEX.search(r):
-        r = r"%s\b" % (r,)
-    return r
+    # we can't use \b as it chokes on unicode. however \W seems to be okay
+    # as shorthand for [^0-9A-Za-z_].
+    return r"(^|\W)%s(\W|$)" % (r,)

 def _flatten_dict(d, prefix=[], result=None):

From fa969cfdde72a2d136eba08eb99e00d47ddb5cdf Mon Sep 17 00:00:00 2001
From: David Baker
Date: Thu, 5 Oct 2017 12:39:18 +0100
Subject: [PATCH 0219/1637] Support for channel notifications

Add condition type to check the sender's power level and add a base
rule using it for @channel notifications.

---
 synapse/push/baserules.py                | 23 ++++++++++++++++++++++
 synapse/push/bulk_push_rule_evaluator.py | 19 +++++++++++++++++-
 synapse/push/push_rule_evaluator.py      | 25 +++++++++++++++++-------
 3 files changed, 59 insertions(+), 8 deletions(-)

diff --git a/synapse/push/baserules.py b/synapse/push/baserules.py
index 85effdfa46..354b1f4493 100644
--- a/synapse/push/baserules.py
+++ b/synapse/push/baserules.py
@@ -1,4 +1,5 @@
 # Copyright 2015, 2016 OpenMarket Ltd
+# Copyright 2017 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
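A quick, runnable demonstration of why patches 0215/0218 above replace the `\b` anchors with explicit `(^|\W)` / `(\W|$)` groups — which is also what lets keywords like the `@channel`/`@room` pattern introduced in the next hunks match at all. This is a standalone sketch, not taken from the patches:

    import re

    kw = re.escape("@room")

    # \b only exists between a \w and a \W, so it never fires between a
    # space and "@" -- the plain word-boundary pattern misses the mention:
    assert re.search(r"\b%s\b" % (kw,), "hey @room!") is None

    # The explicit groups treat start/end of string and any non-word
    # character as a boundary, so the same text now matches:
    assert re.search(r"(^|\W)%s(\W|$)" % (kw,), "hey @room!") is not None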
@@ -238,6 +239,28 @@ BASE_APPEND_OVERRIDE_RULES = [
             }
         ]
     },
+    {
+        'rule_id': 'global/underride/.m.rule.channelnotif',
+        'conditions': [
+            {
+                'kind': 'event_match',
+                'key': 'content.body',
+                'pattern': '*@channel*',
+                '_id': '_channelnotif_content',
+            },
+            {
+                'kind': 'sender_power_level',
+                'is': '>=50',
+                '_id': '_channelnotif_pl',
+            },
+        ],
+        'actions': [
+            'notify', {
+                'set_tweak': 'highlight',
+                'value': True,
+            }
+        ]
+    }
 ]

diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py
index b0d64aa6c4..6459eec225 100644
--- a/synapse/push/bulk_push_rule_evaluator.py
+++ b/synapse/push/bulk_push_rule_evaluator.py
@@ -19,6 +19,7 @@ from twisted.internet import defer

 from .push_rule_evaluator import PushRuleEvaluatorForEvent

+from synapse.event_auth import get_user_power_level
 from synapse.api.constants import EventTypes, Membership
 from synapse.metrics import get_metrics_for
 from synapse.util.caches import metrics as cache_metrics
@@ -59,6 +60,7 @@ class BulkPushRuleEvaluator(object):
     def __init__(self, hs):
         self.hs = hs
         self.store = hs.get_datastore()
+        self.auth = hs.get_auth()

         self.room_push_rule_cache_metrics = cache_metrics.register_cache(
             "cache",
@@ -108,6 +110,17 @@ class BulkPushRuleEvaluator(object):
             self.room_push_rule_cache_metrics,
         )

+    @defer.inlineCallbacks
+    def _get_sender_power_level(self, event, context):
+        auth_events_ids = yield self.auth.compute_auth_events(
+            event, context.prev_state_ids, for_verification=False,
+        )
+        auth_events = yield self.store.get_events(auth_events_ids)
+        auth_events = {
+            (e.type, e.state_key): e for e in auth_events.values()
+        }
+        defer.returnValue(get_user_power_level(event.sender, auth_events))
+
     @defer.inlineCallbacks
     def action_for_event_by_user(self, event, context):
         """Given an event and context, evaluate the push rules and return
@@ -123,7 +136,11 @@ class BulkPushRuleEvaluator(object):
             event, context
         )

-        evaluator = PushRuleEvaluatorForEvent(event, len(room_members))
+        sender_power_level = yield self._get_sender_power_level(event, context)
+
+        evaluator = PushRuleEvaluatorForEvent(
+            event, len(room_members), sender_power_level
+        )

         condition_cache = {}

diff --git a/synapse/push/push_rule_evaluator.py b/synapse/push/push_rule_evaluator.py
index 65f9a63fd8..9cf3f9c632 100644
--- a/synapse/push/push_rule_evaluator.py
+++ b/synapse/push/push_rule_evaluator.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2015, 2016 OpenMarket Ltd
+# Copyright 2015 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
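The `'is': '>=50'` condition in the new rule above is evaluated with the same inequality parser as `room_member_count`; the evaluator hunk that follows generalises it. A standalone sketch of that evaluation (the regex is the `INEQUALITY_EXPR` from push_rule_evaluator.py; the helper name is invented):

    import re

    INEQUALITY_EXPR = re.compile("^([=<>]*)([0-9]*)$")

    def test_ineq_condition(condition, number):
        """Evaluate conditions such as {'is': '>=50'} against a number."""
        m = INEQUALITY_EXPR.match(condition.get("is", ""))
        if not m or not m.group(2):
            return False
        ineq, rhs = m.group(1), int(m.group(2))
        return {
            "": number == rhs, "==": number == rhs,
            "<": number < rhs, ">": number > rhs,
            ">=": number >= rhs, "<=": number <= rhs,
        }.get(ineq, False)

    assert test_ineq_condition({"is": ">=50"}, 100)    # moderator or above
    assert not test_ineq_condition({"is": ">=50"}, 0)  # ordinary member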
@@ -29,6 +30,12 @@ INEQUALITY_EXPR = re.compile("^([=<>]*)([0-9]*)$") def _room_member_count(ev, condition, room_member_count): + return _test_ineq_condition(condition, room_member_count) + +def _sender_power_level(ev, condition, power_level): + return _test_ineq_condition(condition, power_level) + +def _test_ineq_condition(condition, number): if 'is' not in condition: return False m = INEQUALITY_EXPR.match(condition['is']) @@ -41,19 +48,18 @@ def _room_member_count(ev, condition, room_member_count): rhs = int(rhs) if ineq == '' or ineq == '==': - return room_member_count == rhs + return number == rhs elif ineq == '<': - return room_member_count < rhs + return number < rhs elif ineq == '>': - return room_member_count > rhs + return number > rhs elif ineq == '>=': - return room_member_count >= rhs + return number >= rhs elif ineq == '<=': - return room_member_count <= rhs + return number <= rhs else: return False - def tweaks_for_actions(actions): tweaks = {} for a in actions: @@ -65,9 +71,10 @@ def tweaks_for_actions(actions): class PushRuleEvaluatorForEvent(object): - def __init__(self, event, room_member_count): + def __init__(self, event, room_member_count, sender_power_level): self._event = event self._room_member_count = room_member_count + self._sender_power_level = sender_power_level # Maps strings of e.g. 'content.body' -> event["content"]["body"] self._value_cache = _flatten_dict(event) @@ -81,6 +88,10 @@ class PushRuleEvaluatorForEvent(object): return _room_member_count( self._event, condition, self._room_member_count ) + elif condition['kind'] == 'sender_power_level': + return _sender_power_level( + self._event, condition, self._sender_power_level + ) else: return True From b9b9714fd5c3fa4c209d3ca1f6ddde365373ec98 Mon Sep 17 00:00:00 2001 From: David Baker Date: Thu, 5 Oct 2017 13:02:19 +0100 Subject: [PATCH 0220/1637] Get rule type right --- synapse/push/baserules.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/push/baserules.py b/synapse/push/baserules.py index 354b1f4493..3b290a22a1 100644 --- a/synapse/push/baserules.py +++ b/synapse/push/baserules.py @@ -240,7 +240,7 @@ BASE_APPEND_OVERRIDE_RULES = [ ] }, { - 'rule_id': 'global/underride/.m.rule.channelnotif', + 'rule_id': 'global/override/.m.rule.channelnotif', 'conditions': [ { 'kind': 'event_match', From 985ce80375b601d500ae58faa032874a794e942a Mon Sep 17 00:00:00 2001 From: David Baker Date: Thu, 5 Oct 2017 13:03:44 +0100 Subject: [PATCH 0221/1637] They're called rooms --- synapse/push/baserules.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/synapse/push/baserules.py b/synapse/push/baserules.py index 3b290a22a1..71f9ab6b25 100644 --- a/synapse/push/baserules.py +++ b/synapse/push/baserules.py @@ -240,18 +240,18 @@ BASE_APPEND_OVERRIDE_RULES = [ ] }, { - 'rule_id': 'global/override/.m.rule.channelnotif', + 'rule_id': 'global/override/.m.rule.roomnotif', 'conditions': [ { 'kind': 'event_match', 'key': 'content.body', - 'pattern': '*@channel*', - '_id': '_channelnotif_content', + 'pattern': '*@room*', + '_id': '_roomnotif_content', }, { 'kind': 'sender_power_level', 'is': '>=50', - '_id': '_channelnotif_pl', + '_id': '_roomnotif_pl', }, ], 'actions': [ From e433393c4fded875cc38b09b02b453b7a014a9af Mon Sep 17 00:00:00 2001 From: David Baker Date: Thu, 5 Oct 2017 13:08:02 +0100 Subject: [PATCH 0222/1637] pep8 --- synapse/push/push_rule_evaluator.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/synapse/push/push_rule_evaluator.py 
b/synapse/push/push_rule_evaluator.py index 9cf3f9c632..a91dc0ee08 100644 --- a/synapse/push/push_rule_evaluator.py +++ b/synapse/push/push_rule_evaluator.py @@ -32,9 +32,11 @@ INEQUALITY_EXPR = re.compile("^([=<>]*)([0-9]*)$") def _room_member_count(ev, condition, room_member_count): return _test_ineq_condition(condition, room_member_count) + def _sender_power_level(ev, condition, power_level): return _test_ineq_condition(condition, power_level) + def _test_ineq_condition(condition, number): if 'is' not in condition: return False @@ -60,6 +62,7 @@ def _test_ineq_condition(condition, number): else: return False + def tweaks_for_actions(actions): tweaks = {} for a in actions: From ed80c6b6cc6da27849038a1b83bec7fa1ac54b3e Mon Sep 17 00:00:00 2001 From: David Baker Date: Thu, 5 Oct 2017 13:20:22 +0100 Subject: [PATCH 0223/1637] Add fastpath optimisation --- synapse/push/bulk_push_rule_evaluator.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index 6459eec225..ca3b5af807 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -112,9 +112,15 @@ class BulkPushRuleEvaluator(object): @defer.inlineCallbacks def _get_sender_power_level(self, event, context): - auth_events_ids = yield self.auth.compute_auth_events( - event, context.prev_state_ids, for_verification=False, - ) + pl_event_key = (EventTypes.PowerLevels, "", ) + if pl_event_key in context.prev_state_ids: + # fastpath: if there's a power level event, that's all we need, and + # not having a power level event is an extreme edge case + auth_events_ids = [context.prev_state_ids[pl_event_key]] + else: + auth_events_ids = yield self.auth.compute_auth_events( + event, context.prev_state_ids, for_verification=False, + ) auth_events = yield self.store.get_events(auth_events_ids) auth_events = { (e.type, e.state_key): e for e in auth_events.values() From 269af961e9910e254f5abc979f0bd293687414f3 Mon Sep 17 00:00:00 2001 From: David Baker Date: Thu, 5 Oct 2017 13:27:12 +0100 Subject: [PATCH 0224/1637] Make be faster --- synapse/push/bulk_push_rule_evaluator.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index ca3b5af807..df16d5ce9e 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -112,11 +112,11 @@ class BulkPushRuleEvaluator(object): @defer.inlineCallbacks def _get_sender_power_level(self, event, context): - pl_event_key = (EventTypes.PowerLevels, "", ) - if pl_event_key in context.prev_state_ids: + pl_event_id = context.prev_state_ids.get((EventTypes.PowerLevels, "",)) + if pl_event_id: # fastpath: if there's a power level event, that's all we need, and # not having a power level event is an extreme edge case - auth_events_ids = [context.prev_state_ids[pl_event_key]] + auth_events_ids = [pl_event_id] else: auth_events_ids = yield self.auth.compute_auth_events( event, context.prev_state_ids, for_verification=False, From f878e6f8af9e80cfa4be717c03cc4f9853a93794 Mon Sep 17 00:00:00 2001 From: David Baker Date: Thu, 5 Oct 2017 14:02:28 +0100 Subject: [PATCH 0225/1637] Spam checking: add the invitee to user_may_invite --- synapse/events/spamcheck.py | 4 ++-- synapse/handlers/federation.py | 12 +++++++----- synapse/handlers/room_member.py | 2 +- 3 files changed, 10 insertions(+), 8 deletions(-) diff --git 
a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py index 595b1760f8..dccc579eac 100644 --- a/synapse/events/spamcheck.py +++ b/synapse/events/spamcheck.py @@ -46,7 +46,7 @@ class SpamChecker(object): return self.spam_checker.check_event_for_spam(event) - def user_may_invite(self, userid, room_id): + def user_may_invite(self, inviter_userid, invitee_userid, room_id): """Checks if a given user may send an invite If this method returns false, the invite will be rejected. @@ -60,7 +60,7 @@ class SpamChecker(object): if self.spam_checker is None: return True - return self.spam_checker.user_may_invite(userid, room_id) + return self.spam_checker.user_may_invite(inviter_userid, invitee_userid, room_id) def user_may_create_room(self, userid): """Checks if a given user may create a room diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 737fe518ef..8fccf8bab3 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1071,6 +1071,9 @@ class FederationHandler(BaseHandler): """ event = pdu + if event.state_key is None: + raise SynapseError(400, "The invite event did not have a state key") + is_blocked = yield self.store.is_room_blocked(event.room_id) if is_blocked: raise SynapseError(403, "This room has been blocked on this server") @@ -1078,9 +1081,11 @@ class FederationHandler(BaseHandler): if self.hs.config.block_non_admin_invites: raise SynapseError(403, "This server does not accept room invites") - if not self.spam_checker.user_may_invite(event.sender, event.room_id): + if not self.spam_checker.user_may_invite( + event.sender, event.state_key, event.room_id, + ): raise SynapseError( - 403, "This user is not permitted to send invites to this server" + 403, "This user is not permitted to send invites to this server/user" ) membership = event.content.get("membership") @@ -1091,9 +1096,6 @@ class FederationHandler(BaseHandler): if sender_domain != origin: raise SynapseError(400, "The invite event was not from the server sending it") - if event.state_key is None: - raise SynapseError(400, "The invite event did not have a state key") - if not self.is_mine_id(event.state_key): raise SynapseError(400, "The invite event must be for this server") diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 37985fa1f9..36a8ef8ce0 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -225,7 +225,7 @@ class RoomMemberHandler(BaseHandler): block_invite = True if not self.spam_checker.user_may_invite( - requester.user.to_string(), room_id, + requester.user.to_string(), target.to_string(), room_id, ): logger.info("Blocking invite due to spam checker") block_invite = True From 3ddda939d35951896faa48631a3fe023e89e13e1 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 5 Oct 2017 14:58:17 +0100 Subject: [PATCH 0226/1637] some comments in the state res code --- synapse/state.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/synapse/state.py b/synapse/state.py index 390799fbd5..dcdcdef65e 100644 --- a/synapse/state.py +++ b/synapse/state.py @@ -288,6 +288,9 @@ class StateHandler(object): """ logger.debug("resolve_state_groups event_ids %s", event_ids) + # map from state group id to the state in that state group (where + # 'state' is a map from state key to event id) + # dict[int, dict[(str, str), str]] state_groups_ids = yield self.store.get_state_groups_ids( room_id, event_ids ) @@ -320,11 +323,15 @@ class StateHandler(object): "Resolving state for %s with %d 
groups", room_id, len(state_groups_ids) ) + # build a map from state key to the event_ids which set that state. + # dict[(str, str), set[str]) state = {} for st in state_groups_ids.values(): for key, e_id in st.items(): state.setdefault(key, set()).add(e_id) + # build a map from state key to the event_ids which set that state, + # including only those where there are state keys in conflict. conflicted_state = { k: list(v) for k, v in state.items() @@ -494,8 +501,14 @@ def _resolve_with_state_fac(unconflicted_state, conflicted_state, logger.info("Asking for %d conflicted events", len(needed_events)) + # dict[str, FrozenEvent]: a map from state event id to event. Only includes + # the state events which are in conflict. state_map = yield state_map_factory(needed_events) + # get the ids of the auth events which allow us to authenticate the + # conflicted state, picking only from the unconflicting state. + # + # dict[(str, str), str]: a map from state key to event id auth_events = _create_auth_events_from_maps( unconflicted_state, conflicted_state, state_map ) From c8f568ddf9e8827d8971e14137663fa8df5b57d2 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 6 Oct 2017 22:14:24 +0100 Subject: [PATCH 0227/1637] Fix up deferred handling in federation.py * Avoid preserve_context_over_deferred, which is broken * set consumeErrors=True on defer.gatherResults, to avoid spurious "unhandled failure" erros --- synapse/handlers/federation.py | 45 ++++++++++++++++------------------ 1 file changed, 21 insertions(+), 24 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 8fccf8bab3..63c56a4a32 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -14,7 +14,6 @@ # limitations under the License. 
"""Contains handlers for federation events.""" -import synapse.util.logcontext from signedjson.key import decode_verify_key_bytes from signedjson.sign import verify_signed_json from unpaddedbase64 import decode_base64 @@ -26,10 +25,7 @@ from synapse.api.errors import ( ) from synapse.api.constants import EventTypes, Membership, RejectedReason from synapse.events.validator import EventValidator -from synapse.util import unwrapFirstError -from synapse.util.logcontext import ( - preserve_fn, preserve_context_over_deferred -) +from synapse.util import unwrapFirstError, logcontext from synapse.util.metrics import measure_func from synapse.util.logutils import log_function from synapse.util.async import run_on_reactor, Linearizer @@ -592,9 +588,9 @@ class FederationHandler(BaseHandler): missing_auth - failed_to_fetch ) - results = yield preserve_context_over_deferred(defer.gatherResults( + results = yield logcontext.make_deferred_yieldable(defer.gatherResults( [ - preserve_fn(self.replication_layer.get_pdu)( + logcontext.preserve_fn(self.replication_layer.get_pdu)( [dest], event_id, outlier=True, @@ -786,10 +782,14 @@ class FederationHandler(BaseHandler): event_ids = list(extremities.keys()) logger.debug("calling resolve_state_groups in _maybe_backfill") - states = yield preserve_context_over_deferred(defer.gatherResults([ - preserve_fn(self.state_handler.resolve_state_groups)(room_id, [e]) - for e in event_ids - ])) + states = yield logcontext.make_deferred_yieldable(defer.gatherResults( + [ + logcontext.preserve_fn(self.state_handler.resolve_state_groups)( + room_id, [e] + ) + for e in event_ids + ], consumeErrors=True, + )) states = dict(zip(event_ids, [s.state for s in states])) state_map = yield self.store.get_events( @@ -942,9 +942,7 @@ class FederationHandler(BaseHandler): # lots of requests for missing prev_events which we do actually # have. Hence we fire off the deferred, but don't wait for it. - synapse.util.logcontext.preserve_fn(self._handle_queued_pdus)( - room_queue - ) + logcontext.preserve_fn(self._handle_queued_pdus)(room_queue) defer.returnValue(True) @@ -1438,7 +1436,7 @@ class FederationHandler(BaseHandler): if not backfilled: # this intentionally does not yield: we don't care about the result # and don't need to wait for it. - preserve_fn(self.pusher_pool.on_new_notifications)( + logcontext.preserve_fn(self.pusher_pool.on_new_notifications)( event_stream_id, max_stream_id ) @@ -1451,16 +1449,16 @@ class FederationHandler(BaseHandler): a bunch of outliers, but not a chunk of individual events that depend on each other for state calculations. """ - contexts = yield preserve_context_over_deferred(defer.gatherResults( + contexts = yield logcontext.make_deferred_yieldable(defer.gatherResults( [ - preserve_fn(self._prep_event)( + logcontext.preserve_fn(self._prep_event)( origin, ev_info["event"], state=ev_info.get("state"), auth_events=ev_info.get("auth_events"), ) for ev_info in event_infos - ] + ], consumeErrors=True, )) yield self.store.persist_events( @@ -1768,18 +1766,17 @@ class FederationHandler(BaseHandler): # Do auth conflict res. 
logger.info("Different auth: %s", different_auth) - different_events = yield preserve_context_over_deferred(defer.gatherResults( - [ - preserve_fn(self.store.get_event)( + different_events = yield logcontext.make_deferred_yieldable( + defer.gatherResults([ + logcontext.preserve_fn(self.store.get_event)( d, allow_none=True, allow_rejected=False, ) for d in different_auth if d in have_events and not have_events[d] - ], - consumeErrors=True - )).addErrback(unwrapFirstError) + ], consumeErrors=True) + ).addErrback(unwrapFirstError) if different_events: local_view = dict(auth_events) From 148428ce763978583da2b1d3c435ec321df45855 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 6 Oct 2017 22:24:28 +0100 Subject: [PATCH 0228/1637] Fix logcontext handling for concurrently_execute Avoid preserve_context_over_deferred, which is broken. --- synapse/util/async.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/util/async.py b/synapse/util/async.py index 1453faf0ef..bb252f75d7 100644 --- a/synapse/util/async.py +++ b/synapse/util/async.py @@ -19,7 +19,7 @@ from twisted.internet import defer, reactor from .logcontext import ( PreserveLoggingContext, preserve_fn, preserve_context_over_deferred, ) -from synapse.util import unwrapFirstError +from synapse.util import logcontext, unwrapFirstError from contextlib import contextmanager @@ -155,7 +155,7 @@ def concurrently_execute(func, args, limit): except StopIteration: pass - return preserve_context_over_deferred(defer.gatherResults([ + return logcontext.make_deferred_yieldable(defer.gatherResults([ preserve_fn(_concurrently_execute_inner)() for _ in xrange(limit) ], consumeErrors=True)).addErrback(unwrapFirstError) From 01bbacf3c49f4311fddb61ef1ff98ee4f55fc44b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 6 Oct 2017 15:12:43 +0100 Subject: [PATCH 0229/1637] Fix up logcontext handling in (federation) TransactionQueue Avoid using preserve_context_over_function, which has problems with respect to logcontexts. --- synapse/federation/transaction_queue.py | 48 ++++++++++++++++--------- 1 file changed, 32 insertions(+), 16 deletions(-) diff --git a/synapse/federation/transaction_queue.py b/synapse/federation/transaction_queue.py index 003eaba893..7a3c9cbb70 100644 --- a/synapse/federation/transaction_queue.py +++ b/synapse/federation/transaction_queue.py @@ -20,8 +20,8 @@ from .persistence import TransactionActions from .units import Transaction, Edu from synapse.api.errors import HttpResponseException +from synapse.util import logcontext from synapse.util.async import run_on_reactor -from synapse.util.logcontext import preserve_context_over_fn, preserve_fn from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter from synapse.util.metrics import measure_func from synapse.handlers.presence import format_user_presence_state, get_interested_remotes @@ -231,11 +231,9 @@ class TransactionQueue(object): (pdu, order) ) - preserve_context_over_fn( - self._attempt_new_transaction, destination - ) + self._attempt_new_transaction(destination) - @preserve_fn # the caller should not yield on this + @logcontext.preserve_fn # the caller should not yield on this @defer.inlineCallbacks def send_presence(self, states): """Send the new presence states to the appropriate destinations. 
@@ -299,7 +297,7 @@ class TransactionQueue(object): state.user_id: state for state in states }) - preserve_fn(self._attempt_new_transaction)(destination) + self._attempt_new_transaction(destination) def send_edu(self, destination, edu_type, content, key=None): edu = Edu( @@ -321,9 +319,7 @@ class TransactionQueue(object): else: self.pending_edus_by_dest.setdefault(destination, []).append(edu) - preserve_context_over_fn( - self._attempt_new_transaction, destination - ) + self._attempt_new_transaction(destination) def send_failure(self, failure, destination): if destination == self.server_name or destination == "localhost": @@ -336,9 +332,7 @@ class TransactionQueue(object): destination, [] ).append(failure) - preserve_context_over_fn( - self._attempt_new_transaction, destination - ) + self._attempt_new_transaction(destination) def send_device_messages(self, destination): if destination == self.server_name or destination == "localhost": @@ -347,15 +341,24 @@ class TransactionQueue(object): if not self.can_send_to(destination): return - preserve_context_over_fn( - self._attempt_new_transaction, destination - ) + self._attempt_new_transaction(destination) def get_current_token(self): return 0 - @defer.inlineCallbacks def _attempt_new_transaction(self, destination): + """Try to start a new transaction to this destination + + If there is already a transaction in progress to this destination, + returns immediately. Otherwise kicks off the process of sending a + transaction in the background. + + Args: + destination (str): + + Returns: + None + """ # list of (pending_pdu, deferred, order) if destination in self.pending_transactions: # XXX: pending_transactions can get stuck on by a never-ending @@ -368,6 +371,19 @@ class TransactionQueue(object): ) return + logger.debug("TX [%s] Starting transaction loop", destination) + + # Drop the logcontext before starting the transaction. It doesn't + # really make sense to log all the outbound transactions against + # whatever path led us to this point: that's pretty arbitrary really. + # + # (this also means we can fire off _perform_transaction without + # yielding) + with logcontext.PreserveLoggingContext(): + self._transaction_transmission_loop(destination) + + @defer.inlineCallbacks + def _transaction_transmission_loop(self, destination): pending_pdus = [] try: self.pending_transactions[destination] = 1 From e8496efe8467568f488c6f53056be4bf69fd56e1 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 9 Oct 2017 15:17:34 +0100 Subject: [PATCH 0230/1637] Fix up comment --- synapse/storage/roommember.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index 63f6115ba9..a0fc9a6867 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -542,7 +542,8 @@ class RoomMemberStore(SQLBaseStore): host (str) Returns: - bool: whether the host is/was in the room or not + Deferred: Resolves to True if the host is/was in the room, otherwise + False. """ if '%' in host or '_' in host: raise Exception("Invalid host name") From 3cc852d339ad1cdcb0a435c76b44182bdb81dfe9 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 9 Oct 2017 17:44:42 +0100 Subject: [PATCH 0231/1637] Fancy logformatter to format exceptions better This is a bit of an experimental change at this point; the idea is to see if it helps us track down where our stack overflows are coming from by logging the stack when the exception was caught and turned into a Failure. 
(We'll also need https://github.com/richvdh/twisted/commit/edf27044200e74680ea67c525768e36dc9d9af2b). If we deploy this, we'll be able to enable it via the log config yaml. --- synapse/util/logformatter.py | 43 ++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) create mode 100644 synapse/util/logformatter.py diff --git a/synapse/util/logformatter.py b/synapse/util/logformatter.py new file mode 100644 index 0000000000..60504162e9 --- /dev/null +++ b/synapse/util/logformatter.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# Copyright 2017 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import StringIO +import logging +import traceback + + +class LogFormatter(logging.Formatter): + """Log formatter which gives more detail for exceptions + + This is the same as the standard log formatter, except that when logging + exceptions [typically via log.foo("msg", exc_info=1)], it prints the + sequence that led up to the point at which the exception was caught. + (Normally only stack frames between the point the exception was raised and + where it was caught are logged). + """ + def __init__(self, *args, **kwargs): + super(LogFormatter, self).__init__(*args, **kwargs) + + def formatException(self, ei): + sio = StringIO.StringIO() + sio.write("Capture point (most recent call last):\n") + traceback.print_stack(ei[2].tb_frame.f_back, None, sio) + traceback.print_exception(ei[0], ei[1], ei[2], None, sio) + s = sio.getvalue() + sio.close() + if s[-1:] == "\n": + s = s[:-1] + return s From a6e3222fe5abf5b65b53678d1208c4c58f97b391 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 6 Oct 2017 14:24:06 +0100 Subject: [PATCH 0232/1637] Fed server: Move origin-check code to _handle_received_pdu The response-building code expects there to be an entry in the `results` list for each entry in the pdu_list, so the early `continue` was messing this up. That doesn't really matter, because all that the federation client does is log any errors, but it's pretty poor form. --- synapse/federation/federation_server.py | 48 ++++++++++++------------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 51e3fdea06..e791a1266d 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -143,30 +143,6 @@ class FederationServer(FederationBase): results = [] for pdu in pdu_list: - # check that it's actually being sent from a valid destination to - # workaround bug #1753 in 0.18.5 and 0.18.6 - if transaction.origin != get_domain_from_id(pdu.event_id): - # We continue to accept join events from any server; this is - # necessary for the federation join dance to work correctly. - # (When we join over federation, the "helper" server is - # responsible for sending out the join event, rather than the - # origin. See bug #1893). 
- if not ( - pdu.type == 'm.room.member' and - pdu.content and - pdu.content.get("membership", None) == 'join' - ): - logger.info( - "Discarding PDU %s from invalid origin %s", - pdu.event_id, transaction.origin - ) - continue - else: - logger.info( - "Accepting join PDU %s from %s", - pdu.event_id, transaction.origin - ) - try: yield self._handle_received_pdu(transaction.origin, pdu) results.append({}) @@ -520,6 +496,30 @@ class FederationServer(FederationBase): Returns (Deferred): completes with None Raises: FederationError if the signatures / hash do not match """ + # check that it's actually being sent from a valid destination to + # workaround bug #1753 in 0.18.5 and 0.18.6 + if origin != get_domain_from_id(pdu.event_id): + # We continue to accept join events from any server; this is + # necessary for the federation join dance to work correctly. + # (When we join over federation, the "helper" server is + # responsible for sending out the join event, rather than the + # origin. See bug #1893). + if not ( + pdu.type == 'm.room.member' and + pdu.content and + pdu.content.get("membership", None) == 'join' + ): + logger.info( + "Discarding PDU %s from invalid origin %s", + pdu.event_id, origin + ) + return + else: + logger.info( + "Accepting join PDU %s from %s", + pdu.event_id, origin + ) + # Check signature. try: pdu = yield self._check_sigs_and_hash(pdu) From ba5b9b80a56a449ffab44afaf4661d5b44277898 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 6 Oct 2017 15:18:58 +0100 Subject: [PATCH 0233/1637] fed server: refactor on_incoming_transaction Move as much as possible to after the have_responded check, and reduce the number of times we iterate over the pdu list. --- synapse/federation/federation_server.py | 53 ++++++++++++++----------- 1 file changed, 29 insertions(+), 24 deletions(-) diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index e791a1266d..fa4ec2ad3c 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -109,23 +109,12 @@ class FederationServer(FederationBase): @defer.inlineCallbacks @log_function def on_incoming_transaction(self, transaction_data): + # keep this as early as possible to make the calculated origin ts as + # accurate as possible. 
+ request_time = int(self._clock.time_msec()) + transaction = Transaction(**transaction_data) - received_pdus_counter.inc_by(len(transaction.pdus)) - - for p in transaction.pdus: - if "unsigned" in p: - unsigned = p["unsigned"] - if "age" in unsigned: - p["age"] = unsigned["age"] - if "age" in p: - p["age_ts"] = int(self._clock.time_msec()) - int(p["age"]) - del p["age"] - - pdu_list = [ - self.event_from_pdu_json(p) for p in transaction.pdus - ] - logger.debug("[%s] Got transaction", transaction.transaction_id) response = yield self.transaction_actions.have_responded(transaction) @@ -140,17 +129,35 @@ class FederationServer(FederationBase): logger.debug("[%s] Transaction is new", transaction.transaction_id) - results = [] + received_pdus_counter.inc_by(len(transaction.pdus)) + + pdu_list = [] + + for p in transaction.pdus: + if "unsigned" in p: + unsigned = p["unsigned"] + if "age" in unsigned: + p["age"] = unsigned["age"] + if "age" in p: + p["age_ts"] = request_time - int(p["age"]) + del p["age"] + + event = self.event_from_pdu_json(p) + pdu_list.append(event) + + pdu_results = {} for pdu in pdu_list: + event_id = pdu.event_id try: yield self._handle_received_pdu(transaction.origin, pdu) - results.append({}) + pdu_results[event_id] = {} except FederationError as e: + logger.warn("Error handling PDU %s: %s", event_id, e) self.send_failure(e, transaction.origin) - results.append({"error": str(e)}) + pdu_results[event_id] = {"error": str(e)} except Exception as e: - results.append({"error": str(e)}) + pdu_results[event_id] = {"error": str(e)} logger.exception("Failed to handle PDU") if hasattr(transaction, "edus"): @@ -164,14 +171,12 @@ class FederationServer(FederationBase): for failure in getattr(transaction, "pdu_failures", []): logger.info("Got failure %r", failure) - logger.debug("Returning: %s", str(results)) - response = { - "pdus": dict(zip( - (p.event_id for p in pdu_list), results - )), + "pdus": pdu_results, } + logger.debug("Returning: %s", str(response)) + yield self.transaction_actions.set_response( transaction, 200, response From 4c7c4d4061ae298ce6df445c888b91d3e5791164 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 6 Oct 2017 15:31:58 +0100 Subject: [PATCH 0234/1637] Fed server: use a linearizer for ongoing transactions We don't want to process the same transaction multiple times concurrently, so use a linearizer. --- synapse/federation/federation_server.py | 30 ++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index fa4ec2ad3c..b2dffa2c3d 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -53,6 +53,7 @@ class FederationServer(FederationBase): self.auth = hs.get_auth() self._server_linearizer = Linearizer("fed_server") + self._transaction_linearizer = Linearizer("fed_txn_handler") # We cache responses to state queries, as they take a while and often # come in waves. @@ -111,12 +112,39 @@ class FederationServer(FederationBase): def on_incoming_transaction(self, transaction_data): # keep this as early as possible to make the calculated origin ts as # accurate as possible. 
- request_time = int(self._clock.time_msec()) + request_time = self._clock.time_msec() transaction = Transaction(**transaction_data) + if not transaction.transaction_id: + raise Exception("Transaction missing transaction_id") + if not transaction.origin: + raise Exception("Transaction missing origin") + logger.debug("[%s] Got transaction", transaction.transaction_id) + # use a linearizer to ensure that we don't process the same transaction + # multiple times in parallel. + with (yield self._transaction_linearizer.queue( + (transaction.origin, transaction.transaction_id), + )): + result = yield self._handle_incoming_transaction( + transaction, request_time, + ) + + defer.returnValue(result) + + @defer.inlineCallbacks + def _handle_incoming_transaction(self, transaction, request_time): + """ Process an incoming transaction and return the HTTP response + + Args: + transaction (Transaction): incoming transaction + request_time (int): timestamp that the HTTP request arrived at + + Returns: + Deferred[(int, object)]: http response code and body + """ response = yield self.transaction_actions.have_responded(transaction) if response: From 6a6cc27aee16ee045b6909d2c401a9d4f6e54324 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 6 Oct 2017 16:07:20 +0100 Subject: [PATCH 0235/1637] fed server: process PDUs for different rooms in parallel With luck, this will give a real-time improvement when there are many rooms and the server ends up calling out to fetch missing events. --- synapse/federation/federation_server.py | 53 ++++++++++++++++--------- 1 file changed, 34 insertions(+), 19 deletions(-) diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index b2dffa2c3d..f00d59e701 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -12,14 +12,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - - from twisted.internet import defer from .federation_base import FederationBase from .units import Transaction, Edu -from synapse.util.async import Linearizer +from synapse.util import async from synapse.util.logutils import log_function from synapse.util.caches.response_cache import ResponseCache from synapse.events import FrozenEvent @@ -33,6 +31,9 @@ from synapse.crypto.event_signing import compute_event_signature import simplejson as json import logging +# when processing incoming transactions, we try to handle multiple rooms in +# parallel, up to this limit. +TRANSACTION_CONCURRENCY_LIMIT = 10 logger = logging.getLogger(__name__) @@ -52,8 +53,8 @@ class FederationServer(FederationBase): self.auth = hs.get_auth() - self._server_linearizer = Linearizer("fed_server") - self._transaction_linearizer = Linearizer("fed_txn_handler") + self._server_linearizer = async.Linearizer("fed_server") + self._transaction_linearizer = async.Linearizer("fed_txn_handler") # We cache responses to state queries, as they take a while and often # come in waves. 
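
The linearizer keyed on (origin, transaction_id) in patch 0234 is a deduplication gate: a retransmitted transaction queues behind the first delivery and then hits the have_responded cache instead of being processed twice. A rough sketch of the same pattern in isolation (process_payload and the plain-dict response cache are illustrative stand-ins for the real transaction handling and storage):

from twisted.internet import defer
from synapse.util.async import Linearizer

txn_linearizer = Linearizer("txn_handler")
seen_responses = {}  # (origin, txn_id) -> previously computed response

@defer.inlineCallbacks
def handle_transaction(origin, txn_id, payload):
    # duplicates queue up behind the first delivery...
    with (yield txn_linearizer.queue((origin, txn_id))):
        key = (origin, txn_id)
        if key in seen_responses:
            # ...and see the stored response instead of re-processing
            defer.returnValue(seen_responses[key])
        result = yield process_payload(payload)
        seen_responses[key] = result
        defer.returnValue(result)
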
@@ -159,7 +160,7 @@ class FederationServer(FederationBase): received_pdus_counter.inc_by(len(transaction.pdus)) - pdu_list = [] + pdus_by_room = {} for p in transaction.pdus: if "unsigned" in p: @@ -171,22 +172,36 @@ class FederationServer(FederationBase): del p["age"] event = self.event_from_pdu_json(p) - pdu_list.append(event) + room_id = event.room_id + pdus_by_room.setdefault(room_id, []).append(event) pdu_results = {} - for pdu in pdu_list: - event_id = pdu.event_id - try: - yield self._handle_received_pdu(transaction.origin, pdu) - pdu_results[event_id] = {} - except FederationError as e: - logger.warn("Error handling PDU %s: %s", event_id, e) - self.send_failure(e, transaction.origin) - pdu_results[event_id] = {"error": str(e)} - except Exception as e: - pdu_results[event_id] = {"error": str(e)} - logger.exception("Failed to handle PDU") + # we can process different rooms in parallel (which is useful if they + # require callouts to other servers to fetch missing events), but + # impose a limit to avoid going too crazy with ram/cpu. + @defer.inlineCallbacks + def process_pdus_for_room(room_id): + logger.debug("Processing PDUs for %s", room_id) + for pdu in pdus_by_room[room_id]: + event_id = pdu.event_id + try: + yield self._handle_received_pdu( + transaction.origin, pdu + ) + pdu_results[event_id] = {} + except FederationError as e: + logger.warn("Error handling PDU %s: %s", event_id, e) + self.send_failure(e, transaction.origin) + pdu_results[event_id] = {"error": str(e)} + except Exception as e: + pdu_results[event_id] = {"error": str(e)} + logger.exception("Failed to handle PDU %s", event_id) + + yield async.concurrently_execute( + process_pdus_for_room, pdus_by_room.keys(), + TRANSACTION_CONCURRENCY_LIMIT, + ) if hasattr(transaction, "edus"): for edu in (Edu(**x) for x in transaction.edus): From 707374d5dc8ed0ed077ed525262678ebcd583090 Mon Sep 17 00:00:00 2001 From: David Baker Date: Tue, 10 Oct 2017 11:21:41 +0100 Subject: [PATCH 0236/1637] What year is it!? Who's the president!? --- synapse/push/push_rule_evaluator.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/push/push_rule_evaluator.py b/synapse/push/push_rule_evaluator.py index a91dc0ee08..7cf777f16f 100644 --- a/synapse/push/push_rule_evaluator.py +++ b/synapse/push/push_rule_evaluator.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd -# Copyright 2015 New Vector Ltd +# Copyright 2017 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
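
The shape of patch 0235 is worth restating outside the diff: group incoming PDUs by room, keep strict ordering within each room, and let up to TRANSACTION_CONCURRENCY_LIMIT rooms make progress at once via concurrently_execute. A stripped-down sketch under those assumptions (handle_pdu stands in for _handle_received_pdu):

from twisted.internet import defer
from synapse.util import async

TRANSACTION_CONCURRENCY_LIMIT = 10

@defer.inlineCallbacks
def process_transaction_pdus(pdus_by_room, handle_pdu):
    pdu_results = {}

    @defer.inlineCallbacks
    def process_pdus_for_room(room_id):
        # PDUs within a single room are still handled strictly in order
        for pdu in pdus_by_room[room_id]:
            try:
                yield handle_pdu(pdu)
                pdu_results[pdu.event_id] = {}
            except Exception as e:
                pdu_results[pdu.event_id] = {"error": str(e)}

    # but up to TRANSACTION_CONCURRENCY_LIMIT rooms run concurrently
    yield async.concurrently_execute(
        process_pdus_for_room, pdus_by_room.keys(),
        TRANSACTION_CONCURRENCY_LIMIT,
    )
    defer.returnValue(pdu_results)
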
From a9f9d686316da9efa3e165275fb20066c0367649 Mon Sep 17 00:00:00 2001 From: David Baker Date: Tue, 10 Oct 2017 11:38:31 +0100 Subject: [PATCH 0237/1637] More optimisation --- synapse/push/bulk_push_rule_evaluator.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index df16d5ce9e..66e8a68a05 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -112,19 +112,22 @@ class BulkPushRuleEvaluator(object): @defer.inlineCallbacks def _get_sender_power_level(self, event, context): - pl_event_id = context.prev_state_ids.get((EventTypes.PowerLevels, "",)) + pl_event_key = (EventTypes.PowerLevels, "", ) + pl_event_id = context.prev_state_ids.get(pl_event_key) if pl_event_id: # fastpath: if there's a power level event, that's all we need, and # not having a power level event is an extreme edge case - auth_events_ids = [pl_event_id] + pl_event = yield self.store.get_event(pl_event_id) + auth_events = { pl_event_key: pl_event } else: auth_events_ids = yield self.auth.compute_auth_events( event, context.prev_state_ids, for_verification=False, ) - auth_events = yield self.store.get_events(auth_events_ids) - auth_events = { - (e.type, e.state_key): e for e in auth_events.values() - } + auth_events = yield self.store.get_events(auth_events_ids) + auth_events = { + (e.type, e.state_key): e for e in auth_events.itervalues() + } + defer.returnValue(get_user_power_level(event.sender, auth_events)) @defer.inlineCallbacks From c9f034b4acb0a48915d1680e310b692816eed713 Mon Sep 17 00:00:00 2001 From: David Baker Date: Tue, 10 Oct 2017 11:47:10 +0100 Subject: [PATCH 0238/1637] There was already a constant for this also update copyright --- synapse/push/bulk_push_rule_evaluator.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index 66e8a68a05..adc99bd4f6 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2015 OpenMarket Ltd +# Copyright 2017 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
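
Patches 0237 and 0238 leave the actual level computation to get_user_power_level. For readers following along, the lookup semantics are roughly as below; this is a simplified illustration, not Synapse's event_auth implementation (the real one also handles the no-power-levels-event case, where the room creator is treated as level 100):

def lookup_sender_level(user_id, pl_content):
    # an explicit per-user entry in the power_levels content wins
    users = pl_content.get("users", {})
    if user_id in users:
        return int(users[user_id])
    # otherwise everyone gets the room default
    return int(pl_content.get("users_default", 0))

# e.g. a room where moderators sit at 50:
# lookup_sender_level("@mod:example.org",
#     {"users": {"@mod:example.org": 50}, "users_default": 0})  # -> 50
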
@@ -25,6 +26,7 @@ from synapse.metrics import get_metrics_for from synapse.util.caches import metrics as cache_metrics from synapse.util.caches.descriptors import cached from synapse.util.async import Linearizer +from synapse.state import POWER_KEY from collections import namedtuple @@ -112,13 +114,12 @@ class BulkPushRuleEvaluator(object): @defer.inlineCallbacks def _get_sender_power_level(self, event, context): - pl_event_key = (EventTypes.PowerLevels, "", ) - pl_event_id = context.prev_state_ids.get(pl_event_key) + pl_event_id = context.prev_state_ids.get(POWER_KEY) if pl_event_id: # fastpath: if there's a power level event, that's all we need, and # not having a power level event is an extreme edge case pl_event = yield self.store.get_event(pl_event_id) - auth_events = { pl_event_key: pl_event } + auth_events = { POWER_KEY: pl_event } else: auth_events_ids = yield self.auth.compute_auth_events( event, context.prev_state_ids, for_verification=False, From 0f1eb3e914a1e47e441bd8bfb7d523882646fb6e Mon Sep 17 00:00:00 2001 From: David Baker Date: Tue, 10 Oct 2017 15:23:00 +0100 Subject: [PATCH 0239/1637] Use notification levels in power_levels Rather than making the condition directly require a specific power level. This way the level require to notify a room can be configured per room. --- synapse/push/baserules.py | 4 ++-- synapse/push/bulk_push_rule_evaluator.py | 10 ++++++---- synapse/push/push_rule_evaluator.py | 20 ++++++++++++++------ 3 files changed, 22 insertions(+), 12 deletions(-) diff --git a/synapse/push/baserules.py b/synapse/push/baserules.py index 71f9ab6b25..9dce99ebec 100644 --- a/synapse/push/baserules.py +++ b/synapse/push/baserules.py @@ -249,8 +249,8 @@ BASE_APPEND_OVERRIDE_RULES = [ '_id': '_roomnotif_content', }, { - 'kind': 'sender_power_level', - 'is': '>=50', + 'kind': 'sender_notification_permission', + 'key': 'room', '_id': '_roomnotif_pl', }, ], diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index adc99bd4f6..db07a97a94 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -113,7 +113,7 @@ class BulkPushRuleEvaluator(object): ) @defer.inlineCallbacks - def _get_sender_power_level(self, event, context): + def _get_power_levels_and_sender_level(self, event, context): pl_event_id = context.prev_state_ids.get(POWER_KEY) if pl_event_id: # fastpath: if there's a power level event, that's all we need, and @@ -129,7 +129,9 @@ class BulkPushRuleEvaluator(object): (e.type, e.state_key): e for e in auth_events.itervalues() } - defer.returnValue(get_user_power_level(event.sender, auth_events)) + sender_level = get_user_power_level(event.sender, auth_events) + + defer.returnValue((auth_events[POWER_KEY].content, sender_level)) @defer.inlineCallbacks def action_for_event_by_user(self, event, context): @@ -146,10 +148,10 @@ class BulkPushRuleEvaluator(object): event, context ) - sender_power_level = yield self._get_sender_power_level(event, context) + (power_levels, sender_power_level) = yield self._get_power_levels_and_sender_level(event, context) evaluator = PushRuleEvaluatorForEvent( - event, len(room_members), sender_power_level + event, len(room_members), sender_power_level, power_levels, ) condition_cache = {} diff --git a/synapse/push/push_rule_evaluator.py b/synapse/push/push_rule_evaluator.py index 7cf777f16f..5011bef4f1 100644 --- a/synapse/push/push_rule_evaluator.py +++ b/synapse/push/push_rule_evaluator.py @@ -33,8 +33,15 @@ def _room_member_count(ev, condition, 
room_member_count): return _test_ineq_condition(condition, room_member_count) -def _sender_power_level(ev, condition, power_level): - return _test_ineq_condition(condition, power_level) +def _sender_notification_permission(ev, condition, sender_power_level, power_levels): + notif_level_key = condition.get('key') + if notif_level_key is None: + return False + + notif_levels = power_levels.get('notifications', {}) + room_notif_level = notif_levels.get(notif_level_key, 50) + + return sender_power_level >= room_notif_level; def _test_ineq_condition(condition, number): @@ -74,10 +81,11 @@ def tweaks_for_actions(actions): class PushRuleEvaluatorForEvent(object): - def __init__(self, event, room_member_count, sender_power_level): + def __init__(self, event, room_member_count, sender_power_level, power_levels): self._event = event self._room_member_count = room_member_count self._sender_power_level = sender_power_level + self._power_levels = power_levels # Maps strings of e.g. 'content.body' -> event["content"]["body"] self._value_cache = _flatten_dict(event) @@ -91,9 +99,9 @@ class PushRuleEvaluatorForEvent(object): return _room_member_count( self._event, condition, self._room_member_count ) - elif condition['kind'] == 'sender_power_level': - return _sender_power_level( - self._event, condition, self._sender_power_level + elif condition['kind'] == 'sender_notification_permission': + return _sender_notification_permission( + self._event, condition, self._sender_power_level, self._power_levels, ) else: return True From ab1bc9bf5fcc8875c61e4ec7357d6e43abc76a55 Mon Sep 17 00:00:00 2001 From: David Baker Date: Tue, 10 Oct 2017 15:34:05 +0100 Subject: [PATCH 0240/1637] Don't KeyError if no power_levels event --- synapse/push/bulk_push_rule_evaluator.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index db07a97a94..05c1c5165f 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -131,7 +131,9 @@ class BulkPushRuleEvaluator(object): sender_level = get_user_power_level(event.sender, auth_events) - defer.returnValue((auth_events[POWER_KEY].content, sender_level)) + pl_event = auth_events.get(POWER_KEY) + + defer.returnValue((pl_event.content if pl_event else {}, sender_level)) @defer.inlineCallbacks def action_for_event_by_user(self, event, context): From 81a5e0073cbca1ed469fdae94150de3059e5c3e3 Mon Sep 17 00:00:00 2001 From: David Baker Date: Tue, 10 Oct 2017 15:53:34 +0100 Subject: [PATCH 0241/1637] pep8 --- synapse/push/bulk_push_rule_evaluator.py | 6 ++++-- synapse/push/push_rule_evaluator.py | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index 05c1c5165f..425a017bdf 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -119,7 +119,7 @@ class BulkPushRuleEvaluator(object): # fastpath: if there's a power level event, that's all we need, and # not having a power level event is an extreme edge case pl_event = yield self.store.get_event(pl_event_id) - auth_events = { POWER_KEY: pl_event } + auth_events = {POWER_KEY: pl_event} else: auth_events_ids = yield self.auth.compute_auth_events( event, context.prev_state_ids, for_verification=False, @@ -150,7 +150,9 @@ class BulkPushRuleEvaluator(object): event, context ) - (power_levels, sender_power_level) = yield 
self._get_power_levels_and_sender_level(event, context) + (power_levels, sender_power_level) = ( + yield self._get_power_levels_and_sender_level(event, context) + ) evaluator = PushRuleEvaluatorForEvent( event, len(room_members), sender_power_level, power_levels, diff --git a/synapse/push/push_rule_evaluator.py b/synapse/push/push_rule_evaluator.py index 5011bef4f1..3601f2d365 100644 --- a/synapse/push/push_rule_evaluator.py +++ b/synapse/push/push_rule_evaluator.py @@ -41,7 +41,7 @@ def _sender_notification_permission(ev, condition, sender_power_level, power_lev notif_levels = power_levels.get('notifications', {}) room_notif_level = notif_levels.get(notif_level_key, 50) - return sender_power_level >= room_notif_level; + return sender_power_level >= room_notif_level def _test_ineq_condition(condition, number): From ec954f47fb7a1aaa176a7fbf7ca8e683cf428af8 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 11 Oct 2017 13:15:44 +0100 Subject: [PATCH 0242/1637] Validate room ids --- synapse/groups/groups_server.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 991cc12cce..6a85908dd6 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -16,7 +16,7 @@ from twisted.internet import defer from synapse.api.errors import SynapseError -from synapse.types import UserID, get_domain_from_id +from synapse.types import UserID, get_domain_from_id, RoomID import logging @@ -160,6 +160,8 @@ class GroupsServerHandler(object): """ yield self.check_group_is_ours(group_id, and_exists=True, and_is_admin=user_id) + RoomID.from_string(room_id) # Ensure valid room id + order = content.get("order", None) is_public = _parse_visibility_from_contents(content) @@ -463,6 +465,8 @@ class GroupsServerHandler(object): def add_room_to_group(self, group_id, requester_user_id, room_id, content): """Add room to group """ + RoomID.from_string(room_id) # Ensure valid room id + yield self.check_group_is_ours( group_id, and_exists=True, and_is_admin=requester_user_id ) From c2c47550f9b85fda1a24964f053d03e459bb8436 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 11 Oct 2017 13:23:15 +0100 Subject: [PATCH 0243/1637] Fix schema delta versions --- synapse/storage/prepare_database.py | 2 +- synapse/storage/schema/delta/{43 => 45}/group_server.sql | 0 synapse/storage/schema/delta/{43 => 45}/profile_cache.sql | 0 3 files changed, 1 insertion(+), 1 deletion(-) rename synapse/storage/schema/delta/{43 => 45}/group_server.sql (100%) rename synapse/storage/schema/delta/{43 => 45}/profile_cache.sql (100%) diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index a0af8456f5..ccaaabcfa0 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -25,7 +25,7 @@ logger = logging.getLogger(__name__) # Remember to update this number every time a change is made to database # schema files, so the users will be informed on server restarts. 
-SCHEMA_VERSION = 44 +SCHEMA_VERSION = 45 dir_path = os.path.abspath(os.path.dirname(__file__)) diff --git a/synapse/storage/schema/delta/43/group_server.sql b/synapse/storage/schema/delta/45/group_server.sql similarity index 100% rename from synapse/storage/schema/delta/43/group_server.sql rename to synapse/storage/schema/delta/45/group_server.sql diff --git a/synapse/storage/schema/delta/43/profile_cache.sql b/synapse/storage/schema/delta/45/profile_cache.sql similarity index 100% rename from synapse/storage/schema/delta/43/profile_cache.sql rename to synapse/storage/schema/delta/45/profile_cache.sql From 4ce43792350f0df432df25006c1bdd78c08647e0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 11 Oct 2017 14:11:43 +0100 Subject: [PATCH 0244/1637] Fix attestations to check correct server name --- synapse/handlers/groups_local.py | 30 ++++++++++++++++++++---------- 1 file changed, 20 insertions(+), 10 deletions(-) diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index a2bacbfc38..50e40548c2 100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -102,6 +102,8 @@ class GroupsLocalHandler(object): get_domain_from_id(group_id), group_id, requester_user_id, ) + group_server_name = get_domain_from_id(group_id) + # Loop through the users and validate the attestations. chunk = res["users_section"]["users"] valid_users = [] @@ -109,11 +111,13 @@ class GroupsLocalHandler(object): g_user_id = entry["user_id"] attestation = entry.pop("attestation") try: - yield self.attestations.verify_attestation( - attestation, - group_id=group_id, - user_id=g_user_id, - ) + if get_domain_from_id(g_user_id) != group_server_name: + yield self.attestations.verify_attestation( + attestation, + group_id=group_id, + user_id=g_user_id, + server_name=get_domain_from_id(g_user_id), + ) valid_users.append(entry) except Exception as e: logger.info("Failed to verify user is in group: %s", e) @@ -160,6 +164,7 @@ class GroupsLocalHandler(object): remote_attestation, group_id=group_id, user_id=user_id, + server_name=get_domain_from_id(group_id), ) is_publicised = content.get("publicise", False) @@ -187,6 +192,8 @@ class GroupsLocalHandler(object): ) defer.returnValue(res) + group_server_name = get_domain_from_id(group_id) + res = yield self.transport_client.get_users_in_group( get_domain_from_id(group_id), group_id, requester_user_id, ) @@ -197,11 +204,13 @@ class GroupsLocalHandler(object): g_user_id = entry["user_id"] attestation = entry.pop("attestation") try: - yield self.attestations.verify_attestation( - attestation, - group_id=group_id, - user_id=g_user_id, - ) + if get_domain_from_id(g_user_id) != group_server_name: + yield self.attestations.verify_attestation( + attestation, + group_id=group_id, + user_id=g_user_id, + server_name=get_domain_from_id(g_user_id), + ) valid_entries.append(entry) except Exception as e: logger.info("Failed to verify user is in group: %s", e) @@ -240,6 +249,7 @@ class GroupsLocalHandler(object): remote_attestation, group_id=group_id, user_id=user_id, + server_name=get_domain_from_id(group_id), ) # TODO: Check that the group is public and we're being added publically From 27e727a1469f336c3decc82288172923f2d46ddf Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 11 Oct 2017 14:32:40 +0100 Subject: [PATCH 0245/1637] Fix typo --- synapse/groups/groups_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 6a85908dd6..1083bc2990 
100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -400,7 +400,7 @@ class GroupsServerHandler(object): if not is_public: entry["is_public"] = False - if not self.is_mine_id(requester_user_id): + if not self.is_mine_id(g_user_id): attestation = yield self.store.get_remote_attestation(group_id, g_user_id) if not attestation: continue From b75d443caff63a736e914236ef7448c9502e6495 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 11 Oct 2017 14:25:16 +0100 Subject: [PATCH 0246/1637] log pdu_failures from incoming transactions ... even if we have no EDUs. This appears to have been introduced in 476899295f5fd6cff64799bcbc84cd4bf9005e33. --- synapse/federation/federation_server.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index fa4ec2ad3c..b4b587c867 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -168,8 +168,9 @@ class FederationServer(FederationBase): edu.content ) - for failure in getattr(transaction, "pdu_failures", []): - logger.info("Got failure %r", failure) + pdu_failures = getattr(transaction, "pdu_failures", []) + for failure in pdu_failures: + logger.info("Got failure %r", failure) response = { "pdus": pdu_results, From c3e190ce671ed37a31a1543f42e26277fea582ce Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 11 Oct 2017 14:37:20 +0100 Subject: [PATCH 0247/1637] fix a logcontext leak in read receipt handling --- synapse/handlers/receipts.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py index e1cd3a48e9..0525765272 100644 --- a/synapse/handlers/receipts.py +++ b/synapse/handlers/receipts.py @@ -12,6 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from synapse.util import logcontext from ._base import BaseHandler @@ -59,6 +60,8 @@ class ReceiptsHandler(BaseHandler): is_new = yield self._handle_new_receipts([receipt]) if is_new: + # fire off a process in the background to send the receipt to + # remote servers self._push_remotes([receipt]) @defer.inlineCallbacks @@ -126,6 +129,7 @@ class ReceiptsHandler(BaseHandler): defer.returnValue(True) + @logcontext.preserve_fn # caller should not yield on this @defer.inlineCallbacks def _push_remotes(self, receipts): """Given a list of receipts, works out which remote servers should be From c3b7a45e84f0aaf45e671eef295993e3d09d6908 Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 11 Oct 2017 14:39:22 +0100 Subject: [PATCH 0248/1637] Allow error strings from spam checker --- synapse/handlers/message.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index fbf88b46ef..06672df3bc 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2014 - 2016 OpenMarket Ltd +# Copyright 2017 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
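
Stepping back to patch 0247: decorating the method with logcontext.preserve_fn is what makes the "fire off a process in the background" comment safe, since the caller never yields on the returned deferred. The same fire-and-forget idiom in isolation (send_receipt is a hypothetical transport call):

from twisted.internet import defer
from synapse.util import logcontext

@logcontext.preserve_fn  # caller should not yield on this
@defer.inlineCallbacks
def push_to_remotes(receipts):
    # runs with its own copy of the logcontext, so the request that
    # kicked it off can finish without waiting on federation sends
    for receipt in receipts:
        yield send_receipt(receipt)
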
@@ -325,9 +326,12 @@ class MessageHandler(BaseHandler): txn_id=txn_id ) - if self.spam_checker.check_event_for_spam(event): + spam_error = self.spam_checker.check_event_for_spam(event) + if spam_error: + if not isinstance(spam_error, (str, basestring)): + spam_error = "Spam is not permitted here" raise SynapseError( - 403, "Spam is not permitted here", Codes.FORBIDDEN + 403, spam_error, Codes.FORBIDDEN ) yield self.send_nonmember_event( From 271f5601f3eb28a16e4b0d58017b4845856ab19c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 11 Oct 2017 14:44:51 +0100 Subject: [PATCH 0249/1637] Fix typo in invite to group --- synapse/federation/transport/client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 36f6eb75e9..f96561c1fe 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -572,7 +572,7 @@ class TransportLayerClient(object): return self.client.post_json( destination=destination, path=path, - args=requester_user_id, + args={"requester_user_id": requester_user_id}, data=content, ignore_backoff=True, ) From b78bae2d51bac7e1f75365d85cd67402adb5f408 Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 11 Oct 2017 14:49:09 +0100 Subject: [PATCH 0250/1637] fix isinstance --- synapse/handlers/message.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 06672df3bc..28792788d9 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -328,7 +328,7 @@ class MessageHandler(BaseHandler): spam_error = self.spam_checker.check_event_for_spam(event) if spam_error: - if not isinstance(spam_error, (str, basestring)): + if not isinstance(spam_error, basestring): spam_error = "Spam is not permitted here" raise SynapseError( 403, spam_error, Codes.FORBIDDEN From 4fad8efbfb1726c72bdd7cbbacc894b8701efec3 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 11 Oct 2017 15:05:05 +0100 Subject: [PATCH 0251/1637] Fix stackoverflow and logcontexts from linearizer 1. make it not blow out the stack when there are more than 50 things waiting for a lock. Fixes https://github.com/matrix-org/synapse/issues/2505. 2. Make it not mess up the log contexts. --- synapse/util/async.py | 24 ++++++++++++++++++++++-- tests/util/test_linearizer.py | 28 ++++++++++++++++++++++++---- 2 files changed, 46 insertions(+), 6 deletions(-) diff --git a/synapse/util/async.py b/synapse/util/async.py index bb252f75d7..0fd5b42523 100644 --- a/synapse/util/async.py +++ b/synapse/util/async.py @@ -203,7 +203,26 @@ class Linearizer(object): except: logger.exception("Unexpected exception in Linearizer") - logger.info("Acquired linearizer lock %r for key %r", self.name, key) + logger.info("Acquired linearizer lock %r for key %r", self.name, + key) + + # if the code holding the lock completes synchronously, then it + # will recursively run the next claimant on the list. That can + # relatively rapidly lead to stack exhaustion. This is essentially + # the same problem as http://twistedmatrix.com/trac/ticket/9304. + # + # In order to break the cycle, we add a cheeky sleep(0) here to + # ensure that we fall back to the reactor between each iteration. 
+ # + # (There's no particular need for it to happen before we return + # the context manager, but it needs to happen while we hold the + # lock, and the context manager's exit code must be synchronous, + # so actually this is the only sensible place. + yield run_on_reactor() + + else: + logger.info("Acquired uncontended linearizer lock %r for key %r", + self.name, key) @contextmanager def _ctx_manager(): @@ -211,7 +230,8 @@ class Linearizer(object): yield finally: logger.info("Releasing linearizer lock %r for key %r", self.name, key) - new_defer.callback(None) + with PreserveLoggingContext(): + new_defer.callback(None) current_d = self.key_to_defer.get(key) if current_d is new_defer: self.key_to_defer.pop(key, None) diff --git a/tests/util/test_linearizer.py b/tests/util/test_linearizer.py index afcba482f9..793a88e462 100644 --- a/tests/util/test_linearizer.py +++ b/tests/util/test_linearizer.py @@ -12,8 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - - +from synapse.util import async, logcontext from tests import unittest from twisted.internet import defer @@ -38,7 +37,28 @@ class LinearizerTestCase(unittest.TestCase): with cm1: self.assertFalse(d2.called) - self.assertTrue(d2.called) - with (yield d2): pass + + def test_lots_of_queued_things(self): + # we have one slow thing, and lots of fast things queued up behind it. + # it should *not* explode the stack. + linearizer = Linearizer() + + @defer.inlineCallbacks + def func(i, sleep=False): + with logcontext.LoggingContext("func(%s)" % i) as lc: + with (yield linearizer.queue("")): + self.assertEqual( + logcontext.LoggingContext.current_context(), lc) + if sleep: + yield async.sleep(0) + + self.assertEqual( + logcontext.LoggingContext.current_context(), lc) + + func(0, sleep=True) + for i in xrange(1, 100): + func(i) + + return func(1000) From ea18996f54194f920dc506201a65eb3d36bb161d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 11 Oct 2017 15:44:37 +0100 Subject: [PATCH 0252/1637] Fix group stream replication The stream update functions expect the storage function to return a list of tuples. --- synapse/storage/group_server.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index 4fe9172adc..22a6bc6261 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -1172,13 +1172,13 @@ class GroupServerStore(SQLBaseStore): LIMIT ? 
""" txn.execute(sql, (from_token, to_token, limit,)) - return [{ - "stream_id": stream_id, - "group_id": group_id, - "user_id": user_id, - "type": gtype, - "content": json.loads(content_json), - } for stream_id, group_id, user_id, gtype, content_json in txn] + return [( + stream_id, + group_id, + user_id, + gtype, + json.loads(content_json), + ) for stream_id, group_id, user_id, gtype, content_json in txn] return self.runInteraction( "get_all_groups_changes", _get_all_groups_changes_txn, ) From 818b08d0e4be7571008af4590542fd652f028dcd Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 11 Oct 2017 15:54:00 +0100 Subject: [PATCH 0253/1637] peeeeeeeeep8888888888888888888888888888 --- synapse/storage/group_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index 22a6bc6261..3af372de59 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -1178,7 +1178,7 @@ class GroupServerStore(SQLBaseStore): user_id, gtype, json.loads(content_json), - ) for stream_id, group_id, user_id, gtype, content_json in txn] + ) for stream_id, group_id, user_id, gtype, content_json in txn] return self.runInteraction( "get_all_groups_changes", _get_all_groups_changes_txn, ) From b752507b488d223a0ab01ec3dbc7b30fee64c203 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 11 Oct 2017 16:54:36 +0100 Subject: [PATCH 0254/1637] Fix fetching remote summaries --- synapse/handlers/groups_local.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index 50e40548c2..3b676d46bd 100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -109,7 +109,7 @@ class GroupsLocalHandler(object): valid_users = [] for entry in chunk: g_user_id = entry["user_id"] - attestation = entry.pop("attestation") + attestation = entry.pop("attestation", {}) try: if get_domain_from_id(g_user_id) != group_server_name: yield self.attestations.verify_attestation( @@ -202,7 +202,7 @@ class GroupsLocalHandler(object): valid_entries = [] for entry in chunk: g_user_id = entry["user_id"] - attestation = entry.pop("attestation") + attestation = entry.pop("attestation", {}) try: if get_domain_from_id(g_user_id) != group_server_name: yield self.attestations.verify_attestation( From f30c4ed2bc2255dc7182bd026fb6437afec735a5 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 11 Oct 2017 17:26:17 +0100 Subject: [PATCH 0255/1637] logformatter: fix AttributeError make sure we have the relevant fields before we try to log them. 
--- synapse/util/logformatter.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/synapse/util/logformatter.py b/synapse/util/logformatter.py index 60504162e9..cdbc4bffd7 100644 --- a/synapse/util/logformatter.py +++ b/synapse/util/logformatter.py @@ -33,9 +33,17 @@ class LogFormatter(logging.Formatter): def formatException(self, ei): sio = StringIO.StringIO() - sio.write("Capture point (most recent call last):\n") - traceback.print_stack(ei[2].tb_frame.f_back, None, sio) - traceback.print_exception(ei[0], ei[1], ei[2], None, sio) + (typ, val, tb) = ei + + # log the stack above the exception capture point if possible, but + # check that we actually have an f_back attribute to work around + # https://twistedmatrix.com/trac/ticket/9305 + + if tb and hasattr(tb.tb_frame, 'f_back'): + sio.write("Capture point (most recent call last):\n") + traceback.print_stack(tb.tb_frame.f_back, None, sio) + + traceback.print_exception(typ, val, tb, None, sio) s = sio.getvalue() sio.close() if s[-1:] == "\n": From f807f7f80442aec48a5e1f6b6b6f1a88e707b1e2 Mon Sep 17 00:00:00 2001 From: hera Date: Thu, 12 Oct 2017 10:50:44 +0000 Subject: [PATCH 0256/1637] log when we get an exception handling replication updates --- synapse/replication/tcp/resource.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py index 3ea3ca5a6f..6c1beca4e3 100644 --- a/synapse/replication/tcp/resource.py +++ b/synapse/replication/tcp/resource.py @@ -160,7 +160,11 @@ class ReplicationStreamer(object): "Getting stream: %s: %s -> %s", stream.NAME, stream.last_token, stream.upto_token ) - updates, current_token = yield stream.get_updates() + try: + updates, current_token = yield stream.get_updates() + except: + logger.info("Failed to handle stream %s", stream.NAME) + raise logger.debug( "Sending %d updates to %d connections", From bf4fb1fb400daad23702bc0b3231ec069d68e87e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 12 Oct 2017 15:20:59 +0100 Subject: [PATCH 0257/1637] Basic implementation of backup media store --- synapse/config/repository.py | 18 ++ synapse/rest/media/v1/media_repository.py | 219 +++++++++++----------- synapse/rest/media/v1/thumbnailer.py | 16 +- synapse/rest/media/v1/upload_resource.py | 2 +- 4 files changed, 130 insertions(+), 125 deletions(-) diff --git a/synapse/config/repository.py b/synapse/config/repository.py index 2c6f57168e..e3c83d56fa 100644 --- a/synapse/config/repository.py +++ b/synapse/config/repository.py @@ -70,7 +70,17 @@ class ContentRepositoryConfig(Config): self.max_upload_size = self.parse_size(config["max_upload_size"]) self.max_image_pixels = self.parse_size(config["max_image_pixels"]) self.max_spider_size = self.parse_size(config["max_spider_size"]) + self.media_store_path = self.ensure_directory(config["media_store_path"]) + + self.backup_media_store_path = config.get("backup_media_store_path") + if self.backup_media_store_path: + self.ensure_directory(self.backup_media_store_path) + + self.synchronous_backup_media_store = config.get( + "synchronous_backup_media_store", False + ) + self.uploads_path = self.ensure_directory(config["uploads_path"]) self.dynamic_thumbnails = config["dynamic_thumbnails"] self.thumbnail_requirements = parse_thumbnail_requirements( @@ -115,6 +125,14 @@ class ContentRepositoryConfig(Config): # Directory where uploaded images and attachments are stored. 
media_store_path: "%(media_store)s" + # A secondary directory where uploaded images and attachments are + # stored as a backup. + # backup_media_store_path: "%(media_store)s" + + # Whether to wait for successful write to backup media store before + # returning successfully. + # synchronous_backup_media_store: false + # Directory where in-progress uploads are stored. uploads_path: "%(uploads_path)s" diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index 0ea1248ce6..3b442cc16b 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -33,7 +33,7 @@ from synapse.api.errors import SynapseError, HttpResponseException, \ from synapse.util.async import Linearizer from synapse.util.stringutils import is_ascii -from synapse.util.logcontext import preserve_context_over_fn +from synapse.util.logcontext import preserve_context_over_fn, preserve_fn from synapse.util.retryutils import NotRetryingDestination import os @@ -59,7 +59,12 @@ class MediaRepository(object): self.store = hs.get_datastore() self.max_upload_size = hs.config.max_upload_size self.max_image_pixels = hs.config.max_image_pixels + self.filepaths = MediaFilePaths(hs.config.media_store_path) + self.backup_filepaths = None + if hs.config.backup_media_store_path: + self.backup_filepaths = MediaFilePaths(hs.config.backup_media_store_path) + self.dynamic_thumbnails = hs.config.dynamic_thumbnails self.thumbnail_requirements = hs.config.thumbnail_requirements @@ -87,18 +92,43 @@ class MediaRepository(object): if not os.path.exists(dirname): os.makedirs(dirname) + @defer.inlineCallbacks + def _write_to_file(self, source, file_name_func): + def write_file_thread(file_name): + source.seek(0) # Ensure we read from the start of the file + with open(file_name, "wb") as f: + shutil.copyfileobj(source, f) + + fname = file_name_func(self.filepaths) + self._makedirs(fname) + + # Write to the main repository + yield preserve_context_over_fn(threads.deferToThread, write_file_thread, fname) + + # Write to backup repository + if self.backup_filepaths: + backup_fname = file_name_func(backup_filepaths) + self._makedirs(backup_fname) + + # We can either wait for successful writing to the backup repository + # or write in the background and immediately return + if hs.config.synchronous_backup_media_store: + yield preserve_context_over_fn( + threads.deferToThread, write_file_thread, backup_fname, + ) + else: + preserve_fn(threads.deferToThread)(write_file, backup_fname) + + defer.returnValue(fname) + @defer.inlineCallbacks def create_content(self, media_type, upload_name, content, content_length, auth_user): media_id = random_string(24) - fname = self.filepaths.local_media_filepath(media_id) - self._makedirs(fname) - - # This shouldn't block for very long because the content will have - # already been uploaded at this point. 
- with open(fname, "wb") as f: - f.write(content) + fname = yield self._write_to_file( + content, lambda f: f.local_media_filepath(media_id) + ) logger.info("Stored local media in file %r", fname) @@ -253,9 +283,8 @@ class MediaRepository(object): def _get_thumbnail_requirements(self, media_type): return self.thumbnail_requirements.get(media_type, ()) - def _generate_thumbnail(self, input_path, t_path, t_width, t_height, + def _generate_thumbnail(self, thumbnailer, t_width, t_height, t_method, t_type): - thumbnailer = Thumbnailer(input_path) m_width = thumbnailer.width m_height = thumbnailer.height @@ -267,36 +296,40 @@ class MediaRepository(object): return if t_method == "crop": - t_len = thumbnailer.crop(t_path, t_width, t_height, t_type) + t_byte_source = thumbnailer.crop(t_width, t_height, t_type) elif t_method == "scale": t_width, t_height = thumbnailer.aspect(t_width, t_height) t_width = min(m_width, t_width) t_height = min(m_height, t_height) - t_len = thumbnailer.scale(t_path, t_width, t_height, t_type) + t_byte_source = thumbnailer.scale(t_width, t_height, t_type) else: - t_len = None + t_byte_source = None - return t_len + return t_byte_source @defer.inlineCallbacks def generate_local_exact_thumbnail(self, media_id, t_width, t_height, t_method, t_type): input_path = self.filepaths.local_media_filepath(media_id) - t_path = self.filepaths.local_media_thumbnail( - media_id, t_width, t_height, t_type, t_method - ) - self._makedirs(t_path) - - t_len = yield preserve_context_over_fn( + thumbnailer = Thumbnailer(input_path) + t_byte_source = yield preserve_context_over_fn( threads.deferToThread, self._generate_thumbnail, - input_path, t_path, t_width, t_height, t_method, t_type + thumbnailer, t_width, t_height, t_method, t_type ) - if t_len: + if t_byte_source: + output_path = yield self._write_to_file( + content, + lambda f: f.local_media_thumbnail( + media_id, t_width, t_height, t_type, t_method + ) + ) + logger.info("Stored thumbnail in file %r", output_path) + yield self.store.store_local_thumbnail( - media_id, t_width, t_height, t_type, t_method, t_len + media_id, t_width, t_height, t_type, t_method, len(t_byte_source.getvalue()) ) defer.returnValue(t_path) @@ -306,21 +339,25 @@ class MediaRepository(object): t_width, t_height, t_method, t_type): input_path = self.filepaths.remote_media_filepath(server_name, file_id) - t_path = self.filepaths.remote_media_thumbnail( - server_name, file_id, t_width, t_height, t_type, t_method - ) - self._makedirs(t_path) - - t_len = yield preserve_context_over_fn( + thumbnailer = Thumbnailer(input_path) + t_byte_source = yield preserve_context_over_fn( threads.deferToThread, self._generate_thumbnail, - input_path, t_path, t_width, t_height, t_method, t_type + thumbnailer, t_width, t_height, t_method, t_type ) - if t_len: + if t_byte_source: + output_path = yield self._write_to_file( + content, + lambda f: f.remote_media_thumbnail( + server_name, file_id, t_width, t_height, t_type, t_method + ) + ) + logger.info("Stored thumbnail in file %r", output_path) + yield self.store.store_remote_media_thumbnail( server_name, media_id, file_id, - t_width, t_height, t_type, t_method, t_len + t_width, t_height, t_type, t_method, len(t_byte_source.getvalue()) ) defer.returnValue(t_path) @@ -351,59 +388,32 @@ class MediaRepository(object): local_thumbnails = [] def generate_thumbnails(): - scales = set() - crops = set() for r_width, r_height, r_method, r_type in requirements: - if r_method == "scale": - t_width, t_height = thumbnailer.aspect(r_width, r_height) - 
scales.add(( - min(m_width, t_width), min(m_height, t_height), r_type, - )) - elif r_method == "crop": - crops.add((r_width, r_height, r_type)) - - for t_width, t_height, t_type in scales: - t_method = "scale" - if url_cache: - t_path = self.filepaths.url_cache_thumbnail( - media_id, t_width, t_height, t_type, t_method - ) - else: - t_path = self.filepaths.local_media_thumbnail( - media_id, t_width, t_height, t_type, t_method - ) - self._makedirs(t_path) - t_len = thumbnailer.scale(t_path, t_width, t_height, t_type) + t_byte_source = self._generate_thumbnail( + thumbnailer, r_width, r_height, r_method, r_type, + ) local_thumbnails.append(( - media_id, t_width, t_height, t_type, t_method, t_len - )) - - for t_width, t_height, t_type in crops: - if (t_width, t_height, t_type) in scales: - # If the aspect ratio of the cropped thumbnail matches a purely - # scaled one then there is no point in calculating a separate - # thumbnail. - continue - t_method = "crop" - if url_cache: - t_path = self.filepaths.url_cache_thumbnail( - media_id, t_width, t_height, t_type, t_method - ) - else: - t_path = self.filepaths.local_media_thumbnail( - media_id, t_width, t_height, t_type, t_method - ) - self._makedirs(t_path) - t_len = thumbnailer.crop(t_path, t_width, t_height, t_type) - local_thumbnails.append(( - media_id, t_width, t_height, t_type, t_method, t_len + r_width, r_height, r_method, r_type, t_byte_source )) yield preserve_context_over_fn(threads.deferToThread, generate_thumbnails) - for l in local_thumbnails: - yield self.store.store_local_thumbnail(*l) + for t_width, t_height, t_method, t_type, t_byte_source in local_thumbnails: + if url_cache: + path_name_func = lambda f: f.url_cache_thumbnail( + media_id, t_width, t_height, t_type, t_method + ) + else: + path_name_func = lambda f: f.local_media_thumbnail( + media_id, t_width, t_height, t_type, t_method + ) + + yield self._write_to_file(t_byte_source, path_name_func) + + yield self.store.store_local_thumbnail( + media_id, t_width, t_height, t_type, t_method, len(t_byte_source.getvalue()) + ) defer.returnValue({ "width": m_width, @@ -433,51 +443,32 @@ class MediaRepository(object): ) return - scales = set() - crops = set() for r_width, r_height, r_method, r_type in requirements: - if r_method == "scale": - t_width, t_height = thumbnailer.aspect(r_width, r_height) - scales.add(( - min(m_width, t_width), min(m_height, t_height), r_type, - )) - elif r_method == "crop": - crops.add((r_width, r_height, r_type)) - - for t_width, t_height, t_type in scales: - t_method = "scale" - t_path = self.filepaths.remote_media_thumbnail( - server_name, file_id, t_width, t_height, t_type, t_method + t_byte_source = self._generate_thumbnail( + thumbnailer, r_width, r_height, r_method, r_type, ) - self._makedirs(t_path) - t_len = thumbnailer.scale(t_path, t_width, t_height, t_type) - remote_thumbnails.append([ - server_name, media_id, file_id, - t_width, t_height, t_type, t_method, t_len - ]) - for t_width, t_height, t_type in crops: - if (t_width, t_height, t_type) in scales: - # If the aspect ratio of the cropped thumbnail matches a purely - # scaled one then there is no point in calculating a separate - # thumbnail. 
- continue - t_method = "crop" - t_path = self.filepaths.remote_media_thumbnail( - server_name, file_id, t_width, t_height, t_type, t_method - ) - self._makedirs(t_path) - t_len = thumbnailer.crop(t_path, t_width, t_height, t_type) - remote_thumbnails.append([ - server_name, media_id, file_id, - t_width, t_height, t_type, t_method, t_len - ]) + remote_thumbnails.append(( + r_width, r_height, r_method, r_type, t_byte_source + )) yield preserve_context_over_fn(threads.deferToThread, generate_thumbnails) for r in remote_thumbnails: yield self.store.store_remote_media_thumbnail(*r) + for t_width, t_height, t_method, t_type, t_byte_source in local_thumbnails: + path_name_func = lambda f: f.remote_media_thumbnail( + server_name, media_id, file_id, t_width, t_height, t_type, t_method + ) + + yield self._write_to_file(t_byte_source, path_name_func) + + yield self.store.store_remote_media_thumbnail( + server_name, media_id, file_id, + t_width, t_height, t_type, t_method, len(t_byte_source.getvalue()) + ) + defer.returnValue({ "width": m_width, "height": m_height, diff --git a/synapse/rest/media/v1/thumbnailer.py b/synapse/rest/media/v1/thumbnailer.py index 3868d4f65f..60498b08aa 100644 --- a/synapse/rest/media/v1/thumbnailer.py +++ b/synapse/rest/media/v1/thumbnailer.py @@ -50,12 +50,12 @@ class Thumbnailer(object): else: return ((max_height * self.width) // self.height, max_height) - def scale(self, output_path, width, height, output_type): + def scale(self, width, height, output_type): """Rescales the image to the given dimensions""" scaled = self.image.resize((width, height), Image.ANTIALIAS) - return self.save_image(scaled, output_type, output_path) + return self._encode_image(scaled, output_type) - def crop(self, output_path, width, height, output_type): + def crop(self, width, height, output_type): """Rescales and crops the image to the given dimensions preserving aspect:: (w_in / h_in) = (w_scaled / h_scaled) @@ -82,13 +82,9 @@ class Thumbnailer(object): crop_left = (scaled_width - width) // 2 crop_right = width + crop_left cropped = scaled_image.crop((crop_left, 0, crop_right, height)) - return self.save_image(cropped, output_type, output_path) + return self._encode_image(cropped, output_type) - def save_image(self, output_image, output_type, output_path): + def _encode_image(self, output_image, output_type): output_bytes_io = BytesIO() output_image.save(output_bytes_io, self.FORMATS[output_type], quality=80) - output_bytes = output_bytes_io.getvalue() - with open(output_path, "wb") as output_file: - output_file.write(output_bytes) - logger.info("Stored thumbnail in file %r", output_path) - return len(output_bytes) + return output_bytes_io diff --git a/synapse/rest/media/v1/upload_resource.py b/synapse/rest/media/v1/upload_resource.py index 4ab33f73bf..f6f498cdc5 100644 --- a/synapse/rest/media/v1/upload_resource.py +++ b/synapse/rest/media/v1/upload_resource.py @@ -93,7 +93,7 @@ class UploadResource(Resource): # TODO(markjh): parse content-dispostion content_uri = yield self.media_repo.create_content( - media_type, upload_name, request.content.read(), + media_type, upload_name, request.content, content_length, requester.user ) From 67cb89fbdf62dfb2ff65f6f7f0ca23445cdac0ac Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 12 Oct 2017 15:23:41 +0100 Subject: [PATCH 0258/1637] Fix typo --- synapse/rest/media/v1/media_repository.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index 
3b442cc16b..f26f793bed 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -107,7 +107,7 @@ class MediaRepository(object): # Write to backup repository if self.backup_filepaths: - backup_fname = file_name_func(backup_filepaths) + backup_fname = file_name_func(self.backup_filepaths) self._makedirs(backup_fname) # We can either wait for successful writing to the backup repository From c8eeef6947af762c3eabef6ecca0f69833fbf8ab Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 12 Oct 2017 15:28:24 +0100 Subject: [PATCH 0259/1637] Fix typos --- synapse/rest/media/v1/media_repository.py | 46 +++++++++++++---------- 1 file changed, 26 insertions(+), 20 deletions(-) diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index f26f793bed..a16034fd67 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -65,6 +65,8 @@ class MediaRepository(object): if hs.config.backup_media_store_path: self.backup_filepaths = MediaFilePaths(hs.config.backup_media_store_path) + self.synchronous_backup_media_store = hs.config.synchronous_backup_media_store + self.dynamic_thumbnails = hs.config.dynamic_thumbnails self.thumbnail_requirements = hs.config.thumbnail_requirements @@ -112,12 +114,12 @@ class MediaRepository(object): # We can either wait for successful writing to the backup repository # or write in the background and immediately return - if hs.config.synchronous_backup_media_store: + if self.synchronous_backup_media_store: yield preserve_context_over_fn( threads.deferToThread, write_file_thread, backup_fname, ) else: - preserve_fn(threads.deferToThread)(write_file, backup_fname) + preserve_fn(threads.deferToThread)(write_file_thread, backup_fname) defer.returnValue(fname) @@ -321,7 +323,7 @@ class MediaRepository(object): if t_byte_source: output_path = yield self._write_to_file( - content, + t_byte_source, lambda f: f.local_media_thumbnail( media_id, t_width, t_height, t_type, t_method ) @@ -329,10 +331,11 @@ class MediaRepository(object): logger.info("Stored thumbnail in file %r", output_path) yield self.store.store_local_thumbnail( - media_id, t_width, t_height, t_type, t_method, len(t_byte_source.getvalue()) + media_id, t_width, t_height, t_type, t_method, + len(t_byte_source.getvalue()) ) - defer.returnValue(t_path) + defer.returnValue(output_path) @defer.inlineCallbacks def generate_remote_exact_thumbnail(self, server_name, file_id, media_id, @@ -348,7 +351,7 @@ class MediaRepository(object): if t_byte_source: output_path = yield self._write_to_file( - content, + t_byte_source, lambda f: f.remote_media_thumbnail( server_name, file_id, t_width, t_height, t_type, t_method ) @@ -360,7 +363,7 @@ class MediaRepository(object): t_width, t_height, t_type, t_method, len(t_byte_source.getvalue()) ) - defer.returnValue(t_path) + defer.returnValue(output_path) @defer.inlineCallbacks def _generate_local_thumbnails(self, media_id, media_info, url_cache=False): @@ -400,19 +403,21 @@ class MediaRepository(object): yield preserve_context_over_fn(threads.deferToThread, generate_thumbnails) for t_width, t_height, t_method, t_type, t_byte_source in local_thumbnails: - if url_cache: - path_name_func = lambda f: f.url_cache_thumbnail( - media_id, t_width, t_height, t_type, t_method - ) - else: - path_name_func = lambda f: f.local_media_thumbnail( - media_id, t_width, t_height, t_type, t_method - ) + def path_name_func(f): + if url_cache: + return f.url_cache_thumbnail( + 
media_id, t_width, t_height, t_type, t_method + ) + else: + return f.local_media_thumbnail( + media_id, t_width, t_height, t_type, t_method + ) yield self._write_to_file(t_byte_source, path_name_func) yield self.store.store_local_thumbnail( - media_id, t_width, t_height, t_type, t_method, len(t_byte_source.getvalue()) + media_id, t_width, t_height, t_type, t_method, + len(t_byte_source.getvalue()) ) defer.returnValue({ @@ -457,10 +462,11 @@ class MediaRepository(object): for r in remote_thumbnails: yield self.store.store_remote_media_thumbnail(*r) - for t_width, t_height, t_method, t_type, t_byte_source in local_thumbnails: - path_name_func = lambda f: f.remote_media_thumbnail( - server_name, media_id, file_id, t_width, t_height, t_type, t_method - ) + for t_width, t_height, t_method, t_type, t_byte_source in remote_thumbnails: + def path_name_func(f): + return f.remote_media_thumbnail( + server_name, media_id, file_id, t_width, t_height, t_type, t_method + ) yield self._write_to_file(t_byte_source, path_name_func) From 6dfde6d4856695890271232f8a2e4c5f32615dd1 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 12 Oct 2017 15:30:26 +0100 Subject: [PATCH 0260/1637] Remove dead code --- synapse/rest/media/v1/media_repository.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index a16034fd67..1eeb128d2a 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -459,9 +459,6 @@ class MediaRepository(object): yield preserve_context_over_fn(threads.deferToThread, generate_thumbnails) - for r in remote_thumbnails: - yield self.store.store_remote_media_thumbnail(*r) - for t_width, t_height, t_method, t_type, t_byte_source in remote_thumbnails: def path_name_func(f): return f.remote_media_thumbnail( From b77a13812c38b2e79b2ebfddb52ce88a2ac8e9b9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 12 Oct 2017 15:32:32 +0100 Subject: [PATCH 0261/1637] Typo --- synapse/rest/media/v1/media_repository.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index 1eeb128d2a..93b35af9cf 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -462,7 +462,7 @@ class MediaRepository(object): for t_width, t_height, t_method, t_type, t_byte_source in remote_thumbnails: def path_name_func(f): return f.remote_media_thumbnail( - server_name, media_id, file_id, t_width, t_height, t_type, t_method + server_name, file_id, t_width, t_height, t_type, t_method ) yield self._write_to_file(t_byte_source, path_name_func) From e283b555b1f20de4fd393fd947e82eb3c635b7e9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 12 Oct 2017 17:31:24 +0100 Subject: [PATCH 0262/1637] Copy everything to backup --- synapse/config/repository.py | 4 +- synapse/rest/media/v1/filepath.py | 99 ++++++++++------ synapse/rest/media/v1/media_repository.py | 109 +++++++++++------- synapse/rest/media/v1/preview_url_resource.py | 7 +- synapse/rest/media/v1/thumbnailer.py | 9 +- 5 files changed, 151 insertions(+), 77 deletions(-) diff --git a/synapse/config/repository.py b/synapse/config/repository.py index e3c83d56fa..6baa474931 100644 --- a/synapse/config/repository.py +++ b/synapse/config/repository.py @@ -75,7 +75,9 @@ class ContentRepositoryConfig(Config): self.backup_media_store_path = config.get("backup_media_store_path") if 
self.backup_media_store_path: - self.ensure_directory(self.backup_media_store_path) + self.backup_media_store_path = self.ensure_directory( + self.backup_media_store_path + ) self.synchronous_backup_media_store = config.get( "synchronous_backup_media_store", False diff --git a/synapse/rest/media/v1/filepath.py b/synapse/rest/media/v1/filepath.py index d5cec10127..43d0eea00d 100644 --- a/synapse/rest/media/v1/filepath.py +++ b/synapse/rest/media/v1/filepath.py @@ -15,103 +15,134 @@ import os import re +import functools NEW_FORMAT_ID_RE = re.compile(r"^\d\d\d\d-\d\d-\d\d") +def _wrap_in_base_path(func): + """Takes a function that returns a relative path and turns it into an + absolute path based on the location of the primary media store + """ + @functools.wraps(func) + def _wrapped(self, *args, **kwargs): + path = func(self, *args, **kwargs) + return os.path.join(self.primary_base_path, path) + + return _wrapped + + class MediaFilePaths(object): + """Describes where files are stored on disk. - def __init__(self, base_path): - self.base_path = base_path + Most of the function have a `*_rel` variant which returns a file path that + is relative to the base media store path. This is mainly used when we want + to write to the backup media store (when one is configured) + """ - def default_thumbnail(self, default_top_level, default_sub_type, width, - height, content_type, method): + def __init__(self, primary_base_path): + self.primary_base_path = primary_base_path + + def default_thumbnail_rel(self, default_top_level, default_sub_type, width, + height, content_type, method): top_level_type, sub_type = content_type.split("/") file_name = "%i-%i-%s-%s-%s" % ( width, height, top_level_type, sub_type, method ) return os.path.join( - self.base_path, "default_thumbnails", default_top_level, + "default_thumbnails", default_top_level, default_sub_type, file_name ) - def local_media_filepath(self, media_id): + default_thumbnail = _wrap_in_base_path(default_thumbnail_rel) + + def local_media_filepath_rel(self, media_id): return os.path.join( - self.base_path, "local_content", + "local_content", media_id[0:2], media_id[2:4], media_id[4:] ) - def local_media_thumbnail(self, media_id, width, height, content_type, - method): + local_media_filepath = _wrap_in_base_path(local_media_filepath_rel) + + def local_media_thumbnail_rel(self, media_id, width, height, content_type, + method): top_level_type, sub_type = content_type.split("/") file_name = "%i-%i-%s-%s-%s" % ( width, height, top_level_type, sub_type, method ) return os.path.join( - self.base_path, "local_thumbnails", + "local_thumbnails", media_id[0:2], media_id[2:4], media_id[4:], file_name ) - def remote_media_filepath(self, server_name, file_id): + local_media_thumbnail = _wrap_in_base_path(local_media_thumbnail_rel) + + def remote_media_filepath_rel(self, server_name, file_id): return os.path.join( - self.base_path, "remote_content", server_name, + "remote_content", server_name, file_id[0:2], file_id[2:4], file_id[4:] ) - def remote_media_thumbnail(self, server_name, file_id, width, height, - content_type, method): + remote_media_filepath = _wrap_in_base_path(remote_media_filepath_rel) + + def remote_media_thumbnail_rel(self, server_name, file_id, width, height, + content_type, method): top_level_type, sub_type = content_type.split("/") file_name = "%i-%i-%s-%s" % (width, height, top_level_type, sub_type) return os.path.join( - self.base_path, "remote_thumbnail", server_name, + "remote_thumbnail", server_name, file_id[0:2], file_id[2:4], file_id[4:], 
file_name ) + remote_media_thumbnail = _wrap_in_base_path(remote_media_thumbnail_rel) + def remote_media_thumbnail_dir(self, server_name, file_id): return os.path.join( - self.base_path, "remote_thumbnail", server_name, + "remote_thumbnail", server_name, file_id[0:2], file_id[2:4], file_id[4:], ) - def url_cache_filepath(self, media_id): + def url_cache_filepath_rel(self, media_id): if NEW_FORMAT_ID_RE.match(media_id): # Media id is of the form # E.g.: 2017-09-28-fsdRDt24DS234dsf return os.path.join( - self.base_path, "url_cache", + "url_cache", media_id[:10], media_id[11:] ) else: return os.path.join( - self.base_path, "url_cache", + "url_cache", media_id[0:2], media_id[2:4], media_id[4:], ) + url_cache_filepath = _wrap_in_base_path(url_cache_filepath_rel) + def url_cache_filepath_dirs_to_delete(self, media_id): "The dirs to try and remove if we delete the media_id file" if NEW_FORMAT_ID_RE.match(media_id): return [ os.path.join( - self.base_path, "url_cache", + "url_cache", media_id[:10], ), ] else: return [ os.path.join( - self.base_path, "url_cache", + "url_cache", media_id[0:2], media_id[2:4], ), os.path.join( - self.base_path, "url_cache", + "url_cache", media_id[0:2], ), ] - def url_cache_thumbnail(self, media_id, width, height, content_type, - method): + def url_cache_thumbnail_rel(self, media_id, width, height, content_type, + method): # Media id is of the form # E.g.: 2017-09-28-fsdRDt24DS234dsf @@ -122,29 +153,31 @@ class MediaFilePaths(object): if NEW_FORMAT_ID_RE.match(media_id): return os.path.join( - self.base_path, "url_cache_thumbnails", + "url_cache_thumbnails", media_id[:10], media_id[11:], file_name ) else: return os.path.join( - self.base_path, "url_cache_thumbnails", + "url_cache_thumbnails", media_id[0:2], media_id[2:4], media_id[4:], file_name ) + url_cache_thumbnail = _wrap_in_base_path(url_cache_thumbnail_rel) + def url_cache_thumbnail_directory(self, media_id): # Media id is of the form # E.g.: 2017-09-28-fsdRDt24DS234dsf if NEW_FORMAT_ID_RE.match(media_id): return os.path.join( - self.base_path, "url_cache_thumbnails", + "url_cache_thumbnails", media_id[:10], media_id[11:], ) else: return os.path.join( - self.base_path, "url_cache_thumbnails", + "url_cache_thumbnails", media_id[0:2], media_id[2:4], media_id[4:], ) @@ -155,26 +188,26 @@ class MediaFilePaths(object): if NEW_FORMAT_ID_RE.match(media_id): return [ os.path.join( - self.base_path, "url_cache_thumbnails", + "url_cache_thumbnails", media_id[:10], media_id[11:], ), os.path.join( - self.base_path, "url_cache_thumbnails", + "url_cache_thumbnails", media_id[:10], ), ] else: return [ os.path.join( - self.base_path, "url_cache_thumbnails", + "url_cache_thumbnails", media_id[0:2], media_id[2:4], media_id[4:], ), os.path.join( - self.base_path, "url_cache_thumbnails", + "url_cache_thumbnails", media_id[0:2], media_id[2:4], ), os.path.join( - self.base_path, "url_cache_thumbnails", + "url_cache_thumbnails", media_id[0:2], ), ] diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index 93b35af9cf..398e973ca9 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -60,10 +60,12 @@ class MediaRepository(object): self.max_upload_size = hs.config.max_upload_size self.max_image_pixels = hs.config.max_image_pixels - self.filepaths = MediaFilePaths(hs.config.media_store_path) - self.backup_filepaths = None + self.primary_base_path = hs.config.media_store_path + self.filepaths = MediaFilePaths(self.primary_base_path) + + 
self.backup_base_path = None if hs.config.backup_media_store_path: - self.backup_filepaths = MediaFilePaths(hs.config.backup_media_store_path) + self.backup_base_path = hs.config.backup_media_store_path self.synchronous_backup_media_store = hs.config.synchronous_backup_media_store @@ -94,42 +96,63 @@ class MediaRepository(object): if not os.path.exists(dirname): os.makedirs(dirname) - @defer.inlineCallbacks - def _write_to_file(self, source, file_name_func): - def write_file_thread(file_name): - source.seek(0) # Ensure we read from the start of the file - with open(file_name, "wb") as f: - shutil.copyfileobj(source, f) + @staticmethod + def write_file_synchronously(source, fname): + source.seek(0) # Ensure we read from the start of the file + with open(fname, "wb") as f: + shutil.copyfileobj(source, f) - fname = file_name_func(self.filepaths) + @defer.inlineCallbacks + def write_to_file(self, source, path): + """Write `source` to the on disk media store, and also the backup store + if configured. + + Args: + source: A file like object that should be written + path: Relative path to write file to + + Returns: + string: the file path written to in the primary media store + """ + fname = os.path.join(self.primary_base_path, path) self._makedirs(fname) # Write to the main repository - yield preserve_context_over_fn(threads.deferToThread, write_file_thread, fname) + yield preserve_context_over_fn( + threads.deferToThread, + self.write_file_synchronously, source, fname, + ) # Write to backup repository - if self.backup_filepaths: - backup_fname = file_name_func(self.backup_filepaths) + yield self.copy_to_backup(source, path) + + defer.returnValue(fname) + + @defer.inlineCallbacks + def copy_to_backup(self, source, path): + if self.backup_base_path: + backup_fname = os.path.join(self.backup_base_path, path) self._makedirs(backup_fname) # We can either wait for successful writing to the backup repository # or write in the background and immediately return if self.synchronous_backup_media_store: yield preserve_context_over_fn( - threads.deferToThread, write_file_thread, backup_fname, + threads.deferToThread, + self.write_file_synchronously, source, backup_fname, ) else: - preserve_fn(threads.deferToThread)(write_file_thread, backup_fname) - - defer.returnValue(fname) + preserve_fn(threads.deferToThread)( + self.write_file_synchronously, source, backup_fname, + ) @defer.inlineCallbacks def create_content(self, media_type, upload_name, content, content_length, auth_user): media_id = random_string(24) - fname = yield self._write_to_file( - content, lambda f: f.local_media_filepath(media_id) + fname = yield self.write_to_file( + content, self.filepaths.local_media_filepath_rel(media_id) ) logger.info("Stored local media in file %r", fname) @@ -180,9 +203,10 @@ class MediaRepository(object): def _download_remote_file(self, server_name, media_id): file_id = random_string(24) - fname = self.filepaths.remote_media_filepath( + fpath = self.filepaths.remote_media_filepath_rel( server_name, file_id ) + fname = os.path.join(self.primary_base_path, fpath) self._makedirs(fname) try: @@ -224,6 +248,9 @@ class MediaRepository(object): server_name, media_id) raise SynapseError(502, "Failed to fetch remote media") + with open(fname) as f: + yield self.copy_to_backup(f, fpath) + media_type = headers["Content-Type"][0] time_now_ms = self.clock.time_msec() @@ -322,15 +349,15 @@ class MediaRepository(object): ) if t_byte_source: - output_path = yield self._write_to_file( + output_path = yield self.write_to_file( 
t_byte_source, - lambda f: f.local_media_thumbnail( + self.filepaths.local_media_thumbnail_rel( media_id, t_width, t_height, t_type, t_method ) ) logger.info("Stored thumbnail in file %r", output_path) - yield self.store.store_local_thumbnail( + yield self.store.store_local_thumbnail_rel( media_id, t_width, t_height, t_type, t_method, len(t_byte_source.getvalue()) ) @@ -350,15 +377,15 @@ class MediaRepository(object): ) if t_byte_source: - output_path = yield self._write_to_file( + output_path = yield self.write_to_file( t_byte_source, - lambda f: f.remote_media_thumbnail( + self.filepaths.remote_media_thumbnail_rel( server_name, file_id, t_width, t_height, t_type, t_method ) ) logger.info("Stored thumbnail in file %r", output_path) - yield self.store.store_remote_media_thumbnail( + yield self.store.store_remote_media_thumbnail_rel( server_name, media_id, file_id, t_width, t_height, t_type, t_method, len(t_byte_source.getvalue()) ) @@ -403,17 +430,16 @@ class MediaRepository(object): yield preserve_context_over_fn(threads.deferToThread, generate_thumbnails) for t_width, t_height, t_method, t_type, t_byte_source in local_thumbnails: - def path_name_func(f): - if url_cache: - return f.url_cache_thumbnail( - media_id, t_width, t_height, t_type, t_method - ) - else: - return f.local_media_thumbnail( - media_id, t_width, t_height, t_type, t_method - ) + if url_cache: + file_path = self.filepaths.url_cache_thumbnail_rel( + media_id, t_width, t_height, t_type, t_method + ) + else: + file_path = self.filepaths.local_media_thumbnail_rel( + media_id, t_width, t_height, t_type, t_method + ) - yield self._write_to_file(t_byte_source, path_name_func) + yield self.write_to_file(t_byte_source, file_path) yield self.store.store_local_thumbnail( media_id, t_width, t_height, t_type, t_method, @@ -460,12 +486,11 @@ class MediaRepository(object): yield preserve_context_over_fn(threads.deferToThread, generate_thumbnails) for t_width, t_height, t_method, t_type, t_byte_source in remote_thumbnails: - def path_name_func(f): - return f.remote_media_thumbnail( - server_name, file_id, t_width, t_height, t_type, t_method - ) + file_path = self.filepaths.remote_media_thumbnail_rel( + server_name, file_id, t_width, t_height, t_type, t_method + ) - yield self._write_to_file(t_byte_source, path_name_func) + yield self.write_to_file(t_byte_source, file_path) yield self.store.store_remote_media_thumbnail( server_name, media_id, file_id, @@ -491,6 +516,8 @@ class MediaRepository(object): logger.info("Deleting: %r", key) + # TODO: Should we delete from the backup store + with (yield self.remote_media_linearizer.queue(key)): full_path = self.filepaths.remote_media_filepath(origin, file_id) try: diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index 895b480d5c..f82b8fbc51 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -59,6 +59,7 @@ class PreviewUrlResource(Resource): self.store = hs.get_datastore() self.client = SpiderHttpClient(hs) self.media_repo = media_repo + self.primary_base_path = media_repo.primary_base_path self.url_preview_url_blacklist = hs.config.url_preview_url_blacklist @@ -262,7 +263,8 @@ class PreviewUrlResource(Resource): file_id = datetime.date.today().isoformat() + '_' + random_string(16) - fname = self.filepaths.url_cache_filepath(file_id) + fpath = self.filepaths.url_cache_filepath_rel(file_id) + fname = os.path.join(self.primary_base_path, fpath) self.media_repo._makedirs(fname) 
try: @@ -273,6 +275,9 @@ class PreviewUrlResource(Resource): ) # FIXME: pass through 404s and other error messages nicely + with open(fname) as f: + yield self.media_repo.copy_to_backup(f, fpath) + media_type = headers["Content-Type"][0] time_now_ms = self.clock.time_msec() diff --git a/synapse/rest/media/v1/thumbnailer.py b/synapse/rest/media/v1/thumbnailer.py index 60498b08aa..e1ee535b9a 100644 --- a/synapse/rest/media/v1/thumbnailer.py +++ b/synapse/rest/media/v1/thumbnailer.py @@ -51,7 +51,11 @@ class Thumbnailer(object): return ((max_height * self.width) // self.height, max_height) def scale(self, width, height, output_type): - """Rescales the image to the given dimensions""" + """Rescales the image to the given dimensions. + + Returns: + BytesIO: the bytes of the encoded image ready to be written to disk + """ scaled = self.image.resize((width, height), Image.ANTIALIAS) return self._encode_image(scaled, output_type) @@ -65,6 +69,9 @@ class Thumbnailer(object): Args: max_width: The largest possible width. max_height: The larget possible height. + + Returns: + BytesIO: the bytes of the encoded image ready to be written to disk """ if width * self.height > height * self.width: scaled_height = (width * self.height) // self.width From 802ca12d0551ff761e01d9af8348df1dc96fc372 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 12 Oct 2017 17:37:21 +0100 Subject: [PATCH 0263/1637] Don't close file prematurely --- synapse/rest/media/v1/media_repository.py | 22 ++++++++++++++----- synapse/rest/media/v1/preview_url_resource.py | 4 ++-- 2 files changed, 18 insertions(+), 8 deletions(-) diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index 398e973ca9..63ed1c4268 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -97,16 +97,20 @@ class MediaRepository(object): os.makedirs(dirname) @staticmethod - def write_file_synchronously(source, fname): + def _write_file_synchronously(source, fname): source.seek(0) # Ensure we read from the start of the file with open(fname, "wb") as f: shutil.copyfileobj(source, f) + source.close() + @defer.inlineCallbacks def write_to_file(self, source, path): """Write `source` to the on disk media store, and also the backup store if configured. + Will close source once finished. + Args: source: A file like object that should be written path: Relative path to write file to @@ -120,7 +124,7 @@ class MediaRepository(object): # Write to the main repository yield preserve_context_over_fn( threads.deferToThread, - self.write_file_synchronously, source, fname, + self._write_file_synchronously, source, fname, ) # Write to backup repository @@ -130,6 +134,10 @@ class MediaRepository(object): @defer.inlineCallbacks def copy_to_backup(self, source, path): + """Copy file like object source to the backup media store, if configured. + + Will close source after its done. 
+ """ if self.backup_base_path: backup_fname = os.path.join(self.backup_base_path, path) self._makedirs(backup_fname) @@ -139,12 +147,14 @@ class MediaRepository(object): if self.synchronous_backup_media_store: yield preserve_context_over_fn( threads.deferToThread, - self.write_file_synchronously, source, backup_fname, + self._write_file_synchronously, source, backup_fname, ) else: preserve_fn(threads.deferToThread)( - self.write_file_synchronously, source, backup_fname, + self._write_file_synchronously, source, backup_fname, ) + else: + source.close() @defer.inlineCallbacks def create_content(self, media_type, upload_name, content, content_length, @@ -248,8 +258,8 @@ class MediaRepository(object): server_name, media_id) raise SynapseError(502, "Failed to fetch remote media") - with open(fname) as f: - yield self.copy_to_backup(f, fpath) + # Will close the file after its done + yield self.copy_to_backup(open(fname), fpath) media_type = headers["Content-Type"][0] time_now_ms = self.clock.time_msec() diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index f82b8fbc51..a3288c9cc6 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -275,8 +275,8 @@ class PreviewUrlResource(Resource): ) # FIXME: pass through 404s and other error messages nicely - with open(fname) as f: - yield self.media_repo.copy_to_backup(f, fpath) + # Will close the file after its done + yield self.media_repo.copy_to_backup(open(fname), fpath) media_type = headers["Content-Type"][0] time_now_ms = self.clock.time_msec() From 1259a76047a0a718ce0c9fb26513c9127f8ea919 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 12 Oct 2017 17:39:23 +0100 Subject: [PATCH 0264/1637] Get len before close --- synapse/rest/media/v1/media_repository.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index 63ed1c4268..d25b98db45 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -359,6 +359,8 @@ class MediaRepository(object): ) if t_byte_source: + t_len = len(t_byte_source.getvalue()) + output_path = yield self.write_to_file( t_byte_source, self.filepaths.local_media_thumbnail_rel( @@ -368,8 +370,7 @@ class MediaRepository(object): logger.info("Stored thumbnail in file %r", output_path) yield self.store.store_local_thumbnail_rel( - media_id, t_width, t_height, t_type, t_method, - len(t_byte_source.getvalue()) + media_id, t_width, t_height, t_type, t_method, t_len ) defer.returnValue(output_path) @@ -387,6 +388,7 @@ class MediaRepository(object): ) if t_byte_source: + t_len = len(t_byte_source.getvalue()) output_path = yield self.write_to_file( t_byte_source, self.filepaths.remote_media_thumbnail_rel( @@ -397,7 +399,7 @@ class MediaRepository(object): yield self.store.store_remote_media_thumbnail_rel( server_name, media_id, file_id, - t_width, t_height, t_type, t_method, len(t_byte_source.getvalue()) + t_width, t_height, t_type, t_method, t_len ) defer.returnValue(output_path) @@ -449,11 +451,12 @@ class MediaRepository(object): media_id, t_width, t_height, t_type, t_method ) + t_len = len(t_byte_source.getvalue()) + yield self.write_to_file(t_byte_source, file_path) yield self.store.store_local_thumbnail( - media_id, t_width, t_height, t_type, t_method, - len(t_byte_source.getvalue()) + media_id, t_width, t_height, t_type, t_method, t_len ) 
defer.returnValue({ @@ -500,11 +503,13 @@ class MediaRepository(object): server_name, file_id, t_width, t_height, t_type, t_method ) + t_len = len(t_byte_source.getvalue()) + yield self.write_to_file(t_byte_source, file_path) yield self.store.store_remote_media_thumbnail( server_name, media_id, file_id, - t_width, t_height, t_type, t_method, len(t_byte_source.getvalue()) + t_width, t_height, t_type, t_method, t_len ) defer.returnValue({ From cc505b4b5e98ba70d8576a562fc36b03d6aa5150 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 12 Oct 2017 17:52:30 +0100 Subject: [PATCH 0265/1637] getvalue closes buffer --- synapse/rest/media/v1/media_repository.py | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index d25b98db45..ff2ddd2f18 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -258,7 +258,7 @@ class MediaRepository(object): server_name, media_id) raise SynapseError(502, "Failed to fetch remote media") - # Will close the file after its done + # Will close the file after its done yield self.copy_to_backup(open(fname), fpath) media_type = headers["Content-Type"][0] @@ -359,8 +359,6 @@ class MediaRepository(object): ) if t_byte_source: - t_len = len(t_byte_source.getvalue()) - output_path = yield self.write_to_file( t_byte_source, self.filepaths.local_media_thumbnail_rel( @@ -369,6 +367,8 @@ class MediaRepository(object): ) logger.info("Stored thumbnail in file %r", output_path) + t_len = os.path.getsize(output_path) + yield self.store.store_local_thumbnail_rel( media_id, t_width, t_height, t_type, t_method, t_len ) @@ -388,7 +388,6 @@ class MediaRepository(object): ) if t_byte_source: - t_len = len(t_byte_source.getvalue()) output_path = yield self.write_to_file( t_byte_source, self.filepaths.remote_media_thumbnail_rel( @@ -397,7 +396,9 @@ class MediaRepository(object): ) logger.info("Stored thumbnail in file %r", output_path) - yield self.store.store_remote_media_thumbnail_rel( + t_len = os.path.getsize(output_path) + + yield self.store.store_remote_media_thumbnail( server_name, media_id, file_id, t_width, t_height, t_type, t_method, t_len ) @@ -451,9 +452,8 @@ class MediaRepository(object): media_id, t_width, t_height, t_type, t_method ) - t_len = len(t_byte_source.getvalue()) - - yield self.write_to_file(t_byte_source, file_path) + output_path = yield self.write_to_file(t_byte_source, file_path) + t_len = os.path.getsize(output_path) yield self.store.store_local_thumbnail( media_id, t_width, t_height, t_type, t_method, t_len @@ -503,9 +503,8 @@ class MediaRepository(object): server_name, file_id, t_width, t_height, t_type, t_method ) - t_len = len(t_byte_source.getvalue()) - - yield self.write_to_file(t_byte_source, file_path) + output_path = yield self.write_to_file(t_byte_source, file_path) + t_len = os.path.getsize(output_path) yield self.store.store_remote_media_thumbnail( server_name, media_id, file_id, From 4ae85ae12190015595f979f1a302ee608de6fd65 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 12 Oct 2017 17:57:31 +0100 Subject: [PATCH 0266/1637] Don't close prematurely.. 
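The bug: an earlier commit in this series made _write_file_synchronously close its source unconditionally, so the primary write in write_to_file closed the upload before copy_to_backup could read it again. The fix below makes closing opt-in, so only the last consumer of the handle closes it. A simplified sketch of that ownership rule (helper and variable names are illustrative, not from the patch):

    import shutil

    def copy_file_obj(source, fname, close_source=False):
        source.seek(0)  # each consumer reads the file from the start
        with open(fname, "wb") as f:
            shutil.copyfileobj(source, f)
        if close_source:
            source.close()  # only the final consumer may do this

    # copy_file_obj(upload, primary_path)                    # keep handle open
    # copy_file_obj(upload, backup_path, close_source=True)  # last user closes
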
--- synapse/rest/media/v1/media_repository.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index ff2ddd2f18..80b14a6739 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -97,12 +97,13 @@ class MediaRepository(object): os.makedirs(dirname) @staticmethod - def _write_file_synchronously(source, fname): + def _write_file_synchronously(source, fname, close_source=False): source.seek(0) # Ensure we read from the start of the file with open(fname, "wb") as f: shutil.copyfileobj(source, f) - source.close() + if close_source: + source.close() @defer.inlineCallbacks def write_to_file(self, source, path): @@ -148,10 +149,12 @@ class MediaRepository(object): yield preserve_context_over_fn( threads.deferToThread, self._write_file_synchronously, source, backup_fname, + close_source=True, ) else: preserve_fn(threads.deferToThread)( self._write_file_synchronously, source, backup_fname, + close_source=True, ) else: source.close() From d76621a47b7b4b778055760d43df9d02614dac19 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 12 Oct 2017 18:16:25 +0100 Subject: [PATCH 0267/1637] Fix comments --- synapse/rest/media/v1/filepath.py | 2 +- synapse/rest/media/v1/preview_url_resource.py | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/synapse/rest/media/v1/filepath.py b/synapse/rest/media/v1/filepath.py index 43d0eea00d..6923a3fbd3 100644 --- a/synapse/rest/media/v1/filepath.py +++ b/synapse/rest/media/v1/filepath.py @@ -35,7 +35,7 @@ def _wrap_in_base_path(func): class MediaFilePaths(object): """Describes where files are stored on disk. - Most of the function have a `*_rel` variant which returns a file path that + Most of the functions have a `*_rel` variant which returns a file path that is relative to the base media store path. This is mainly used when we want to write to the backup media store (when one is configured) """ diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index a3288c9cc6..e986e855a7 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -343,6 +343,9 @@ class PreviewUrlResource(Resource): def _expire_url_cache_data(self): """Clean up expired url cache content, media and thumbnails. 
""" + + # TODO: Delete from backup media store + now = self.clock.time_msec() # First we delete expired url cache entries From b60859d6cc429ea1934b94a8749caadd9a96ee21 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 13 Oct 2017 10:24:19 +0100 Subject: [PATCH 0268/1637] Use make_deferred_yieldable --- synapse/rest/media/v1/media_repository.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index 80b14a6739..5c5020fe9d 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -33,7 +33,7 @@ from synapse.api.errors import SynapseError, HttpResponseException, \ from synapse.util.async import Linearizer from synapse.util.stringutils import is_ascii -from synapse.util.logcontext import preserve_context_over_fn, preserve_fn +from synapse.util.logcontext import make_deferred_yieldable, preserve_fn from synapse.util.retryutils import NotRetryingDestination import os @@ -123,7 +123,7 @@ class MediaRepository(object): self._makedirs(fname) # Write to the main repository - yield preserve_context_over_fn( + yield make_deferred_yieldable( threads.deferToThread, self._write_file_synchronously, source, fname, ) @@ -146,7 +146,7 @@ class MediaRepository(object): # We can either wait for successful writing to the backup repository # or write in the background and immediately return if self.synchronous_backup_media_store: - yield preserve_context_over_fn( + yield make_deferred_yieldable( threads.deferToThread, self._write_file_synchronously, source, backup_fname, close_source=True, @@ -355,7 +355,7 @@ class MediaRepository(object): input_path = self.filepaths.local_media_filepath(media_id) thumbnailer = Thumbnailer(input_path) - t_byte_source = yield preserve_context_over_fn( + t_byte_source = yield make_deferred_yieldable( threads.deferToThread, self._generate_thumbnail, thumbnailer, t_width, t_height, t_method, t_type @@ -384,7 +384,7 @@ class MediaRepository(object): input_path = self.filepaths.remote_media_filepath(server_name, file_id) thumbnailer = Thumbnailer(input_path) - t_byte_source = yield preserve_context_over_fn( + t_byte_source = yield make_deferred_yieldable( threads.deferToThread, self._generate_thumbnail, thumbnailer, t_width, t_height, t_method, t_type @@ -443,7 +443,7 @@ class MediaRepository(object): r_width, r_height, r_method, r_type, t_byte_source )) - yield preserve_context_over_fn(threads.deferToThread, generate_thumbnails) + yield make_deferred_yieldable(threads.deferToThread, generate_thumbnails) for t_width, t_height, t_method, t_type, t_byte_source in local_thumbnails: if url_cache: @@ -499,7 +499,7 @@ class MediaRepository(object): r_width, r_height, r_method, r_type, t_byte_source )) - yield preserve_context_over_fn(threads.deferToThread, generate_thumbnails) + yield make_deferred_yieldable(threads.deferToThread, generate_thumbnails) for t_width, t_height, t_method, t_type, t_byte_source in remote_thumbnails: file_path = self.filepaths.remote_media_thumbnail_rel( From 64db043a71238db3f65f575c40f29260b83145be Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 13 Oct 2017 10:25:01 +0100 Subject: [PATCH 0269/1637] Move makedirs to thread --- synapse/rest/media/v1/media_repository.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index 5c5020fe9d..72aad221a8 100644 --- 
a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -98,6 +98,7 @@ class MediaRepository(object): @staticmethod def _write_file_synchronously(source, fname, close_source=False): + MediaRepository._makedirs(fname) source.seek(0) # Ensure we read from the start of the file with open(fname, "wb") as f: shutil.copyfileobj(source, f) @@ -120,7 +121,6 @@ class MediaRepository(object): string: the file path written to in the primary media store """ fname = os.path.join(self.primary_base_path, path) - self._makedirs(fname) # Write to the main repository yield make_deferred_yieldable( @@ -141,7 +141,6 @@ class MediaRepository(object): """ if self.backup_base_path: backup_fname = os.path.join(self.backup_base_path, path) - self._makedirs(backup_fname) # We can either wait for successful writing to the backup repository # or write in the background and immediately return From 35332298ef6a9828aa1fdb10f59230f47763084e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 13 Oct 2017 10:39:32 +0100 Subject: [PATCH 0270/1637] Fix up comments --- synapse/rest/media/v1/media_repository.py | 28 +++++++++++++++++++++-- 1 file changed, 26 insertions(+), 2 deletions(-) diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index 72aad221a8..f3a5b19a80 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -98,6 +98,14 @@ class MediaRepository(object): @staticmethod def _write_file_synchronously(source, fname, close_source=False): + """Write `source` to the path `fname` synchronously. Should be called + from a thread. + + Args: + source: A file like object to be written + fname (str): Path to write to + close_source (bool): Whether to close source after writing + """ MediaRepository._makedirs(fname) source.seek(0) # Ensure we read from the start of the file with open(fname, "wb") as f: @@ -115,10 +123,10 @@ class MediaRepository(object): Args: source: A file like object that should be written - path: Relative path to write file to + path(str): Relative path to write file to Returns: - string: the file path written to in the primary media store + Deferred[str]: the file path written to in the primary media store """ fname = os.path.join(self.primary_base_path, path) @@ -138,6 +146,10 @@ class MediaRepository(object): """Copy file like object source to the backup media store, if configured. Will close source after its done. 
+ + Args: + source: A file like object that should be written + path(str): Relative path to write file to """ if self.backup_base_path: backup_fname = os.path.join(self.backup_base_path, path) @@ -161,6 +173,18 @@ class MediaRepository(object): @defer.inlineCallbacks def create_content(self, media_type, upload_name, content, content_length, auth_user): + """Store uploaded content for a local user and return the mxc URL + + Args: + media_type(str): The content type of the file + upload_name(str): The name of the file + content: A file like object that is the content to store + content_length(int): The length of the content + auth_user(str): The user_id of the uploader + + Returns: + Deferred[str]: The mxc url of the stored content + """ media_id = random_string(24) fname = yield self.write_to_file( From e3428d26ca5a23a3dac6d106aff8ac19f9839f32 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 13 Oct 2017 10:39:59 +0100 Subject: [PATCH 0271/1637] Fix typo --- synapse/rest/media/v1/media_repository.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index f3a5b19a80..76220a5531 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -395,7 +395,7 @@ class MediaRepository(object): t_len = os.path.getsize(output_path) - yield self.store.store_local_thumbnail_rel( + yield self.store.store_local_thumbnail( media_id, t_width, t_height, t_type, t_method, t_len ) From 505371414f6ba9aeaa95eb8d34f7893c4cc2b07e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 13 Oct 2017 11:23:53 +0100 Subject: [PATCH 0272/1637] Fix up thumbnailing function --- synapse/rest/media/v1/media_repository.py | 127 ++++++++---------- synapse/rest/media/v1/preview_url_resource.py | 8 +- synapse/rest/media/v1/thumbnailer.py | 13 +- 3 files changed, 73 insertions(+), 75 deletions(-) diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index 76220a5531..36f42c73be 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -206,7 +206,7 @@ class MediaRepository(object): "media_length": content_length, } - yield self._generate_local_thumbnails(media_id, media_info) + yield self._generate_thumbnails(None, media_id, media_info) defer.returnValue("mxc://%s/%s" % (self.server_name, media_id)) @@ -339,7 +339,7 @@ class MediaRepository(object): "filesystem_id": file_id, } - yield self._generate_remote_thumbnails( + yield self._generate_thumbnails( server_name, media_id, media_info ) @@ -385,6 +385,8 @@ class MediaRepository(object): ) if t_byte_source: + t_width, t_height = t_byte_source.dimensions + output_path = yield self.write_to_file( t_byte_source, self.filepaths.local_media_thumbnail_rel( @@ -414,6 +416,8 @@ class MediaRepository(object): ) if t_byte_source: + t_width, t_height = t_byte_source.dimensions + output_path = yield self.write_to_file( t_byte_source, self.filepaths.remote_media_thumbnail_rel( @@ -432,13 +436,28 @@ class MediaRepository(object): defer.returnValue(output_path) @defer.inlineCallbacks - def _generate_local_thumbnails(self, media_id, media_info, url_cache=False): + def _generate_thumbnails(self, server_name, media_id, media_info, url_cache=False): + """Generate and store thumbnails for an image. 
+ + Args: + server_name(str|None): The server name if remote media, else None if local + media_id(str) + media_info(dict) + url_cache(bool): If we are thumbnailing images downloaded for the URL cache, + used exclusively by the url previewer + + Returns: + Deferred[dict]: Dict with "width" and "height" keys of original image + """ media_type = media_info["media_type"] + file_id = media_info.get("filesystem_id") requirements = self._get_thumbnail_requirements(media_type) if not requirements: return - if url_cache: + if server_name: + input_path = self.filepaths.remote_media_filepath(server_name, file_id) + elif url_cache: input_path = self.filepaths.url_cache_filepath(media_id) else: input_path = self.filepaths.local_media_filepath(media_id) @@ -454,22 +473,40 @@ class MediaRepository(object): ) return - local_thumbnails = [] + # We deduplicate the thumbnail sizes by ignoring the cropped versions if + # they have the same dimensions of a scaled one. + thumbnails = {} + for r_width, r_height, r_method, r_type in requirements: + if r_method == "crop": + thumbnails.setdefault[(r_width, r_height)] = (r_method, r_type) + elif r_method == "scale": + t_width, t_height = thumbnailer.aspect(t_width, t_height) + t_width = min(m_width, t_width) + t_height = min(m_height, t_height) + thumbnails[(t_width, t_height)] = (r_method, r_type) - def generate_thumbnails(): - for r_width, r_height, r_method, r_type in requirements: - t_byte_source = self._generate_thumbnail( - thumbnailer, r_width, r_height, r_method, r_type, + # Now we generate the thumbnails for each dimension, store it + for (r_width, r_height), (r_method, r_type) in thumbnails.iteritems(): + t_byte_source = thumbnailer.crop(t_width, t_height, t_type) + + if r_type == "crop": + t_byte_source = yield make_deferred_yieldable( + threads.deferToThread, thumbnailer.crop, + r_width, r_height, r_type, + ) + else: + t_byte_source = yield make_deferred_yieldable( + threads.deferToThread, thumbnailer.scale, + r_width, r_height, r_type, ) - local_thumbnails.append(( - r_width, r_height, r_method, r_type, t_byte_source - )) + t_width, t_height = t_byte_source.dimensions - yield make_deferred_yieldable(threads.deferToThread, generate_thumbnails) - - for t_width, t_height, t_method, t_type, t_byte_source in local_thumbnails: - if url_cache: + if server_name: + file_path = self.filepaths.remote_media_thumbnail_rel( + server_name, file_id, t_width, t_height, t_type, t_method + ) + elif url_cache: file_path = self.filepaths.url_cache_thumbnail_rel( media_id, t_width, t_height, t_type, t_method ) @@ -481,61 +518,15 @@ class MediaRepository(object): output_path = yield self.write_to_file(t_byte_source, file_path) t_len = os.path.getsize(output_path) - yield self.store.store_local_thumbnail( - media_id, t_width, t_height, t_type, t_method, t_len - ) - - defer.returnValue({ - "width": m_width, - "height": m_height, - }) - - @defer.inlineCallbacks - def _generate_remote_thumbnails(self, server_name, media_id, media_info): - media_type = media_info["media_type"] - file_id = media_info["filesystem_id"] - requirements = self._get_thumbnail_requirements(media_type) - if not requirements: - return - - remote_thumbnails = [] - - input_path = self.filepaths.remote_media_filepath(server_name, file_id) - thumbnailer = Thumbnailer(input_path) - m_width = thumbnailer.width - m_height = thumbnailer.height - - def generate_thumbnails(): - if m_width * m_height >= self.max_image_pixels: - logger.info( - "Image too large to thumbnail %r x %r > %r", - m_width, m_height, 
self.max_image_pixels - ) - return - - for r_width, r_height, r_method, r_type in requirements: - t_byte_source = self._generate_thumbnail( - thumbnailer, r_width, r_height, r_method, r_type, - ) - - remote_thumbnails.append(( - r_width, r_height, r_method, r_type, t_byte_source - )) - - yield make_deferred_yieldable(threads.deferToThread, generate_thumbnails) - - for t_width, t_height, t_method, t_type, t_byte_source in remote_thumbnails: - file_path = self.filepaths.remote_media_thumbnail_rel( - server_name, file_id, t_width, t_height, t_type, t_method - ) - - output_path = yield self.write_to_file(t_byte_source, file_path) - t_len = os.path.getsize(output_path) - - yield self.store.store_remote_media_thumbnail( + if server_name: + yield self.store.store_remote_media_thumbnail( server_name, media_id, file_id, t_width, t_height, t_type, t_method, t_len ) + else: + yield self.store.store_local_thumbnail( + media_id, t_width, t_height, t_type, t_method, t_len + ) defer.returnValue({ "width": m_width, diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index e986e855a7..c734f6b7cd 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -171,8 +171,8 @@ class PreviewUrlResource(Resource): logger.debug("got media_info of '%s'" % media_info) if _is_media(media_info['media_type']): - dims = yield self.media_repo._generate_local_thumbnails( - media_info['filesystem_id'], media_info, url_cache=True, + dims = yield self.media_repo._generate_thumbnails( + None, media_info['filesystem_id'], media_info, url_cache=True, ) og = { @@ -217,8 +217,8 @@ class PreviewUrlResource(Resource): if _is_media(image_info['media_type']): # TODO: make sure we don't choke on white-on-transparent images - dims = yield self.media_repo._generate_local_thumbnails( - image_info['filesystem_id'], image_info, url_cache=True, + dims = yield self.media_repo._generate_thumbnails( + None, image_info['filesystem_id'], image_info, url_cache=True, ) if dims: og["og:image:width"] = dims['width'] diff --git a/synapse/rest/media/v1/thumbnailer.py b/synapse/rest/media/v1/thumbnailer.py index e1ee535b9a..09650bd527 100644 --- a/synapse/rest/media/v1/thumbnailer.py +++ b/synapse/rest/media/v1/thumbnailer.py @@ -54,7 +54,7 @@ class Thumbnailer(object): """Rescales the image to the given dimensions. Returns: - BytesIO: the bytes of the encoded image ready to be written to disk + ImageIO: the bytes of the encoded image ready to be written to disk """ scaled = self.image.resize((width, height), Image.ANTIALIAS) return self._encode_image(scaled, output_type) @@ -71,7 +71,7 @@ class Thumbnailer(object): max_height: The larget possible height. 
Returns: - BytesIO: the bytes of the encoded image ready to be written to disk + ImageIO: the bytes of the encoded image ready to be written to disk """ if width * self.height > height * self.width: scaled_height = (width * self.height) // self.width @@ -92,6 +92,13 @@ class Thumbnailer(object): return self._encode_image(cropped, output_type) def _encode_image(self, output_image, output_type): - output_bytes_io = BytesIO() + output_bytes_io = ImageIO(output_image.size) output_image.save(output_bytes_io, self.FORMATS[output_type], quality=80) + output_image.close() return output_bytes_io + + +class ImageIO(BytesIO): + def __init__(self, dimensions): + super(ImageIO, self).__init__() + self.dimensions = dimensions From 0e28281a021101ac199cbf2d0d130190110921bb Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 13 Oct 2017 11:33:49 +0100 Subject: [PATCH 0273/1637] Fix up --- synapse/rest/media/v1/media_repository.py | 62 +++++++++++------------ synapse/rest/media/v1/thumbnailer.py | 13 ++--- 2 files changed, 32 insertions(+), 43 deletions(-) diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index 36f42c73be..a310d08f5f 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -131,10 +131,9 @@ class MediaRepository(object): fname = os.path.join(self.primary_base_path, path) # Write to the main repository - yield make_deferred_yieldable( - threads.deferToThread, + yield make_deferred_yieldable(threads.deferToThread( self._write_file_synchronously, source, fname, - ) + )) # Write to backup repository yield self.copy_to_backup(source, path) @@ -157,11 +156,10 @@ class MediaRepository(object): # We can either wait for successful writing to the backup repository # or write in the background and immediately return if self.synchronous_backup_media_store: - yield make_deferred_yieldable( - threads.deferToThread, + yield make_deferred_yieldable(threads.deferToThread( self._write_file_synchronously, source, backup_fname, close_source=True, - ) + )) else: preserve_fn(threads.deferToThread)( self._write_file_synchronously, source, backup_fname, @@ -378,11 +376,10 @@ class MediaRepository(object): input_path = self.filepaths.local_media_filepath(media_id) thumbnailer = Thumbnailer(input_path) - t_byte_source = yield make_deferred_yieldable( - threads.deferToThread, + t_byte_source = yield make_deferred_yieldable(threads.deferToThread( self._generate_thumbnail, thumbnailer, t_width, t_height, t_method, t_type - ) + )) if t_byte_source: t_width, t_height = t_byte_source.dimensions @@ -409,11 +406,10 @@ class MediaRepository(object): input_path = self.filepaths.remote_media_filepath(server_name, file_id) thumbnailer = Thumbnailer(input_path) - t_byte_source = yield make_deferred_yieldable( - threads.deferToThread, + t_byte_source = yield make_deferred_yieldable(threads.deferToThread( self._generate_thumbnail, thumbnailer, t_width, t_height, t_method, t_type - ) + )) if t_byte_source: t_width, t_height = t_byte_source.dimensions @@ -478,34 +474,32 @@ class MediaRepository(object): thumbnails = {} for r_width, r_height, r_method, r_type in requirements: if r_method == "crop": - thumbnails.setdefault[(r_width, r_height)] = (r_method, r_type) + thumbnails.setdefault((r_width, r_height), (r_method, r_type)) elif r_method == "scale": - t_width, t_height = thumbnailer.aspect(t_width, t_height) + t_width, t_height = thumbnailer.aspect(r_width, r_height) t_width = min(m_width, t_width) t_height = min(m_height, 
t_height) thumbnails[(t_width, t_height)] = (r_method, r_type) # Now we generate the thumbnails for each dimension, store it - for (r_width, r_height), (r_method, r_type) in thumbnails.iteritems(): - t_byte_source = thumbnailer.crop(t_width, t_height, t_type) - - if r_type == "crop": - t_byte_source = yield make_deferred_yieldable( - threads.deferToThread, thumbnailer.crop, - r_width, r_height, r_type, - ) + for (t_width, t_height), (t_method, t_type) in thumbnails.iteritems(): + # Generate the thumbnail + if t_type == "crop": + t_byte_source = yield make_deferred_yieldable(threads.deferToThread( + thumbnailer.crop, + r_width, r_height, t_type, + )) else: - t_byte_source = yield make_deferred_yieldable( - threads.deferToThread, thumbnailer.scale, - r_width, r_height, r_type, - ) - - t_width, t_height = t_byte_source.dimensions + t_byte_source = yield make_deferred_yieldable(threads.deferToThread( + thumbnailer.scale, + r_width, r_height, t_type, + )) + # Work out the correct file name for thumbnail if server_name: file_path = self.filepaths.remote_media_thumbnail_rel( - server_name, file_id, t_width, t_height, t_type, t_method - ) + server_name, file_id, t_width, t_height, t_type, t_method + ) elif url_cache: file_path = self.filepaths.url_cache_thumbnail_rel( media_id, t_width, t_height, t_type, t_method @@ -515,14 +509,16 @@ class MediaRepository(object): media_id, t_width, t_height, t_type, t_method ) + # Write to disk output_path = yield self.write_to_file(t_byte_source, file_path) t_len = os.path.getsize(output_path) + # Write to database if server_name: yield self.store.store_remote_media_thumbnail( - server_name, media_id, file_id, - t_width, t_height, t_type, t_method, t_len - ) + server_name, media_id, file_id, + t_width, t_height, t_type, t_method, t_len + ) else: yield self.store.store_local_thumbnail( media_id, t_width, t_height, t_type, t_method, t_len diff --git a/synapse/rest/media/v1/thumbnailer.py b/synapse/rest/media/v1/thumbnailer.py index 09650bd527..e1ee535b9a 100644 --- a/synapse/rest/media/v1/thumbnailer.py +++ b/synapse/rest/media/v1/thumbnailer.py @@ -54,7 +54,7 @@ class Thumbnailer(object): """Rescales the image to the given dimensions. Returns: - ImageIO: the bytes of the encoded image ready to be written to disk + BytesIO: the bytes of the encoded image ready to be written to disk """ scaled = self.image.resize((width, height), Image.ANTIALIAS) return self._encode_image(scaled, output_type) @@ -71,7 +71,7 @@ class Thumbnailer(object): max_height: The larget possible height. 
Returns: - ImageIO: the bytes of the encoded image ready to be written to disk + BytesIO: the bytes of the encoded image ready to be written to disk """ if width * self.height > height * self.width: scaled_height = (width * self.height) // self.width @@ -92,13 +92,6 @@ class Thumbnailer(object): return self._encode_image(cropped, output_type) def _encode_image(self, output_image, output_type): - output_bytes_io = ImageIO(output_image.size) + output_bytes_io = BytesIO() output_image.save(output_bytes_io, self.FORMATS[output_type], quality=80) - output_image.close() return output_bytes_io - - -class ImageIO(BytesIO): - def __init__(self, dimensions): - super(ImageIO, self).__init__() - self.dimensions = dimensions From 9732ec6797339dd33bc472cef5081a858ddccb30 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 13 Oct 2017 11:34:41 +0100 Subject: [PATCH 0274/1637] s/write_to_file/write_to_file_and_backup/ --- synapse/rest/media/v1/media_repository.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index a310d08f5f..c9753ebb52 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -115,7 +115,7 @@ class MediaRepository(object): source.close() @defer.inlineCallbacks - def write_to_file(self, source, path): + def write_to_file_and_backup(self, source, path): """Write `source` to the on disk media store, and also the backup store if configured. @@ -185,7 +185,7 @@ class MediaRepository(object): """ media_id = random_string(24) - fname = yield self.write_to_file( + fname = yield self.write_to_file_and_backup( content, self.filepaths.local_media_filepath_rel(media_id) ) @@ -384,7 +384,7 @@ class MediaRepository(object): if t_byte_source: t_width, t_height = t_byte_source.dimensions - output_path = yield self.write_to_file( + output_path = yield self.write_to_file_and_backup( t_byte_source, self.filepaths.local_media_thumbnail_rel( media_id, t_width, t_height, t_type, t_method @@ -414,7 +414,7 @@ class MediaRepository(object): if t_byte_source: t_width, t_height = t_byte_source.dimensions - output_path = yield self.write_to_file( + output_path = yield self.write_to_file_and_backup( t_byte_source, self.filepaths.remote_media_thumbnail_rel( server_name, file_id, t_width, t_height, t_type, t_method @@ -510,7 +510,7 @@ class MediaRepository(object): ) # Write to disk - output_path = yield self.write_to_file(t_byte_source, file_path) + output_path = yield self.write_to_file_and_backup(t_byte_source, file_path) t_len = os.path.getsize(output_path) # Write to database From ae5d18617afd98bb5d51b43c2a12a99e9d96da39 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 13 Oct 2017 11:35:44 +0100 Subject: [PATCH 0275/1637] Make things be absolute paths again --- synapse/rest/media/v1/filepath.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/synapse/rest/media/v1/filepath.py b/synapse/rest/media/v1/filepath.py index 6923a3fbd3..a3a15ac302 100644 --- a/synapse/rest/media/v1/filepath.py +++ b/synapse/rest/media/v1/filepath.py @@ -172,12 +172,12 @@ class MediaFilePaths(object): if NEW_FORMAT_ID_RE.match(media_id): return os.path.join( - "url_cache_thumbnails", + self.primary_base_path, "url_cache_thumbnails", media_id[:10], media_id[11:], ) else: return os.path.join( - "url_cache_thumbnails", + self.primary_base_path, "url_cache_thumbnails", media_id[0:2], media_id[2:4], media_id[4:], ) @@ -188,26 +188,26 @@ 
class MediaFilePaths(object): if NEW_FORMAT_ID_RE.match(media_id): return [ os.path.join( - "url_cache_thumbnails", + self.primary_base_path, "url_cache_thumbnails", media_id[:10], media_id[11:], ), os.path.join( - "url_cache_thumbnails", + self.primary_base_path, "url_cache_thumbnails", media_id[:10], ), ] else: return [ os.path.join( - "url_cache_thumbnails", + self.primary_base_path, "url_cache_thumbnails", media_id[0:2], media_id[2:4], media_id[4:], ), os.path.join( - "url_cache_thumbnails", + self.primary_base_path, "url_cache_thumbnails", media_id[0:2], media_id[2:4], ), os.path.join( - "url_cache_thumbnails", + self.primary_base_path, "url_cache_thumbnails", media_id[0:2], ), ] From 4d7e1dde70e6f2300ab83fb3208152f3d73bde71 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 13 Oct 2017 11:36:32 +0100 Subject: [PATCH 0276/1637] Remove unnecessary diff --- synapse/rest/media/v1/media_repository.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index c9753ebb52..f06813c48c 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -63,9 +63,7 @@ class MediaRepository(object): self.primary_base_path = hs.config.media_store_path self.filepaths = MediaFilePaths(self.primary_base_path) - self.backup_base_path = None - if hs.config.backup_media_store_path: - self.backup_base_path = hs.config.backup_media_store_path + self.backup_base_path = hs.config.backup_media_store_path self.synchronous_backup_media_store = hs.config.synchronous_backup_media_store From a675bd08bd1a016a16bd0e10547e8c26be391ee0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 13 Oct 2017 11:41:06 +0100 Subject: [PATCH 0277/1637] Add paths back in... 
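The helpers with a `_rel` suffix in filepath.py return paths relative to the media store and are wrapped by `_wrap_in_base_path` into absolute variants; the directory helpers being fixed here are unwrapped, so they must join the base path themselves. A minimal sketch of the wrapping pattern (mirroring the decorator shown in the filepath.py hunks of the next patch; `base_path` is the primary media store directory):

    import functools
    import os

    def _wrap_in_base_path(func):
        # Turn a method returning a media-store-relative path into one
        # returning the absolute on-disk path.
        @functools.wraps(func)
        def _wrapped(self, *args, **kwargs):
            rel_path = func(self, *args, **kwargs)
            return os.path.join(self.base_path, rel_path)
        return _wrapped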
--- synapse/rest/media/v1/filepath.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/synapse/rest/media/v1/filepath.py b/synapse/rest/media/v1/filepath.py index a3a15ac302..fec0bbf572 100644 --- a/synapse/rest/media/v1/filepath.py +++ b/synapse/rest/media/v1/filepath.py @@ -100,7 +100,7 @@ class MediaFilePaths(object): def remote_media_thumbnail_dir(self, server_name, file_id): return os.path.join( - "remote_thumbnail", server_name, + self.primary_base_path, "remote_thumbnail", server_name, file_id[0:2], file_id[2:4], file_id[4:], ) @@ -125,18 +125,18 @@ class MediaFilePaths(object): if NEW_FORMAT_ID_RE.match(media_id): return [ os.path.join( - "url_cache", + self.primary_base_path, "url_cache", media_id[:10], ), ] else: return [ os.path.join( - "url_cache", + self.primary_base_path, "url_cache", media_id[0:2], media_id[2:4], ), os.path.join( - "url_cache", + self.primary_base_path, "url_cache", media_id[0:2], ), ] From 1f43d2239757db0b376a3582066190221942cddc Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 13 Oct 2017 11:42:07 +0100 Subject: [PATCH 0278/1637] Don't needlessly rename variable --- synapse/rest/media/v1/filepath.py | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/synapse/rest/media/v1/filepath.py b/synapse/rest/media/v1/filepath.py index fec0bbf572..d5164e47e0 100644 --- a/synapse/rest/media/v1/filepath.py +++ b/synapse/rest/media/v1/filepath.py @@ -27,7 +27,7 @@ def _wrap_in_base_path(func): @functools.wraps(func) def _wrapped(self, *args, **kwargs): path = func(self, *args, **kwargs) - return os.path.join(self.primary_base_path, path) + return os.path.join(self.base_path, path) return _wrapped @@ -41,7 +41,7 @@ class MediaFilePaths(object): """ def __init__(self, primary_base_path): - self.primary_base_path = primary_base_path + self.base_path = primary_base_path def default_thumbnail_rel(self, default_top_level, default_sub_type, width, height, content_type, method): @@ -100,7 +100,7 @@ class MediaFilePaths(object): def remote_media_thumbnail_dir(self, server_name, file_id): return os.path.join( - self.primary_base_path, "remote_thumbnail", server_name, + self.base_path, "remote_thumbnail", server_name, file_id[0:2], file_id[2:4], file_id[4:], ) @@ -125,18 +125,18 @@ class MediaFilePaths(object): if NEW_FORMAT_ID_RE.match(media_id): return [ os.path.join( - self.primary_base_path, "url_cache", + self.base_path, "url_cache", media_id[:10], ), ] else: return [ os.path.join( - self.primary_base_path, "url_cache", + self.base_path, "url_cache", media_id[0:2], media_id[2:4], ), os.path.join( - self.primary_base_path, "url_cache", + self.base_path, "url_cache", media_id[0:2], ), ] @@ -172,12 +172,12 @@ class MediaFilePaths(object): if NEW_FORMAT_ID_RE.match(media_id): return os.path.join( - self.primary_base_path, "url_cache_thumbnails", + self.base_path, "url_cache_thumbnails", media_id[:10], media_id[11:], ) else: return os.path.join( - self.primary_base_path, "url_cache_thumbnails", + self.base_path, "url_cache_thumbnails", media_id[0:2], media_id[2:4], media_id[4:], ) @@ -188,26 +188,26 @@ class MediaFilePaths(object): if NEW_FORMAT_ID_RE.match(media_id): return [ os.path.join( - self.primary_base_path, "url_cache_thumbnails", + self.base_path, "url_cache_thumbnails", media_id[:10], media_id[11:], ), os.path.join( - self.primary_base_path, "url_cache_thumbnails", + self.base_path, "url_cache_thumbnails", media_id[:10], ), ] else: return [ os.path.join( - self.primary_base_path, 
"url_cache_thumbnails", + self.base_path, "url_cache_thumbnails", media_id[0:2], media_id[2:4], media_id[4:], ), os.path.join( - self.primary_base_path, "url_cache_thumbnails", + self.base_path, "url_cache_thumbnails", media_id[0:2], media_id[2:4], ), os.path.join( - self.primary_base_path, "url_cache_thumbnails", + self.base_path, "url_cache_thumbnails", media_id[0:2], ), ] From c021c39cbd0bf1f0a85c9699275600ac35aa9ec4 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 13 Oct 2017 13:46:53 +0100 Subject: [PATCH 0279/1637] Remove spurious addition --- synapse/rest/media/v1/media_repository.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index f06813c48c..3b8fe5ddb4 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -380,8 +380,6 @@ class MediaRepository(object): )) if t_byte_source: - t_width, t_height = t_byte_source.dimensions - output_path = yield self.write_to_file_and_backup( t_byte_source, self.filepaths.local_media_thumbnail_rel( @@ -410,8 +408,6 @@ class MediaRepository(object): )) if t_byte_source: - t_width, t_height = t_byte_source.dimensions - output_path = yield self.write_to_file_and_backup( t_byte_source, self.filepaths.remote_media_thumbnail_rel( From ad1911bbf46f658ee343ee26e3011b3f1bcbd572 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 13 Oct 2017 13:47:05 +0100 Subject: [PATCH 0280/1637] Comment --- synapse/rest/media/v1/media_repository.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index 3b8fe5ddb4..700fd0dd24 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -121,7 +121,7 @@ class MediaRepository(object): Args: source: A file like object that should be written - path(str): Relative path to write file to + path (str): Relative path to write file to Returns: Deferred[str]: the file path written to in the primary media store From 31aa7bd8d1748548b2523f58b348bb6787dcc019 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 13 Oct 2017 13:47:38 +0100 Subject: [PATCH 0281/1637] Move type into key --- synapse/rest/media/v1/media_repository.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index 700fd0dd24..dee834389f 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -468,15 +468,15 @@ class MediaRepository(object): thumbnails = {} for r_width, r_height, r_method, r_type in requirements: if r_method == "crop": - thumbnails.setdefault((r_width, r_height), (r_method, r_type)) + thumbnails.setdefault((r_width, r_height,r_type), r_method) elif r_method == "scale": t_width, t_height = thumbnailer.aspect(r_width, r_height) t_width = min(m_width, t_width) t_height = min(m_height, t_height) - thumbnails[(t_width, t_height)] = (r_method, r_type) + thumbnails[(t_width, t_height, r_type)] = r_method # Now we generate the thumbnails for each dimension, store it - for (t_width, t_height), (t_method, t_type) in thumbnails.iteritems(): + for (t_width, t_height, t_type), t_method in thumbnails.iteritems(): # Generate the thumbnail if t_type == "crop": t_byte_source = yield make_deferred_yieldable(threads.deferToThread( From 931fc43cc840f40402c78e9478cf3adac6559b27 Mon Sep 17 00:00:00 2001 From: Matthew 
Hodgson Date: Fri, 13 Oct 2017 13:54:19 +0100 Subject: [PATCH 0282/1637] fix copyright to companies which actually exist(ed) --- docs/sphinx/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sphinx/conf.py b/docs/sphinx/conf.py index 15c19834fc..06e1b3c33d 100644 --- a/docs/sphinx/conf.py +++ b/docs/sphinx/conf.py @@ -50,7 +50,7 @@ master_doc = 'index' # General information about the project. project = u'Synapse' -copyright = u'2014, TNG' +copyright = u'Copyright 2014-2017 OpenMarket, 2017 Vector Creations Ltd, 2017 New Vector Ltd' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the From b92a8e6e4aa2283daaa4e6050f1dbd503ddc9434 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 13 Oct 2017 13:58:57 +0100 Subject: [PATCH 0283/1637] PEP8 --- synapse/rest/media/v1/media_repository.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index dee834389f..d2ac0175d7 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -468,7 +468,7 @@ class MediaRepository(object): thumbnails = {} for r_width, r_height, r_method, r_type in requirements: if r_method == "crop": - thumbnails.setdefault((r_width, r_height,r_type), r_method) + thumbnails.setdefault((r_width, r_height, r_type), r_method) elif r_method == "scale": t_width, t_height = thumbnailer.aspect(r_width, r_height) t_width = min(m_width, t_width) From 2b24416e90b0bf1ee6d29cfc384670f4eeca0ced Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 13 Oct 2017 14:11:34 +0100 Subject: [PATCH 0284/1637] Don't reuse source but instead copy from primary media store to backup --- synapse/rest/media/v1/media_repository.py | 28 ++++++------------- synapse/rest/media/v1/preview_url_resource.py | 3 +- 2 files changed, 9 insertions(+), 22 deletions(-) diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index d2ac0175d7..e32a67e16a 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -95,7 +95,7 @@ class MediaRepository(object): os.makedirs(dirname) @staticmethod - def _write_file_synchronously(source, fname, close_source=False): + def _write_file_synchronously(source, fname): """Write `source` to the path `fname` synchronously. Should be called from a thread. @@ -109,16 +109,11 @@ class MediaRepository(object): with open(fname, "wb") as f: shutil.copyfileobj(source, f) - if close_source: - source.close() - @defer.inlineCallbacks def write_to_file_and_backup(self, source, path): """Write `source` to the on disk media store, and also the backup store if configured. - Will close source once finished. - Args: source: A file like object that should be written path (str): Relative path to write file to @@ -134,37 +129,31 @@ class MediaRepository(object): )) # Write to backup repository - yield self.copy_to_backup(source, path) + yield self.copy_to_backup(path) defer.returnValue(fname) @defer.inlineCallbacks - def copy_to_backup(self, source, path): - """Copy file like object source to the backup media store, if configured. - - Will close source after its done. + def copy_to_backup(self, path): + """Copy a file from the primary to backup media store, if configured. 
Args: - source: A file like object that should be written path(str): Relative path to write file to """ if self.backup_base_path: + primary_fname = os.path.join(self.primary_base_path, path) backup_fname = os.path.join(self.backup_base_path, path) # We can either wait for successful writing to the backup repository # or write in the background and immediately return if self.synchronous_backup_media_store: yield make_deferred_yieldable(threads.deferToThread( - self._write_file_synchronously, source, backup_fname, - close_source=True, + shutil.copyfile, primary_fname, backup_fname, )) else: preserve_fn(threads.deferToThread)( - self._write_file_synchronously, source, backup_fname, - close_source=True, + shutil.copyfile, primary_fname, backup_fname, ) - else: - source.close() @defer.inlineCallbacks def create_content(self, media_type, upload_name, content, content_length, @@ -280,8 +269,7 @@ class MediaRepository(object): server_name, media_id) raise SynapseError(502, "Failed to fetch remote media") - # Will close the file after its done - yield self.copy_to_backup(open(fname), fpath) + yield self.copy_to_backup(fpath) media_type = headers["Content-Type"][0] time_now_ms = self.clock.time_msec() diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index c734f6b7cd..2a3e37fdf4 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -275,8 +275,7 @@ class PreviewUrlResource(Resource): ) # FIXME: pass through 404s and other error messages nicely - # Will close the file after its done - yield self.media_repo.copy_to_backup(open(fname), fpath) + yield self.media_repo.copy_to_backup(fpath) media_type = headers["Content-Type"][0] time_now_ms = self.clock.time_msec() From 64665b57d0902c44235994d9cbf16ae61a784ab1 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Fri, 13 Oct 2017 14:26:07 +0100 Subject: [PATCH 0285/1637] oops --- docs/sphinx/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sphinx/conf.py b/docs/sphinx/conf.py index 06e1b3c33d..0b15bd8912 100644 --- a/docs/sphinx/conf.py +++ b/docs/sphinx/conf.py @@ -50,7 +50,7 @@ master_doc = 'index' # General information about the project. 
project = u'Synapse' -copyright = u'Copyright 2014-2017 OpenMarket, 2017 Vector Creations Ltd, 2017 New Vector Ltd' +copyright = u'Copyright 2014-2017 OpenMarket Ltd, 2017 Vector Creations Ltd, 2017 New Vector Ltd' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the From 6b725cf56aaf090f9cc9d5409dec7912feae8869 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 13 Oct 2017 15:23:41 +0100 Subject: [PATCH 0286/1637] Remove old comment --- synapse/rest/media/v1/media_repository.py | 1 - 1 file changed, 1 deletion(-) diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index e32a67e16a..cc267d0c16 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -102,7 +102,6 @@ class MediaRepository(object): Args: source: A file like object to be written fname (str): Path to write to - close_source (bool): Whether to close source after writing """ MediaRepository._makedirs(fname) source.seek(0) # Ensure we read from the start of the file From 1b6b0b1e66ab1cd5682ad1fa99020474afd6db7b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 13 Oct 2017 15:34:08 +0100 Subject: [PATCH 0287/1637] Add try/finally block to close t_byte_source --- synapse/rest/media/v1/media_repository.py | 65 ++++++++++++++--------- 1 file changed, 41 insertions(+), 24 deletions(-) diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index cc267d0c16..515b3d3e74 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -367,12 +367,16 @@ class MediaRepository(object): )) if t_byte_source: - output_path = yield self.write_to_file_and_backup( - t_byte_source, - self.filepaths.local_media_thumbnail_rel( - media_id, t_width, t_height, t_type, t_method + try: + output_path = yield self.write_to_file_and_backup( + t_byte_source, + self.filepaths.local_media_thumbnail_rel( + media_id, t_width, t_height, t_type, t_method + ) ) - ) + finally: + t_byte_source.close() + logger.info("Stored thumbnail in file %r", output_path) t_len = os.path.getsize(output_path) @@ -395,12 +399,16 @@ class MediaRepository(object): )) if t_byte_source: - output_path = yield self.write_to_file_and_backup( - t_byte_source, - self.filepaths.remote_media_thumbnail_rel( - server_name, file_id, t_width, t_height, t_type, t_method + try: + output_path = yield self.write_to_file_and_backup( + t_byte_source, + self.filepaths.remote_media_thumbnail_rel( + server_name, file_id, t_width, t_height, t_type, t_method + ) ) - ) + finally: + t_byte_source.close() + logger.info("Stored thumbnail in file %r", output_path) t_len = os.path.getsize(output_path) @@ -464,18 +472,6 @@ class MediaRepository(object): # Now we generate the thumbnails for each dimension, store it for (t_width, t_height, t_type), t_method in thumbnails.iteritems(): - # Generate the thumbnail - if t_type == "crop": - t_byte_source = yield make_deferred_yieldable(threads.deferToThread( - thumbnailer.crop, - r_width, r_height, t_type, - )) - else: - t_byte_source = yield make_deferred_yieldable(threads.deferToThread( - thumbnailer.scale, - r_width, r_height, t_type, - )) - # Work out the correct file name for thumbnail if server_name: file_path = self.filepaths.remote_media_thumbnail_rel( @@ -490,8 +486,29 @@ class MediaRepository(object): media_id, t_width, t_height, t_type, t_method ) - # Write to disk - 
output_path = yield self.write_to_file_and_backup(t_byte_source, file_path) + # Generate the thumbnail + if t_type == "crop": + t_byte_source = yield make_deferred_yieldable(threads.deferToThread( + thumbnailer.crop, + r_width, r_height, t_type, + )) + else: + t_byte_source = yield make_deferred_yieldable(threads.deferToThread( + thumbnailer.scale, + r_width, r_height, t_type, + )) + + if not t_byte_source: + continue + + try: + # Write to disk + output_path = yield self.write_to_file_and_backup( + t_byte_source, file_path, + ) + finally: + t_byte_source.close() + t_len = os.path.getsize(output_path) # Write to database From b2e02084b82ca4340c1ccc039a0768a6c9c5fbd5 Mon Sep 17 00:00:00 2001 From: Ander Punnar <4ND3R@users.noreply.github.com> Date: Sat, 14 Oct 2017 13:25:42 +0300 Subject: [PATCH 0288/1637] make it absolutely clear that Purge History API does not remove all traces of events and message contents because this topic pops up too often #890 #1621 #1730 #2260 #2315 and so on --- docs/admin_api/purge_history_api.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/admin_api/purge_history_api.rst b/docs/admin_api/purge_history_api.rst index 986efe40f9..08b3306366 100644 --- a/docs/admin_api/purge_history_api.rst +++ b/docs/admin_api/purge_history_api.rst @@ -4,6 +4,8 @@ Purge History API The purge history API allows server admins to purge historic events from their database, reclaiming disk space. +**NB!** This will not delete local events (e.g. the content of locally sent messages) from the database, but it will remove a lot of the metadata about them, and it does dramatically reduce on-disk space usage. + Depending on the amount of history being purged a call to the API may take several minutes or longer. During this period users will not be able to paginate further back in the room from the point being purged from.
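The try/finally blocks added in patch 0287 above ensure the in-memory thumbnail byte source is always closed, even when writing it out fails. The same pattern in isolation, as a sketch (hypothetical helper, not the repository's API):

    import shutil

    def write_and_close(byte_source, fname):
        # Close the source whether or not the copy to disk succeeds.
        try:
            byte_source.seek(0)  # ensure we read from the start
            with open(fname, "wb") as f:
                shutil.copyfileobj(byte_source, f)
        finally:
            byte_source.close()
        return fname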
From 9342bcfce08c862ea5027e865b23bf2bd6ad3a8c Mon Sep 17 00:00:00 2001 From: David Baker Date: Mon, 16 Oct 2017 13:38:10 +0100 Subject: [PATCH 0289/1637] Omit the *s for @room notifications They're just redundant --- synapse/push/baserules.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/push/baserules.py b/synapse/push/baserules.py index 9dce99ebec..7a18afe5f9 100644 --- a/synapse/push/baserules.py +++ b/synapse/push/baserules.py @@ -245,7 +245,7 @@ BASE_APPEND_OVERRIDE_RULES = [ { 'kind': 'event_match', 'key': 'content.body', - 'pattern': '*@room*', + 'pattern': '@room', '_id': '_roomnotif_content', }, { From 6079d0027aa79a6e2e41254ceda42206e307add7 Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Mon, 16 Oct 2017 14:20:45 +0100 Subject: [PATCH 0290/1637] Log a warning when no profile for invited member And return empty profile --- synapse/handlers/groups_local.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index 3b676d46bd..97a20f2b04 100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -313,8 +313,11 @@ class GroupsLocalHandler(object): self.notifier.on_new_event( "groups_key", token, users=[user_id], ) - - user_profile = yield self.profile_handler.get_profile(user_id) + try: + user_profile = yield self.profile_handler.get_profile(user_id) + except Exception as e: + logger.warn("No profile for user %s: %s", user_id, e) + user_profile = {} defer.returnValue({"state": "invite", "user_profile": user_profile}) From 2c5972f87f0541aaeff43846f7050ab91d11cf0e Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Mon, 16 Oct 2017 15:31:11 +0100 Subject: [PATCH 0291/1637] Implement GET /groups/$groupId/invited_users --- synapse/federation/transport/client.py | 13 ++++++++++ synapse/federation/transport/server.py | 18 ++++++++++++- synapse/groups/groups_server.py | 35 ++++++++++++++++++++++++++ synapse/handlers/groups_local.py | 17 +++++++++++++ synapse/rest/client/v2_alpha/groups.py | 21 ++++++++++++++++ synapse/storage/group_server.py | 12 +++++++++ 6 files changed, 115 insertions(+), 1 deletion(-) diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index f96561c1fe..125d8f3598 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -550,6 +550,19 @@ class TransportLayerClient(object): ignore_backoff=True, ) + @log_function + def get_invited_users_in_group(self, destination, group_id, requester_user_id): + """Get users that have been invited to a group + """ + path = PREFIX + "/groups/%s/invited_users" % (group_id,) + + return self.client.get_json( + destination=destination, + path=path, + args={"requester_user_id": requester_user_id}, + ignore_backoff=True, + ) + @log_function def accept_group_invite(self, destination, group_id, user_id, content): """Accept a group invite diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index c7565e0737..625a2fe27f 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -720,6 +720,22 @@ class FederationGroupsUsersServlet(BaseFederationServlet): defer.returnValue((200, new_content)) +class FederationGroupsInvitedUsersServlet(BaseFederationServlet): + """Get the users that have been invited to a group + """ + PATH = "/groups/(?P<group_id>[^/]*)/invited_users$" + + @defer.inlineCallbacks + def on_GET(self, origin, content, query, group_id):
requester_user_id = parse_string_from_args(query, "requester_user_id") + if get_domain_from_id(requester_user_id) != origin: + raise SynapseError(403, "requester_user_id doesn't match origin") + + new_content = yield self.handler.get_invited_users_in_group( + group_id, requester_user_id + ) + + defer.returnValue((200, new_content)) class FederationGroupsInviteServlet(BaseFederationServlet): """Ask a group server to invite someone to the group @@ -1109,12 +1125,12 @@ ROOM_LIST_CLASSES = ( PublicRoomList, ) - GROUP_SERVER_SERVLET_CLASSES = ( FederationGroupsProfileServlet, FederationGroupsSummaryServlet, FederationGroupsRoomsServlet, FederationGroupsUsersServlet, + FederationGroupsInvitedUsersServlet, FederationGroupsInviteServlet, FederationGroupsAcceptInviteServlet, FederationGroupsRemoveUserServlet, diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 1083bc2990..bfa46b7cb2 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -420,6 +420,41 @@ class GroupsServerHandler(object): "total_user_count_estimate": len(user_results), }) + @defer.inlineCallbacks + def get_invited_users_in_group(self, group_id, requester_user_id): + """Get the users that have been invited to a group as seen by requester_user_id. + + The ordering is arbitrary at the moment + """ + + yield self.check_group_is_ours(group_id, and_exists=True) + + is_user_in_group = yield self.store.is_user_in_group(requester_user_id, group_id) + + if not is_user_in_group: + raise SynapseError(403, "User not in group") + + invited_users = yield self.store.get_invited_users_in_group(group_id) + + user_profiles = [] + + for user_id in invited_users: + user_profile = { + "user_id": user_id + } + try: + profile = yield self.profile_handler.get_profile_from_cache(user) + user_profile.update(profile) + except Exception as e: + pass + user_profiles.append(user_profile) + + defer.returnValue({ + "chunk": user_profiles, + "total_user_count_estimate": len(invited_users), + }) + + @defer.inlineCallbacks def get_rooms_in_group(self, group_id, requester_user_id): """Get the rooms in group as seen by requester_user_id diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index 97a20f2b04..5263e769bb 100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -219,6 +219,23 @@ class GroupsLocalHandler(object): defer.returnValue(res) + @defer.inlineCallbacks + def get_invited_users_in_group(self, group_id, requester_user_id): + """Get users invited to a group + """ + if self.is_mine_id(group_id): + res = yield self.groups_server_handler.get_invited_users_in_group( + group_id, requester_user_id + ) + defer.returnValue(res) + + group_server_name = get_domain_from_id(group_id) + + res = yield self.transport_client.get_users_in_group( + get_domain_from_id(group_id), group_id, requester_user_id, + ) + defer.returnValue(res) + @defer.inlineCallbacks def join_group(self, group_id, user_id, content): """Request to join a group diff --git a/synapse/rest/client/v2_alpha/groups.py b/synapse/rest/client/v2_alpha/groups.py index 8f3ce15b02..4532112cfc 100644 --- a/synapse/rest/client/v2_alpha/groups.py +++ b/synapse/rest/client/v2_alpha/groups.py @@ -370,6 +370,26 @@ class GroupUsersServlet(RestServlet): defer.returnValue((200, result)) +class GroupInvitedUsersServlet(RestServlet): + """Get users invited to a group + """ + PATTERNS = client_v2_patterns("/groups/(?P<group_id>[^/]*)/invited_users$") + + def __init__(self, hs): + super(GroupInvitedUsersServlet,
self).__init__() + self.auth = hs.get_auth() + self.clock = hs.get_clock() + self.groups_handler = hs.get_groups_local_handler() + + @defer.inlineCallbacks + def on_GET(self, request, group_id): + requester = yield self.auth.get_user_by_req(request) + user_id = requester.user.to_string() + + result = yield self.groups_handler.get_invited_users_in_group(group_id, user_id) + + defer.returnValue((200, result)) + class GroupCreateServlet(RestServlet): """Create a group @@ -674,6 +694,7 @@ class GroupsForUserServlet(RestServlet): def register_servlets(hs, http_server): GroupServlet(hs).register(http_server) GroupSummaryServlet(hs).register(http_server) + GroupInvitedUsersServlet(hs).register(http_server) GroupUsersServlet(hs).register(http_server) GroupRoomServlet(hs).register(http_server) GroupCreateServlet(hs).register(http_server) diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index 3af372de59..9e63db5c6c 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -56,6 +56,18 @@ class GroupServerStore(SQLBaseStore): desc="get_users_in_group", ) + def get_invited_users_in_group(self, group_id): + # TODO: Pagination + + return self._simple_select_onecol( + table="group_invites", + keyvalues={ + "group_id": group_id, + }, + retcol="user_id", + desc="get_invited_users_in_group", + ) + def get_rooms_in_group(self, group_id, include_private=False): # TODO: Pagination From a3ac4f6b0ad9cce172678fa87c10a1100d16236f Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Mon, 16 Oct 2017 15:41:03 +0100 Subject: [PATCH 0292/1637] _create_rerouter for get_invited_users_in_group --- synapse/handlers/groups_local.py | 19 ++----------------- 1 file changed, 2 insertions(+), 17 deletions(-) diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index 5263e769bb..6699d0888f 100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -68,6 +68,8 @@ class GroupsLocalHandler(object): update_group_profile = _create_rerouter("update_group_profile") get_rooms_in_group = _create_rerouter("get_rooms_in_group") + get_invited_users_in_group = _create_rerouter("get_invited_users_in_group") + add_room_to_group = _create_rerouter("add_room_to_group") remove_room_from_group = _create_rerouter("remove_room_from_group") @@ -219,23 +221,6 @@ class GroupsLocalHandler(object): defer.returnValue(res) - @defer.inlineCallbacks - def get_invited_users_in_group(self, group_id, requester_user_id): - """Get users invited to a group - """ - if self.is_mine_id(group_id): - res = yield self.groups_server_handler.get_invited_users_in_group( - group_id, requester_user_id - ) - defer.returnValue(res) - - group_server_name = get_domain_from_id(group_id) - - res = yield self.transport_client.get_users_in_group( - get_domain_from_id(group_id), group_id, requester_user_id, - ) - defer.returnValue(res) - @defer.inlineCallbacks def join_group(self, group_id, user_id, content): """Request to join a group From c43e8a97367e939cd1e0b7819095950a156c9f49 Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Mon, 16 Oct 2017 15:50:39 +0100 Subject: [PATCH 0293/1637] Make it work.
Warn about lack of user profile --- synapse/groups/groups_server.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index bfa46b7cb2..31fc711876 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -443,10 +443,10 @@ class GroupsServerHandler(object): "user_id": user_id } try: - profile = yield self.profile_handler.get_profile_from_cache(user) + profile = yield self.profile_handler.get_profile_from_cache(user_id) user_profile.update(profile) except Exception as e: - pass + logger.warn("Error getting profile for %s: %s", user_id, e) user_profiles.append(user_profile) defer.returnValue({ From 85f5674e44d695177cbff74e11b4ce6dac85d53a Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Mon, 16 Oct 2017 15:52:12 +0100 Subject: [PATCH 0294/1637] Delint --- synapse/federation/transport/server.py | 2 ++ synapse/groups/groups_server.py | 1 - synapse/rest/client/v2_alpha/groups.py | 1 + 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 625a2fe27f..6a0bd8d222 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -720,6 +720,7 @@ class FederationGroupsUsersServlet(BaseFederationServlet): defer.returnValue((200, new_content)) + class FederationGroupsInvitedUsersServlet(BaseFederationServlet): """Get the users that have been invited to a group """ @@ -737,6 +738,7 @@ class FederationGroupsInvitedUsersServlet(BaseFederationServlet): defer.returnValue((200, new_content)) + class FederationGroupsInviteServlet(BaseFederationServlet): """Ask a group server to invite someone to the group """ diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 31fc711876..a3a500b9d6 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -454,7 +454,6 @@ class GroupsServerHandler(object): "total_user_count_estimate": len(invited_users), }) - @defer.inlineCallbacks def get_rooms_in_group(self, group_id, requester_user_id): """Get the rooms in group as seen by requester_user_id diff --git a/synapse/rest/client/v2_alpha/groups.py b/synapse/rest/client/v2_alpha/groups.py index 4532112cfc..d11bccc1da 100644 --- a/synapse/rest/client/v2_alpha/groups.py +++ b/synapse/rest/client/v2_alpha/groups.py @@ -370,6 +370,7 @@ class GroupUsersServlet(RestServlet): defer.returnValue((200, result)) + class GroupInvitedUsersServlet(RestServlet): """Get users invited to a group """ From c05e6015cc522b838ab2db6bffd494b017cf8ec6 Mon Sep 17 00:00:00 2001 From: David Baker Date: Mon, 16 Oct 2017 17:57:27 +0100 Subject: [PATCH 0295/1637] Add config option to auto-join new users to rooms New users who register on the server will be dumped into all rooms in auto_join_rooms in the config. 
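For example, with the following in homeserver.yaml (the alias is illustrative), every newly registered local user is joined to that room; entries may be room IDs or room aliases, and aliases are resolved at registration time:

    auto_join_rooms:
        - "#example:example.com"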
--- synapse/config/registration.py | 6 +++++ synapse/rest/client/v2_alpha/register.py | 34 ++++++++++++++++++++++++ 2 files changed, 40 insertions(+) diff --git a/synapse/config/registration.py b/synapse/config/registration.py index f7e03c4cde..9e2a6d1ae5 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -41,6 +41,8 @@ class RegistrationConfig(Config): self.allow_guest_access and config.get("invite_3pid_guest", False) ) + self.auto_join_rooms = config.get("auto_join_rooms", []) + def default_config(self, **kwargs): registration_shared_secret = random_string_with_symbols(50) @@ -70,6 +72,10 @@ class RegistrationConfig(Config): - matrix.org - vector.im - riot.im + + # Users who register on this homeserver will automatically be joined to these rooms + #auto_join_rooms: + # - "#example:example.com" """ % locals() def add_arguments(self, parser): diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 1421c18152..d9a8cdbbb5 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -17,8 +17,10 @@ from twisted.internet import defer import synapse +import synapse.types from synapse.api.auth import get_access_token_from_request, has_access_token from synapse.api.constants import LoginType +from synapse.types import RoomID, RoomAlias from synapse.api.errors import SynapseError, Codes, UnrecognizedRequestError from synapse.http.servlet import ( RestServlet, parse_json_object_from_request, assert_params_in_request, parse_string @@ -170,6 +172,7 @@ class RegisterRestServlet(RestServlet): self.auth_handler = hs.get_auth_handler() self.registration_handler = hs.get_handlers().registration_handler self.identity_handler = hs.get_handlers().identity_handler + self.room_member_handler = hs.get_handlers().room_member_handler self.device_handler = hs.get_device_handler() self.macaroon_gen = hs.get_macaroon_generator() @@ -340,6 +343,14 @@ class RegisterRestServlet(RestServlet): generate_token=False, ) + # auto-join the user to any rooms we're supposed to dump them into + fake_requester = synapse.types.create_requester(registered_user_id) + for r in self.hs.config.auto_join_rooms: + try: + yield self._join_user_to_room(fake_requester, r) + except Exception as e: + logger.error("Failed to join new user to %r: %r", r, e) + # remember that we've now registered that user account, and with # what user ID (since the user may not have specified) self.auth_handler.set_session_data( @@ -372,6 +383,29 @@ class RegisterRestServlet(RestServlet): def on_OPTIONS(self, _): return 200, {} + @defer.inlineCallbacks + def _join_user_to_room(self, requester, room_identifier): + room_id = None + if RoomID.is_valid(room_identifier): + room_id = room_identifier + elif RoomAlias.is_valid(room_identifier): + room_alias = RoomAlias.from_string(room_identifier) + room_id, remote_room_hosts = ( + yield self.room_member_handler.lookup_room_alias(room_alias) + ) + room_id = room_id.to_string() + else: + raise SynapseError(400, "%s was not legal room ID or room alias" % ( + room_identifier, + )) + + yield self.room_member_handler.update_membership( + requester=requester, + target=requester.user, + room_id=room_id, + action="join", + ) + @defer.inlineCallbacks def _do_appservice_registration(self, username, as_token, body): user_id = yield self.registration_handler.appservice_register( From a9c2e930ac455db13ce37a90b3ff0d93c0d1ea43 Mon Sep 17 00:00:00 2001 From: David Baker Date: Tue, 17 Oct 2017 10:13:13 +0100 
Subject: [PATCH 0296/1637] pep8 --- synapse/config/registration.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/synapse/config/registration.py b/synapse/config/registration.py index 9e2a6d1ae5..ef917fc9f2 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -73,7 +73,8 @@ class RegistrationConfig(Config): - vector.im - riot.im - # Users who register on this homeserver will automatically be joined to these rooms + # Users who register on this homeserver will automatically be joined + # to these rooms #auto_join_rooms: # - "#example:example.com" """ % locals() From 33122c5a1b3642712546bc290d591077ce4cc847 Mon Sep 17 00:00:00 2001 From: David Baker Date: Tue, 17 Oct 2017 10:39:50 +0100 Subject: [PATCH 0297/1637] Fix test --- tests/rest/client/v2_alpha/test_register.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index b6173ab2ee..821c735528 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -47,6 +47,7 @@ class RegisterRestServletTestCase(unittest.TestCase): self.hs.get_auth_handler = Mock(return_value=self.auth_handler) self.hs.get_device_handler = Mock(return_value=self.device_handler) self.hs.config.enable_registration = True + self.hs.config.auto_join_rooms = [] # init the thing we're testing self.servlet = RegisterRestServlet(self.hs) From 9b714abf357fbe16482a729933aa8e139ed278ad Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 17 Oct 2017 10:43:36 +0100 Subject: [PATCH 0298/1637] Remove dead class This isn't used anywhere. --- tests/storage/event_injector.py | 76 --------------------------------- 1 file changed, 76 deletions(-) delete mode 100644 tests/storage/event_injector.py diff --git a/tests/storage/event_injector.py b/tests/storage/event_injector.py deleted file mode 100644 index 024ac15069..0000000000 --- a/tests/storage/event_injector.py +++ /dev/null @@ -1,76 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2015, 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -from twisted.internet import defer - -from synapse.api.constants import EventTypes - - -class EventInjector: - def __init__(self, hs): - self.hs = hs - self.store = hs.get_datastore() - self.message_handler = hs.get_handlers().message_handler - self.event_builder_factory = hs.get_event_builder_factory() - - @defer.inlineCallbacks - def create_room(self, room, user): - builder = self.event_builder_factory.new({ - "type": EventTypes.Create, - "sender": user.to_string(), - "room_id": room.to_string(), - "content": {}, - }) - - event, context = yield self.message_handler._create_new_client_event( - builder - ) - - yield self.store.persist_event(event, context) - - @defer.inlineCallbacks - def inject_room_member(self, room, user, membership): - builder = self.event_builder_factory.new({ - "type": EventTypes.Member, - "sender": user.to_string(), - "state_key": user.to_string(), - "room_id": room.to_string(), - "content": {"membership": membership}, - }) - - event, context = yield self.message_handler._create_new_client_event( - builder - ) - - yield self.store.persist_event(event, context) - - defer.returnValue(event) - - @defer.inlineCallbacks - def inject_message(self, room, user, body): - builder = self.event_builder_factory.new({ - "type": EventTypes.Message, - "sender": user.to_string(), - "state_key": user.to_string(), - "room_id": room.to_string(), - "content": {"body": body, "msgtype": u"message"}, - }) - - event, context = yield self.message_handler._create_new_client_event( - builder - ) - - yield self.store.persist_event(event, context) From 5b5f35ccc0cdd74fe8c8902988da222a7b8e2a67 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 17 Oct 2017 10:52:31 +0100 Subject: [PATCH 0299/1637] Add some tests for make_deferred_yieldable --- tests/util/test_log_context.py | 38 ++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/tests/util/test_log_context.py b/tests/util/test_log_context.py index 9ffe209c4d..e2f7765f49 100644 --- a/tests/util/test_log_context.py +++ b/tests/util/test_log_context.py @@ -94,3 +94,41 @@ class LoggingContextTestCase(unittest.TestCase): yield defer.succeed(None) return self._test_preserve_fn(nonblocking_function) + + @defer.inlineCallbacks + def test_make_deferred_yieldable(self): + # a function which returns an incomplete deferred, but doesn't follow + # the synapse rules.
+ def blocking_function(): + d = defer.Deferred() + reactor.callLater(0, d.callback, None) + return d + + sentinel_context = LoggingContext.current_context() + + with LoggingContext() as context_one: + context_one.test_key = "one" + + d1 = logcontext.make_deferred_yieldable(blocking_function()) + # make sure that the context was reset by make_deferred_yieldable + self.assertIs(LoggingContext.current_context(), sentinel_context) + + yield d1 + + # now it should be restored + self._check_test_key("one") + + @defer.inlineCallbacks + def test_make_deferred_yieldable_on_non_deferred(self): + """Check that make_deferred_yieldable does the right thing when its + argument isn't actually a deferred""" + + with LoggingContext() as context_one: + context_one.test_key = "one" + + d1 = logcontext.make_deferred_yieldable("bum") + self._check_test_key("one") + + r = yield d1 + self.assertEqual(r, "bum") + self._check_test_key("one") From a6ad8148b9d3058e316589a952744ecb15aa2057 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 17 Oct 2017 10:53:34 +0100 Subject: [PATCH 0300/1637] Fix name of test_logcontext The file under test is logcontext.py, not log_context.py --- tests/util/{test_log_context.py => test_logcontext.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename tests/util/{test_log_context.py => test_logcontext.py} (100%) diff --git a/tests/util/test_log_context.py b/tests/util/test_logcontext.py similarity index 100% rename from tests/util/test_log_context.py rename to tests/util/test_logcontext.py From 2e9f5ea31a9c66eceb6276c5241cc6537cb0ae4c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 17 Oct 2017 10:59:30 +0100 Subject: [PATCH 0301/1637] Fix logcontext handling for persist_events * don't use preserve_context_over_deferred, which is known broken. * remove a redundant preserve_fn. * add/improve some comments --- synapse/storage/events.py | 24 +++++++++++++++++------- synapse/util/async.py | 5 +++++ 2 files changed, 22 insertions(+), 7 deletions(-) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 4f0b43c36d..637640ec2a 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -21,7 +21,7 @@ from synapse.events.utils import prune_event from synapse.util.async import ObservableDeferred from synapse.util.logcontext import ( - preserve_fn, PreserveLoggingContext, preserve_context_over_deferred + preserve_fn, PreserveLoggingContext, make_deferred_yieldable ) from synapse.util.logutils import log_function from synapse.util.metrics import Measure @@ -88,13 +88,23 @@ class _EventPeristenceQueue(object): def add_to_queue(self, room_id, events_and_contexts, backfilled): """Add events to the queue, with the given persist_event options. + NB: due to the normal usage pattern of this method, it does *not* + follow the synapse logcontext rules, and leaves the logcontext in + place whether or not the returned deferred is ready. + Args: room_id (str): events_and_contexts (list[(EventBase, EventContext)]): backfilled (bool): + + Returns: + defer.Deferred: a deferred which will resolve once the events are + persisted. Runs its callbacks *without* a logcontext. """ queue = self._event_persist_queues.setdefault(room_id, deque()) if queue: + # if the last item in the queue has the same `backfilled` setting, + # we can just add these new events to that item. 
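+            # (events with a different `backfilled` setting get queued as a
+            # separate item, so the two kinds are never persisted in one batch.)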
            end_item = queue[-1]
            if end_item.backfilled == backfilled:
                end_item.events_and_contexts.extend(events_and_contexts)
@@ -113,11 +123,11 @@ class _EventPeristenceQueue(object):
     def handle_queue(self, room_id, per_item_callback):
         """Attempts to handle the queue for a room if not already being handled.

-        The given callback will be invoked with for each item in the queue,1
+        The given callback will be invoked with for each item in the queue,
         of type _EventPersistQueueItem. The per_item_callback will continuously
         be called with new items, unless the queue becomnes empty. The return
         value of the function will be given to the deferreds waiting on the item,
-        exceptions will be passed to the deferres as well.
+        exceptions will be passed to the deferreds as well.

         This function should therefore be called whenever anything is added
         to the queue.
@@ -233,7 +243,7 @@ class EventsStore(SQLBaseStore):

         deferreds = []
         for room_id, evs_ctxs in partitioned.iteritems():
-            d = preserve_fn(self._event_persist_queue.add_to_queue)(
+            d = self._event_persist_queue.add_to_queue(
                 room_id, evs_ctxs, backfilled=backfilled,
             )

@@ -242,7 +252,7 @@
         for room_id in partitioned:
             self._maybe_start_persisting(room_id)

-        return preserve_context_over_deferred(
+        return make_deferred_yieldable(
             defer.gatherResults(deferreds, consumeErrors=True)
         )

@@ -267,7 +277,7 @@

         self._maybe_start_persisting(event.room_id)

-        yield preserve_context_over_deferred(deferred)
+        yield make_deferred_yieldable(deferred)

         max_persisted_id = yield self._stream_id_gen.get_current_token()
         defer.returnValue((event.internal_metadata.stream_ordering, max_persisted_id))
@@ -1526,7 +1536,7 @@
         if not allow_rejected:
             rows[:] = [r for r in rows if not r["rejects"]]

-        res = yield preserve_context_over_deferred(defer.gatherResults(
+        res = yield make_deferred_yieldable(defer.gatherResults(
             [
                 preserve_fn(self._get_event_from_row)(
                     row["internal_metadata"], row["json"], row["redacts"],
diff --git a/synapse/util/async.py b/synapse/util/async.py
index 0fd5b42523..a0a9039475 100644
--- a/synapse/util/async.py
+++ b/synapse/util/async.py
@@ -53,6 +53,11 @@ class ObservableDeferred(object):

     Cancelling or otherwise resolving an observer will not affect the original
     ObservableDeferred.
+
+    NB that it does not attempt to do anything with logcontexts; in general
+    you should probably make_deferred_yieldable the deferreds
+    returned by `observe`, and ensure that the original deferred runs its
+    callbacks in the sentinel logcontext.
     """

     __slots__ = ["_deferred", "_observers", "_result"]

From a6245478c82d8bd2c9abea34b9ec4a94ccc5ed09 Mon Sep 17 00:00:00 2001
From: Krombel
Date: Tue, 17 Oct 2017 12:45:33 +0200
Subject: [PATCH 0302/1637] fix thumbnailing (#2548)

in commit 0e28281a the code for thumbnailing got refactored and the
renaming of these variables was not done correctly.
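In compressed form (example values; variable names as in the diff below): after the refactor the requested thumbnail dimensions are carried by the t_-prefixed names, so those are what the thumbnailer calls must receive, not the stale r_-prefixed ones:

    t_width, t_height, t_type = 320, 240, "image/jpeg"   # invented example values

    def crop(width, height, media_type):
        # stand-in for Thumbnailer.crop in the real code
        return ("cropped", width, height, media_type)

    # post-fix: the t_-prefixed names are passed through to the thumbnailer
    assert crop(t_width, t_height, t_type) == ("cropped", 320, 240, "image/jpeg")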
Signed-Off-by: Matthias Kesler
---
 synapse/rest/media/v1/media_repository.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py
index 515b3d3e74..057c925b7b 100644
--- a/synapse/rest/media/v1/media_repository.py
+++ b/synapse/rest/media/v1/media_repository.py
@@ -490,12 +490,12 @@ class MediaRepository(object):
             if t_type == "crop":
                 t_byte_source = yield make_deferred_yieldable(threads.deferToThread(
                     thumbnailer.crop,
-                    r_width, r_height, t_type,
+                    t_width, t_height, t_type,
                 ))
             else:
                 t_byte_source = yield make_deferred_yieldable(threads.deferToThread(
                     thumbnailer.scale,
-                    r_width, r_height, t_type,
+                    t_width, t_height, t_type,
                 ))

             if not t_byte_source:

From 7216c76654bbb57bc0ebc27498de7eda247f8ffb Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Tue, 17 Oct 2017 14:46:17 +0100
Subject: [PATCH 0303/1637] Improve error handling for missing files (#2551)

`os.path.exists` doesn't allow us to distinguish between permissions errors
and the path actually not existing, which repeatedly confuses people. It also
means that we try to overwrite existing key files, which is super-confusing.
(cf issues #2455, #2379). Use os.stat instead.

Also, don't recommend the use of --generate-config, which screws everything
up if you're using debian (cf #2455).
---
 synapse/config/_base.py | 36 ++++++++++++++++++++++++++----------
 synapse/config/key.py   |  8 ++++----
 synapse/config/tls.py   |  6 +++---
 3 files changed, 33 insertions(+), 17 deletions(-)

diff --git a/synapse/config/_base.py b/synapse/config/_base.py
index 1ab5593c6e..fa105bce72 100644
--- a/synapse/config/_base.py
+++ b/synapse/config/_base.py
@@ -81,22 +81,38 @@ class Config(object):
     def abspath(file_path):
         return os.path.abspath(file_path) if file_path else file_path

+    @classmethod
+    def path_exists(cls, file_path):
+        """Check if a file exists
+
+        Unlike os.path.exists, this throws an exception if there is an error
+        checking if the file exists (for example, if there is a perms error on
+        the parent dir).
+
+        Returns:
+            bool: True if the file exists; False if not.
+        """
+        try:
+            os.stat(file_path)
+            return True
+        except OSError as e:
+            if e.errno != errno.ENOENT:
+                raise e
+            return False
+
     @classmethod
     def check_file(cls, file_path, config_name):
         if file_path is None:
             raise ConfigError(
                 "Missing config for %s."
-                " You must specify a path for the config file. You can "
-                "do this with the -c or --config-path option. "
-                "Adding --generate-config along with --server-name "
-                " will generate a config file at the given path."
                 % (config_name,)
             )
-        if not os.path.exists(file_path):
+        try:
+            os.stat(file_path)
+        except OSError as e:
             raise ConfigError(
-                "File %s config for %s doesn't exist."
-                " Try running again with --generate-config"
-                % (file_path, config_name,)
+                "Error accessing file '%s' (config for %s): %s"
+                % (file_path, config_name, e.strerror)
             )
         return cls.abspath(file_path)

@@ -248,7 +264,7 @@ class Config(object):
                 " -c CONFIG-FILE\""
             )
         (config_path,) = config_files
-        if not os.path.exists(config_path):
+        if not cls.path_exists(config_path):
            if config_args.keys_directory:
                config_dir_path = config_args.keys_directory
            else:
@@ -261,7 +277,7 @@
                    "Must specify a server_name to a generate config for."
                    " Pass -H server.name."
) - if not os.path.exists(config_dir_path): + if not cls.path_exists(config_dir_path): os.makedirs(config_dir_path) with open(config_path, "wb") as config_file: config_bytes, config = obj.generate_config( diff --git a/synapse/config/key.py b/synapse/config/key.py index 6ee643793e..4b8fc063d0 100644 --- a/synapse/config/key.py +++ b/synapse/config/key.py @@ -118,10 +118,9 @@ class KeyConfig(Config): signing_keys = self.read_file(signing_key_path, "signing_key") try: return read_signing_keys(signing_keys.splitlines(True)) - except Exception: + except Exception as e: raise ConfigError( - "Error reading signing_key." - " Try running again with --generate-config" + "Error reading signing_key: %s" % (str(e)) ) def read_old_signing_keys(self, old_signing_keys): @@ -141,7 +140,8 @@ class KeyConfig(Config): def generate_files(self, config): signing_key_path = config["signing_key_path"] - if not os.path.exists(signing_key_path): + + if not self.path_exists(signing_key_path): with open(signing_key_path, "w") as signing_key_file: key_id = "a_" + random_string(4) write_signing_keys( diff --git a/synapse/config/tls.py b/synapse/config/tls.py index e081840a83..247f18f454 100644 --- a/synapse/config/tls.py +++ b/synapse/config/tls.py @@ -126,7 +126,7 @@ class TlsConfig(Config): tls_private_key_path = config["tls_private_key_path"] tls_dh_params_path = config["tls_dh_params_path"] - if not os.path.exists(tls_private_key_path): + if not self.path_exists(tls_private_key_path): with open(tls_private_key_path, "w") as private_key_file: tls_private_key = crypto.PKey() tls_private_key.generate_key(crypto.TYPE_RSA, 2048) @@ -141,7 +141,7 @@ class TlsConfig(Config): crypto.FILETYPE_PEM, private_key_pem ) - if not os.path.exists(tls_certificate_path): + if not self.path_exists(tls_certificate_path): with open(tls_certificate_path, "w") as certificate_file: cert = crypto.X509() subject = cert.get_subject() @@ -159,7 +159,7 @@ class TlsConfig(Config): certificate_file.write(cert_pem) - if not os.path.exists(tls_dh_params_path): + if not self.path_exists(tls_dh_params_path): if GENERATE_DH_PARAMS: subprocess.check_call([ "openssl", "dhparam", From c2bd177ea0e58d9831167273faa9bf9155466513 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 17 Oct 2017 17:05:42 +0100 Subject: [PATCH 0304/1637] Fix 500 error when fields missing from power_levels event If the users or events keys were missing from a power_levels event, then we would throw 500s when trying to auth them. 
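To make the failure concrete, here is a minimal illustrative sketch (plain Python; the empty content dict is an invented example) of why a missing key produced a 500, and the defaulting pattern the diff below adopts:

    # power_levels event content that omits the optional "users"/"events" keys
    content = {}

    old_list = content.get("users")         # returns None
    # old_list.keys()                       # AttributeError -> 500 to the client

    old_list = content.get("users", {})     # after the fix: an empty dict
    assert list(old_list.keys()) == []      # iterating it is simply a no-op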
--- synapse/event_auth.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/synapse/event_auth.py b/synapse/event_auth.py index 4096c606f1..9e746a28bf 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -470,14 +470,14 @@ def _check_power_levels(event, auth_events): ("invite", None), ] - old_list = current_state.content.get("users") + old_list = current_state.content.get("users", {}) for user in set(old_list.keys() + user_list.keys()): levels_to_check.append( (user, "users") ) - old_list = current_state.content.get("events") - new_list = event.content.get("events") + old_list = current_state.content.get("events", {}) + new_list = event.content.get("events", {}) for ev_id in set(old_list.keys() + new_list.keys()): levels_to_check.append( (ev_id, "events") From 74f99f227cb700e41fc6c309398c0e5a6a42439a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 17 Oct 2017 19:46:56 +0100 Subject: [PATCH 0305/1637] Doc some more dynamic Homeserver methods --- synapse/server.pyi | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/synapse/server.pyi b/synapse/server.pyi index 9570df5537..e8c0386b7f 100644 --- a/synapse/server.pyi +++ b/synapse/server.pyi @@ -1,4 +1,6 @@ import synapse.api.auth +import synapse.federation.transaction_queue +import synapse.federation.transport.client import synapse.handlers import synapse.handlers.auth import synapse.handlers.device @@ -27,3 +29,9 @@ class HomeServer(object): def get_state_handler(self) -> synapse.state.StateHandler: pass + + def get_federation_sender(self) -> synapse.federation.transaction_queue.TransactionQueue: + pass + + def get_federation_transport_client(self) -> synapse.federation.transport.client.TransportLayerClient: + pass From 582bd19ee9de4850010b58e0bb0e2a93f01b2f44 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 17 Oct 2017 20:46:55 +0100 Subject: [PATCH 0306/1637] Fix 500 error when we get an error handling a PDU FederationServer doesn't have a send_failure (and nor does its subclass, ReplicationLayer), so this was failing. I'm not really sure what the idea behind send_failure is, given (a) we don't do anything at the other end with it except log it, and (b) we also send back the failure via the transaction response. I suspect there's a whole lot of dead code around it, but for now I'm just removing the broken bit. 
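The error path that remains can be sketched as follows (handle_pdu is an invented stand-in for the real per-event processing; the shape of pdu_results follows the diff below):

    def handle_pdu(event_id):
        # stand-in for the real PDU handling, which may raise
        if "bad" in event_id:
            raise RuntimeError("failed to handle PDU")

    pdu_results = {}
    for event_id in ("$ok:example.com", "$bad:example.com"):
        try:
            handle_pdu(event_id)
            pdu_results[event_id] = {}
        except Exception as e:
            # the failure travels back in the transaction response;
            # there is no separate send_failure call
            pdu_results[event_id] = {"error": str(e)}

    print(pdu_results)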
--- synapse/federation/federation_server.py | 1 - 1 file changed, 1 deletion(-) diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index a8034bddc6..e15228e70b 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -192,7 +192,6 @@ class FederationServer(FederationBase): pdu_results[event_id] = {} except FederationError as e: logger.warn("Error handling PDU %s: %s", event_id, e) - self.send_failure(e, transaction.origin) pdu_results[event_id] = {"error": str(e)} except Exception as e: pdu_results[event_id] = {"error": str(e)} From 161a862ffb250b5387f1a2c57e07e999ba00571e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 19 Oct 2017 10:17:43 +0100 Subject: [PATCH 0307/1637] Fix typo --- synapse/groups/attestations.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/groups/attestations.py b/synapse/groups/attestations.py index 5ef7a12cb7..02e2e17589 100644 --- a/synapse/groups/attestations.py +++ b/synapse/groups/attestations.py @@ -126,7 +126,7 @@ class GroupAttestionRenewer(object): ) @defer.inlineCallbacks - def _renew_attestation(self, group_id, user_id): + def _renew_attestation(group_id, user_id): attestation = self.attestations.create_attestation(group_id, user_id) if self.hs.is_mine_id(group_id): From bd5718d0ad2a9380ae292507a1022a230f8b2011 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 19 Oct 2017 10:27:18 +0100 Subject: [PATCH 0308/1637] Fix typo in thumbnail generation --- synapse/rest/media/v1/media_repository.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index 057c925b7b..6b50b45b1f 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -487,16 +487,19 @@ class MediaRepository(object): ) # Generate the thumbnail - if t_type == "crop": + if t_method == "crop": t_byte_source = yield make_deferred_yieldable(threads.deferToThread( thumbnailer.crop, t_width, t_height, t_type, )) - else: + elif t_method == "scale": t_byte_source = yield make_deferred_yieldable(threads.deferToThread( thumbnailer.scale, t_width, t_height, t_type, )) + else: + logger.error("Unrecognized method: %r", t_method) + continue if not t_byte_source: continue From 9ab859f27b177bbcd4fe0b3eb1bef0ce649ea41e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 19 Oct 2017 10:55:44 +0100 Subject: [PATCH 0309/1637] Fix typo in group attestation handling --- synapse/federation/transport/server.py | 2 +- synapse/groups/attestations.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 6a0bd8d222..09b97138c3 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -835,7 +835,7 @@ class FederationGroupsRenewAttestaionServlet(BaseFederationServlet): def on_POST(self, origin, content, query, group_id, user_id): # We don't need to check auth here as we check the attestation signatures - new_content = yield self.handler.on_renew_group_attestation( + new_content = yield self.handler.on_renew_attestation( origin, content, group_id, user_id ) diff --git a/synapse/groups/attestations.py b/synapse/groups/attestations.py index 02e2e17589..b751cf5e43 100644 --- a/synapse/groups/attestations.py +++ b/synapse/groups/attestations.py @@ -90,6 +90,7 @@ class GroupAttestionRenewer(object): 
self.assestations = hs.get_groups_attestation_signing() self.transport_client = hs.get_federation_transport_client() self.is_mine_id = hs.is_mine_id + self.attestations = hs.get_groups_attestation_signing() self._renew_attestations_loop = self.clock.looping_call( self._renew_attestations, 30 * 60 * 1000, @@ -129,7 +130,7 @@ class GroupAttestionRenewer(object): def _renew_attestation(group_id, user_id): attestation = self.attestations.create_attestation(group_id, user_id) - if self.hs.is_mine_id(group_id): + if self.is_mine_id(group_id): destination = get_domain_from_id(user_id) else: destination = get_domain_from_id(group_id) From 011d03a0f6519676998fd32e6b553f7a043e40a3 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 19 Oct 2017 11:22:48 +0100 Subject: [PATCH 0310/1637] Fix typo --- synapse/federation/transport/server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 09b97138c3..f0778c65c5 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -836,7 +836,7 @@ class FederationGroupsRenewAttestaionServlet(BaseFederationServlet): # We don't need to check auth here as we check the attestation signatures new_content = yield self.handler.on_renew_attestation( - origin, content, group_id, user_id + group_id, user_id, content ) defer.returnValue((200, new_content)) From 513c23bfd90b9386f59dea96e5f1ccc609da1c03 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 19 Oct 2017 12:01:01 +0100 Subject: [PATCH 0311/1637] Enforce sensible group IDs --- synapse/groups/groups_server.py | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index a3a500b9d6..e9b44c0971 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -16,10 +16,11 @@ from twisted.internet import defer from synapse.api.errors import SynapseError -from synapse.types import UserID, get_domain_from_id, RoomID +from synapse.types import UserID, get_domain_from_id, RoomID, GroupID import logging +import urllib logger = logging.getLogger(__name__) @@ -697,6 +698,8 @@ class GroupsServerHandler(object): def create_group(self, group_id, user_id, content): group = yield self.check_group_is_ours(group_id) + _validate_group_id(group_id) + logger.info("Attempting to create group with ID: %r", group_id) if group: raise SynapseError(400, "Group already exists") @@ -773,3 +776,18 @@ def _parse_visibility_from_contents(content): is_public = True return is_public + + +def _validate_group_id(group_id): + """Validates the group ID is valid for creation on this home server + """ + localpart = GroupID.from_string(group_id).localpart + + if localpart.lower() != localpart: + raise SynapseError(400, "Group ID must be lower case") + + if urllib.quote(localpart.encode('utf-8')) != localpart: + raise SynapseError( + 400, + "Group ID can only contain characters a-z, 0-9, or '_-./'", + ) From 29bafe2f7e82e48b9aad03fb23a790b3719faf78 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 19 Oct 2017 12:13:44 +0100 Subject: [PATCH 0312/1637] Add config to enable group creation --- synapse/config/homeserver.py | 3 ++- synapse/groups/groups_server.py | 12 ++++++++++-- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py index 3f9d9d5f8b..05e242aef6 100644 --- a/synapse/config/homeserver.py +++ 
b/synapse/config/homeserver.py @@ -35,6 +35,7 @@ from .emailconfig import EmailConfig from .workers import WorkerConfig from .push import PushConfig from .spam_checker import SpamCheckerConfig +from .groups import GroupsConfig class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig, @@ -43,7 +44,7 @@ class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig, AppServiceConfig, KeyConfig, SAML2Config, CasConfig, JWTConfig, PasswordConfig, EmailConfig, WorkerConfig, PasswordAuthProviderConfig, PushConfig, - SpamCheckerConfig,): + SpamCheckerConfig, GroupsConfig,): pass diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index e9b44c0971..c19d733d76 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -704,10 +704,18 @@ class GroupsServerHandler(object): if group: raise SynapseError(400, "Group already exists") - # TODO: Add config to enforce that only server admins can create rooms is_admin = yield self.auth.is_server_admin(UserID.from_string(user_id)) if not is_admin: - raise SynapseError(403, "Only server admin can create group on this server") + if not self.hs.config.enable_group_creation: + raise SynapseError(403, "Only server admin can create group on this server") + localpart = GroupID.from_string(group_id).localpart + if not localpart.startswith(self.hs.config.group_creation_prefix): + raise SynapseError( + 400, + "Can only create groups with prefix %r on this server" % ( + self.hs.config.group_creation_prefix, + ), + ) profile = content.get("profile", {}) name = profile.get("name") From ffd3f1a7838eb12a30abe40831275f360b528d1f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 19 Oct 2017 12:17:30 +0100 Subject: [PATCH 0313/1637] Add missing file... --- synapse/config/groups.py | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) create mode 100644 synapse/config/groups.py diff --git a/synapse/config/groups.py b/synapse/config/groups.py new file mode 100644 index 0000000000..7683a37534 --- /dev/null +++ b/synapse/config/groups.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- +# Copyright 2017 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from synapse.util.module_loader import load_module + +from ._base import Config + +from distutils.util import strtobool + + +class GroupsConfig(Config): + def read_config(self, config): + self.enable_group_creation = config.get("enable_group_creation", False) + self.group_creation_prefix = config.get("group_creation_prefix", "") + + def default_config(self, **kwargs): + return """\ + # Whether to allow non server admins to create groups on this server + enable_group_creation: false + + # If enabled, non server admins can only create groups with local parts + # starting with this prefix + # group_creation_prefix: "unofficial/" + """ From c7d46510d7a700fda9730d90b010e1e1e596c58e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 19 Oct 2017 13:36:06 +0100 Subject: [PATCH 0314/1637] Flake8 --- synapse/config/groups.py | 4 ---- synapse/groups/groups_server.py | 6 ++++-- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/synapse/config/groups.py b/synapse/config/groups.py index 7683a37534..997fa2881f 100644 --- a/synapse/config/groups.py +++ b/synapse/config/groups.py @@ -13,12 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -from synapse.util.module_loader import load_module - from ._base import Config -from distutils.util import strtobool - class GroupsConfig(Config): def read_config(self, config): diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index c19d733d76..fc4edb7f04 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -707,7 +707,9 @@ class GroupsServerHandler(object): is_admin = yield self.auth.is_server_admin(UserID.from_string(user_id)) if not is_admin: if not self.hs.config.enable_group_creation: - raise SynapseError(403, "Only server admin can create group on this server") + raise SynapseError( + 403, "Only server admin can create group on this server", + ) localpart = GroupID.from_string(group_id).localpart if not localpart.startswith(self.hs.config.group_creation_prefix): raise SynapseError( @@ -715,7 +717,7 @@ class GroupsServerHandler(object): "Can only create groups with prefix %r on this server" % ( self.hs.config.group_creation_prefix, ), - ) + ) profile = content.get("profile", {}) name = profile.get("name") From 0ef0aeceacb2c1298561ab52e68a046cc78028de Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 19 Oct 2017 11:49:14 +0100 Subject: [PATCH 0315/1637] Bump version and changelog --- CHANGES.rst | 35 +++++++++++++++++++++++++++++++++++ synapse/__init__.py | 2 +- 2 files changed, 36 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index f1529e79bd..aaa4a2c219 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,38 @@ +Changes in synapse v0.24.0-rc1 (2017-10-19) +=========================================== + +Features: + +* Add Group Server (PR #2352, #2363, #2374, #2377, #2378, #2382, #2410, #2426, + #2430, #2454, #2471, #2472, #2544) +* Add support for channel notifications (PR #2501) +* Add basic implementation of backup media store (PR #2538) +* Add config option to auto-join new users to rooms (PR #2545) + + +Changes: + +* Make the spam checker a module (PR #2474) +* Delete expired url cache data (PR #2478) +* Ignore incoming events for rooms that we have left (PR #2490) +* Allow spam checker to reject invites too (PR #2492) +* Add room creation checks to spam checker (PR #2495) +* Spam checking: add the invitee to user_may_invite (PR #2502) +* Process events from federation for different rooms in 
parallel (PR #2520) +* Allow error strings from spam checker (PR #2531) + + +Bug fixes: + +* Fix handling SERVFAILs when doing AAAA lookups for federation (PR #2477) +* Fix incompatibility with newer versions of ujson (PR #2483) Thanks to + @jeremycline! +* Fix notification keywords that start/end with non-word chars (PR #2500) +* Fix stack overflow and logcontexts from linearizer (PR #2532) +* Fix 500 error when fields missing from power_levels event (PR #2552) +* Fix 500 error when we get an error handling a PDU (PR #2553) + + Changes in synapse v0.23.1 (2017-10-02) ======================================= diff --git a/synapse/__init__.py b/synapse/__init__.py index bee4aba625..c867d1cfd8 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -16,4 +16,4 @@ """ This is a reference implementation of a Matrix home server. """ -__version__ = "0.23.1" +__version__ = "0.24.0" From d6237859f6cc220f7a1e0b1d63e60421d14eb07f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 19 Oct 2017 14:19:52 +0100 Subject: [PATCH 0316/1637] Update changelog --- CHANGES.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGES.rst b/CHANGES.rst index aaa4a2c219..85830d832e 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -20,6 +20,7 @@ Changes: * Spam checking: add the invitee to user_may_invite (PR #2502) * Process events from federation for different rooms in parallel (PR #2520) * Allow error strings from spam checker (PR #2531) +* Improve error handling for missing files in config (PR #2551) Bug fixes: From 6070647774d335d36de640aefce7a291a111d908 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 19 Oct 2017 16:40:20 +0100 Subject: [PATCH 0317/1637] Correctly bump version --- synapse/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/__init__.py b/synapse/__init__.py index c867d1cfd8..d2480271f7 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -16,4 +16,4 @@ """ This is a reference implementation of a Matrix home server. """ -__version__ = "0.24.0" +__version__ = "0.24.0-rc1" From 631d7b87b50b7263a2f9a3f89fc196272011bf37 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 20 Oct 2017 16:33:15 +0100 Subject: [PATCH 0318/1637] Remove pointless create() method It just calls the constructor, so we may as well kill it rather than having random codepaths. 
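As a minimal sketch of the pattern being removed (a simplified stand-in for synapse.types.UserID, which is a namedtuple of localpart and domain; only the relevant pieces are shown):

    from collections import namedtuple

    class UserID(namedtuple("UserID", ("localpart", "domain"))):
        SIGIL = "@"

        def to_string(self):
            return "%s%s:%s" % (self.SIGIL, self.localpart, self.domain)

        @classmethod
        def create(cls, localpart, domain):
            # added nothing over the plain constructor
            return cls(localpart=localpart, domain=domain)

    # the two spellings are equivalent, so the classmethod can go
    assert UserID.create("alice", "my.domain") == UserID("alice", "my.domain")
    assert UserID("alice", "my.domain").to_string() == "@alice:my.domain"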
--- synapse/events/builder.py | 2 +- synapse/handlers/auth.py | 2 +- synapse/handlers/room.py | 4 ++-- synapse/rest/client/v1/login.py | 6 +++--- synapse/rest/client/v2_alpha/groups.py | 2 +- synapse/types.py | 4 ---- 6 files changed, 8 insertions(+), 12 deletions(-) diff --git a/synapse/events/builder.py b/synapse/events/builder.py index 365fd96bd2..13fbba68c0 100644 --- a/synapse/events/builder.py +++ b/synapse/events/builder.py @@ -55,7 +55,7 @@ class EventBuilderFactory(object): local_part = str(int(self.clock.time())) + i + random_string(5) - e_id = EventID.create(local_part, self.hostname) + e_id = EventID(local_part, self.hostname) return e_id.to_string() diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index b00446bec0..9cef9d184b 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -267,7 +267,7 @@ class AuthHandler(BaseHandler): user_id = authdict["user"] password = authdict["password"] if not user_id.startswith('@'): - user_id = UserID.create(user_id, self.hs.hostname).to_string() + user_id = UserID(user_id, self.hs.hostname).to_string() return self._check_password(user_id, password) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 535ba9517c..e945bd35bc 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -91,7 +91,7 @@ class RoomCreationHandler(BaseHandler): if wchar in config["room_alias_name"]: raise SynapseError(400, "Invalid characters in room alias") - room_alias = RoomAlias.create( + room_alias = RoomAlias( config["room_alias_name"], self.hs.hostname, ) @@ -123,7 +123,7 @@ class RoomCreationHandler(BaseHandler): while attempts < 5: try: random_string = stringutils.random_string(18) - gen_room_id = RoomID.create( + gen_room_id = RoomID( random_string, self.hs.hostname, ) diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index a43410fb37..9536e8ade6 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -211,7 +211,7 @@ class LoginRestServlet(ClientV1RestServlet): user_id = identifier["user"] if not user_id.startswith('@'): - user_id = UserID.create( + user_id = UserID( user_id, self.hs.hostname ).to_string() @@ -278,7 +278,7 @@ class LoginRestServlet(ClientV1RestServlet): if user is None: raise LoginError(401, "Invalid JWT", errcode=Codes.UNAUTHORIZED) - user_id = UserID.create(user, self.hs.hostname).to_string() + user_id = UserID(user, self.hs.hostname).to_string() auth_handler = self.auth_handler registered_user_id = yield auth_handler.check_user_exists(user_id) if registered_user_id: @@ -444,7 +444,7 @@ class CasTicketServlet(ClientV1RestServlet): if required_value != actual_value: raise LoginError(401, "Unauthorized", errcode=Codes.UNAUTHORIZED) - user_id = UserID.create(user, self.hs.hostname).to_string() + user_id = UserID(user, self.hs.hostname).to_string() auth_handler = self.auth_handler registered_user_id = yield auth_handler.check_user_exists(user_id) if not registered_user_id: diff --git a/synapse/rest/client/v2_alpha/groups.py b/synapse/rest/client/v2_alpha/groups.py index d11bccc1da..100f47ca9e 100644 --- a/synapse/rest/client/v2_alpha/groups.py +++ b/synapse/rest/client/v2_alpha/groups.py @@ -412,7 +412,7 @@ class GroupCreateServlet(RestServlet): # TODO: Create group on remote server content = parse_json_object_from_request(request) localpart = content.pop("localpart") - group_id = GroupID.create(localpart, self.server_name).to_string() + group_id = GroupID(localpart, self.server_name).to_string() result = yield 
self.groups_handler.create_group(group_id, user_id, content) diff --git a/synapse/types.py b/synapse/types.py index 37d5fa7f9f..1aa426fcbb 100644 --- a/synapse/types.py +++ b/synapse/types.py @@ -131,10 +131,6 @@ class DomainSpecificString( __str__ = to_string - @classmethod - def create(cls, localpart, domain,): - return cls(localpart=localpart, domain=domain) - class UserID(DomainSpecificString): """Structure representing a user ID.""" From 58fbbe0f1db78d9dc91a319874dc8409e77cbf4c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 20 Oct 2017 23:37:22 +0100 Subject: [PATCH 0319/1637] Disallow capital letters in userids Factor out a common function for checking user ids and group ids, which forbids capitals. --- synapse/groups/groups_server.py | 17 ++++++----------- synapse/handlers/register.py | 10 ++++------ synapse/types.py | 16 ++++++++++++++++ 3 files changed, 26 insertions(+), 17 deletions(-) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index fc4edb7f04..c359bfa72b 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -13,14 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. -from twisted.internet import defer - -from synapse.api.errors import SynapseError -from synapse.types import UserID, get_domain_from_id, RoomID, GroupID - - import logging -import urllib + +from synapse import types +from synapse.api.errors import SynapseError +from synapse.types import GroupID, RoomID, UserID, get_domain_from_id +from twisted.internet import defer logger = logging.getLogger(__name__) @@ -793,10 +791,7 @@ def _validate_group_id(group_id): """ localpart = GroupID.from_string(group_id).localpart - if localpart.lower() != localpart: - raise SynapseError(400, "Group ID must be lower case") - - if urllib.quote(localpart.encode('utf-8')) != localpart: + if types.contains_invalid_mxid_characters(localpart): raise SynapseError( 400, "Group ID can only contain characters a-z, 0-9, or '_-./'", diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 560fb36254..c7c091f43e 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -15,7 +15,6 @@ """Contains functions for registering clients.""" import logging -import urllib from twisted.internet import defer @@ -23,6 +22,7 @@ from synapse.api.errors import ( AuthError, Codes, SynapseError, RegistrationError, InvalidCaptchaError ) from synapse.http.client import CaptchaServerHttpClient +from synapse import types from synapse.types import UserID from synapse.util.async import run_on_reactor from ._base import BaseHandler @@ -46,9 +46,7 @@ class RegistrationHandler(BaseHandler): @defer.inlineCallbacks def check_username(self, localpart, guest_access_token=None, assigned_user_id=None): - yield run_on_reactor() - - if urllib.quote(localpart.encode('utf-8')) != localpart: + if types.contains_invalid_mxid_characters(localpart): raise SynapseError( 400, "User ID can only contain characters a-z, 0-9, or '_-./'", @@ -81,7 +79,7 @@ class RegistrationHandler(BaseHandler): "A different user ID has already been registered for this session", ) - yield self.check_user_id_not_appservice_exclusive(user_id) + self.check_user_id_not_appservice_exclusive(user_id) users = yield self.store.get_users_by_id_case_insensitive(user_id) if users: @@ -254,7 +252,7 @@ class RegistrationHandler(BaseHandler): """ Registers email_id as SAML2 Based Auth. 
""" - if urllib.quote(localpart) != localpart: + if types.contains_invalid_mxid_characters(localpart): raise SynapseError( 400, "User ID must only contain characters which do not" diff --git a/synapse/types.py b/synapse/types.py index 37d5fa7f9f..efa721273d 100644 --- a/synapse/types.py +++ b/synapse/types.py @@ -12,6 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import string from synapse.api.errors import SynapseError @@ -161,6 +162,21 @@ class GroupID(DomainSpecificString): SIGIL = "+" +mxid_localpart_allowed_characters = set("_-./" + string.ascii_lowercase + string.digits) + + +def contains_invalid_mxid_characters(localpart): + """Check for characters not allowed in an mxid or groupid localpart + + Args: + localpart (basestring): the localpart to be checked + + Returns: + bool: True if there are any naughty characters + """ + return any(c not in mxid_localpart_allowed_characters for c in localpart) + + class StreamToken( namedtuple("Token", ( "room_key", From 29812c628ba924448719f5d2cfe7e05a5b1d0f45 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 20 Oct 2017 23:42:53 +0100 Subject: [PATCH 0320/1637] Allow = in mxids and groupids ... because the spec says we should. --- synapse/groups/groups_server.py | 2 +- synapse/handlers/register.py | 5 ++--- synapse/types.py | 2 +- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index c359bfa72b..3599bfe9cf 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -794,5 +794,5 @@ def _validate_group_id(group_id): if types.contains_invalid_mxid_characters(localpart): raise SynapseError( 400, - "Group ID can only contain characters a-z, 0-9, or '_-./'", + "Group ID can only contain characters a-z, 0-9, or '=_-./'", ) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index c7c091f43e..52aa9964d9 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -49,7 +49,7 @@ class RegistrationHandler(BaseHandler): if types.contains_invalid_mxid_characters(localpart): raise SynapseError( 400, - "User ID can only contain characters a-z, 0-9, or '_-./'", + "User ID can only contain characters a-z, 0-9, or '=_-./'", Codes.INVALID_USERNAME ) @@ -255,8 +255,7 @@ class RegistrationHandler(BaseHandler): if types.contains_invalid_mxid_characters(localpart): raise SynapseError( 400, - "User ID must only contain characters which do not" - " require URL encoding." + "User ID can only contain characters a-z, 0-9, or '=_-./'", ) user = UserID(localpart, self.hs.hostname) user_id = user.to_string() diff --git a/synapse/types.py b/synapse/types.py index efa721273d..88eb818de4 100644 --- a/synapse/types.py +++ b/synapse/types.py @@ -162,7 +162,7 @@ class GroupID(DomainSpecificString): SIGIL = "+" -mxid_localpart_allowed_characters = set("_-./" + string.ascii_lowercase + string.digits) +mxid_localpart_allowed_characters = set("_-./=" + string.ascii_lowercase + string.digits) def contains_invalid_mxid_characters(localpart): From 1135193dfde2a844d38dab4bd50a69658891abcb Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 20 Oct 2017 23:51:07 +0100 Subject: [PATCH 0321/1637] Validate group ids when parsing May as well do it whenever we parse a Group ID. We check the sigil and basic structure here so it makes sense to check the grammar in the same place. 
--- synapse/groups/groups_server.py | 21 +++++---------------- synapse/types.py | 17 +++++++++++++++++ tests/test_types.py | 24 +++++++++++++++++++++++- 3 files changed, 45 insertions(+), 17 deletions(-) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 3599bfe9cf..23beb3187e 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -15,7 +15,6 @@ import logging -from synapse import types from synapse.api.errors import SynapseError from synapse.types import GroupID, RoomID, UserID, get_domain_from_id from twisted.internet import defer @@ -696,9 +695,11 @@ class GroupsServerHandler(object): def create_group(self, group_id, user_id, content): group = yield self.check_group_is_ours(group_id) - _validate_group_id(group_id) - logger.info("Attempting to create group with ID: %r", group_id) + + # parsing the id into a GroupID validates it. + group_id_obj = GroupID.from_string(group_id) + if group: raise SynapseError(400, "Group already exists") @@ -708,7 +709,7 @@ class GroupsServerHandler(object): raise SynapseError( 403, "Only server admin can create group on this server", ) - localpart = GroupID.from_string(group_id).localpart + localpart = group_id_obj.localpart if not localpart.startswith(self.hs.config.group_creation_prefix): raise SynapseError( 400, @@ -784,15 +785,3 @@ def _parse_visibility_from_contents(content): is_public = True return is_public - - -def _validate_group_id(group_id): - """Validates the group ID is valid for creation on this home server - """ - localpart = GroupID.from_string(group_id).localpart - - if types.contains_invalid_mxid_characters(localpart): - raise SynapseError( - 400, - "Group ID can only contain characters a-z, 0-9, or '=_-./'", - ) diff --git a/synapse/types.py b/synapse/types.py index 88eb818de4..5e3d1fc0b2 100644 --- a/synapse/types.py +++ b/synapse/types.py @@ -161,6 +161,23 @@ class GroupID(DomainSpecificString): """Structure representing a group ID.""" SIGIL = "+" + @classmethod + def from_string(cls, s): + group_id = super(GroupID, cls).from_string(s) + if not group_id.localpart: + raise SynapseError( + 400, + "Group ID cannot be empty", + ) + + if contains_invalid_mxid_characters(group_id.localpart): + raise SynapseError( + 400, + "Group ID can only contain characters a-z, 0-9, or '=_-./'", + ) + + return group_id + mxid_localpart_allowed_characters = set("_-./=" + string.ascii_lowercase + string.digits) diff --git a/tests/test_types.py b/tests/test_types.py index 24d61dbe54..115def2287 100644 --- a/tests/test_types.py +++ b/tests/test_types.py @@ -17,7 +17,7 @@ from tests import unittest from synapse.api.errors import SynapseError from synapse.server import HomeServer -from synapse.types import UserID, RoomAlias +from synapse.types import UserID, RoomAlias, GroupID mock_homeserver = HomeServer(hostname="my.domain") @@ -60,3 +60,25 @@ class RoomAliasTestCase(unittest.TestCase): room = RoomAlias("channel", "my.domain") self.assertEquals(room.to_string(), "#channel:my.domain") + + +class GroupIDTestCase(unittest.TestCase): + def test_parse(self): + group_id = GroupID.from_string("+group/=_-.123:my.domain") + self.assertEqual("group/=_-.123", group_id.localpart) + self.assertEqual("my.domain", group_id.domain) + + def test_validate(self): + bad_ids = [ + "$badsigil:domain", + "+:empty", + ] + [ + "+group" + c + ":domain" for c in "A%?æ£" + ] + for id_string in bad_ids: + try: + GroupID.from_string(id_string) + self.fail("Parsing '%s' should raise exception" % id_string) + except 
SynapseError as exc: + self.assertEqual(400, exc.code) + self.assertEqual("M_UNKNOWN", exc.errcode) From 13e16cf3025136e17ad4a7a06fafd683b3aa0456 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 23 Oct 2017 13:13:31 +0100 Subject: [PATCH 0322/1637] Bump version and changelog --- CHANGES.rst | 6 ++++++ synapse/__init__.py | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index 85830d832e..80518b7bae 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,9 @@ +Changes in synapse v0.24.0 (2017-10-23) +======================================= + +No changes since v0.24.0-rc1 + + Changes in synapse v0.24.0-rc1 (2017-10-19) =========================================== diff --git a/synapse/__init__.py b/synapse/__init__.py index d2480271f7..c867d1cfd8 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -16,4 +16,4 @@ """ This is a reference implementation of a Matrix home server. """ -__version__ = "0.24.0-rc1" +__version__ = "0.24.0" From d03cfc4258d7a9578b454db2a35d18772dfdcdbf Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 23 Oct 2017 14:34:27 +0100 Subject: [PATCH 0323/1637] Fix a logcontext leak in the media repo --- synapse/rest/media/v1/_base.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/synapse/rest/media/v1/_base.py b/synapse/rest/media/v1/_base.py index b9600f2167..bce077becf 100644 --- a/synapse/rest/media/v1/_base.py +++ b/synapse/rest/media/v1/_base.py @@ -17,6 +17,7 @@ from synapse.http.server import respond_with_json, finish_request from synapse.api.errors import ( cs_error, Codes, SynapseError ) +from synapse.util import logcontext from twisted.internet import defer from twisted.protocols.basic import FileSender @@ -103,7 +104,9 @@ def respond_with_file(request, media_type, file_path, ) with open(file_path, "rb") as f: - yield FileSender().beginFileTransfer(f, request) + yield logcontext.make_deferred_yieldable( + FileSender().beginFileTransfer(f, request) + ) finish_request(request) else: From ce6d4914f4e02a812643de252d8d456a84102893 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 23 Oct 2017 15:19:28 +0100 Subject: [PATCH 0324/1637] Correctly wire in update group profile over federation --- synapse/federation/transport/client.py | 20 ++++++++++++++++++++ synapse/federation/transport/server.py | 26 +++++++++++++------------- 2 files changed, 33 insertions(+), 13 deletions(-) diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 125d8f3598..d25ae1b282 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -485,6 +485,26 @@ class TransportLayerClient(object): ignore_backoff=True, ) + @log_function + def update_group_profile(self, destination, group_id, requester_user_id, content): + """Update a remote group profile + + Args: + destination (str) + group_id (str) + requester_user_id (str) + content (dict): The new profile of the group + """ + path = PREFIX + "/groups/%s/profile" % (group_id,) + + return self.client.post_json( + destination=destination, + path=path, + args={"requester_user_id": requester_user_id}, + data=content, + ignore_backoff=True, + ) + @log_function def get_group_summary(self, destination, group_id, requester_user_id): """Get a group summary diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index f0778c65c5..8e08321fe8 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -610,7 
+610,7 @@ class FederationVersionServlet(BaseFederationServlet): class FederationGroupsProfileServlet(BaseFederationServlet): - """Get the basic profile of a group on behalf of a user + """Get/set the basic profile of a group on behalf of a user """ PATH = "/groups/(?P[^/]*)/profile$" @@ -626,6 +626,18 @@ class FederationGroupsProfileServlet(BaseFederationServlet): defer.returnValue((200, new_content)) + @defer.inlineCallbacks + def on_POST(self, origin, content, query, group_id): + requester_user_id = parse_string_from_args(query, "requester_user_id") + if get_domain_from_id(requester_user_id) != origin: + raise SynapseError(403, "requester_user_id doesn't match origin") + + new_content = yield self.handler.update_group_profile( + group_id, requester_user_id, content + ) + + defer.returnValue((200, new_content)) + class FederationGroupsSummaryServlet(BaseFederationServlet): PATH = "/groups/(?P[^/]*)/summary$" @@ -642,18 +654,6 @@ class FederationGroupsSummaryServlet(BaseFederationServlet): defer.returnValue((200, new_content)) - @defer.inlineCallbacks - def on_POST(self, origin, content, query, group_id): - requester_user_id = parse_string_from_args(query, "requester_user_id") - if get_domain_from_id(requester_user_id) != origin: - raise SynapseError(403, "requester_user_id doesn't match origin") - - new_content = yield self.handler.update_group_profile( - group_id, requester_user_id, content - ) - - defer.returnValue((200, new_content)) - class FederationGroupsRoomsServlet(BaseFederationServlet): """Get the rooms in a group on behalf of a user From eaaabc6c4f93d49f090ef7ff930fa2739428a36f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 23 Oct 2017 15:52:32 +0100 Subject: [PATCH 0325/1637] replace 'except:' with 'except Exception:' what could possibly go wrong --- synapse/app/_base.py | 2 +- synapse/appservice/scheduler.py | 2 +- synapse/config/server.py | 2 +- synapse/crypto/context_factory.py | 2 +- synapse/crypto/event_signing.py | 2 +- synapse/crypto/keyring.py | 2 +- synapse/event_auth.py | 4 ++-- synapse/events/spamcheck.py | 2 +- synapse/federation/transport/server.py | 6 +++--- synapse/handlers/federation.py | 16 ++++++++-------- synapse/handlers/initial_sync.py | 2 +- synapse/handlers/message.py | 2 +- synapse/handlers/presence.py | 2 +- synapse/handlers/profile.py | 6 +++--- synapse/handlers/register.py | 2 +- synapse/handlers/room.py | 2 +- synapse/handlers/search.py | 2 +- synapse/http/matrixfederationclient.py | 2 +- synapse/http/server.py | 2 +- synapse/http/servlet.py | 6 +++--- synapse/http/site.py | 2 +- synapse/notifier.py | 2 +- synapse/push/emailpusher.py | 2 +- synapse/push/httppusher.py | 6 +++--- synapse/push/pusher.py | 2 +- synapse/push/pusherpool.py | 6 +++--- synapse/replication/tcp/resource.py | 2 +- synapse/rest/client/v1/directory.py | 2 +- synapse/rest/client/v1/presence.py | 2 +- synapse/rest/client/v1/profile.py | 4 ++-- synapse/rest/client/v1/room.py | 6 +++--- synapse/rest/client/v2_alpha/filter.py | 2 +- synapse/rest/client/v2_alpha/sync.py | 2 +- synapse/rest/client/v2_alpha/user_directory.py | 2 +- synapse/rest/key/v2/remote_key_resource.py | 2 +- synapse/rest/media/v1/_base.py | 2 +- synapse/rest/media/v1/media_repository.py | 2 +- synapse/rest/media/v1/preview_url_resource.py | 6 +++--- synapse/state.py | 2 +- synapse/storage/_base.py | 2 +- synapse/storage/background_updates.py | 2 +- synapse/storage/events.py | 2 +- synapse/storage/prepare_database.py | 2 +- synapse/storage/roommember.py | 2 +- 
synapse/storage/schema/delta/30/as_users.py | 2 +- synapse/storage/search.py | 4 ++-- synapse/streams/config.py | 6 +++--- synapse/types.py | 8 ++++---- synapse/util/__init__.py | 12 ++++++------ synapse/util/async.py | 6 +++--- synapse/util/logcontext.py | 2 +- synapse/util/retryutils.py | 2 +- tests/storage/test_appservice.py | 2 +- tests/utils.py | 8 ++++---- 54 files changed, 93 insertions(+), 93 deletions(-) diff --git a/synapse/app/_base.py b/synapse/app/_base.py index cf4730730d..9477737759 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -19,7 +19,7 @@ import sys try: import affinity -except: +except Exception: affinity = None from daemonize import Daemonize diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py index 68a9de17b8..6da315473d 100644 --- a/synapse/appservice/scheduler.py +++ b/synapse/appservice/scheduler.py @@ -123,7 +123,7 @@ class _ServiceQueuer(object): with Measure(self.clock, "servicequeuer.send"): try: yield self.txn_ctrl.send(service, events) - except: + except Exception: logger.exception("AS request failed") finally: self.requests_in_flight.discard(service.id) diff --git a/synapse/config/server.py b/synapse/config/server.py index c9a1715f1f..b66993dab9 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -303,7 +303,7 @@ def read_gc_thresholds(thresholds): return ( int(thresholds[0]), int(thresholds[1]), int(thresholds[2]), ) - except: + except Exception: raise ConfigError( "Value of `gc_threshold` must be a list of three integers if set" ) diff --git a/synapse/crypto/context_factory.py b/synapse/crypto/context_factory.py index aad4752fe7..cff3ca809a 100644 --- a/synapse/crypto/context_factory.py +++ b/synapse/crypto/context_factory.py @@ -34,7 +34,7 @@ class ServerContextFactory(ssl.ContextFactory): try: _ecCurve = _OpenSSLECCurve(_defaultCurveName) _ecCurve.addECKeyToContext(context) - except: + except Exception: logger.exception("Failed to enable elliptic curve for TLS") context.set_options(SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3) context.use_certificate_chain_file(config.tls_certificate_file) diff --git a/synapse/crypto/event_signing.py b/synapse/crypto/event_signing.py index ec7711ba7d..0d0e7b5286 100644 --- a/synapse/crypto/event_signing.py +++ b/synapse/crypto/event_signing.py @@ -43,7 +43,7 @@ def check_event_content_hash(event, hash_algorithm=hashlib.sha256): message_hash_base64 = event.hashes[name] try: message_hash_bytes = decode_base64(message_hash_base64) - except: + except Exception: raise SynapseError( 400, "Invalid base64: %s" % (message_hash_base64,), diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 054bac456d..35f810b07b 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -759,7 +759,7 @@ def _handle_key_deferred(verify_request): )) try: verify_signed_json(json_object, server_name, verify_key) - except: + except Exception: raise SynapseError( 401, "Invalid signature for server %s with key %s:%s" % ( diff --git a/synapse/event_auth.py b/synapse/event_auth.py index 9e746a28bf..061ee86b16 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -443,12 +443,12 @@ def _check_power_levels(event, auth_events): for k, v in user_list.items(): try: UserID.from_string(k) - except: + except Exception: raise SynapseError(400, "Not a valid user_id: %s" % (k,)) try: int(v) - except: + except Exception: raise SynapseError(400, "Not a valid power level: %s" % (v,)) key = (event.type, event.state_key, ) diff --git a/synapse/events/spamcheck.py 
b/synapse/events/spamcheck.py index dccc579eac..633e068eb8 100644 --- a/synapse/events/spamcheck.py +++ b/synapse/events/spamcheck.py @@ -22,7 +22,7 @@ class SpamChecker(object): config = None try: module, config = hs.config.spam_checker - except: + except Exception: pass if module is not None: diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index f0778c65c5..1e68a898d5 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -112,7 +112,7 @@ class Authenticator(object): key = strip_quotes(param_dict["key"]) sig = strip_quotes(param_dict["sig"]) return (origin, key, sig) - except: + except Exception: raise AuthenticationError( 400, "Malformed Authorization header", Codes.UNAUTHORIZED ) @@ -177,7 +177,7 @@ class BaseFederationServlet(object): if self.REQUIRE_AUTH: logger.exception("authenticate_request failed") raise - except: + except Exception: logger.exception("authenticate_request failed") raise @@ -270,7 +270,7 @@ class FederationSendServlet(BaseFederationServlet): code, response = yield self.handler.on_incoming_transaction( transaction_data ) - except: + except Exception: logger.exception("on_incoming_transaction failed") raise diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 7711cded01..8b1e606754 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -227,7 +227,7 @@ class FederationHandler(BaseHandler): state, auth_chain = yield self.replication_layer.get_state_for_room( origin, pdu.room_id, pdu.event_id, ) - except: + except Exception: logger.exception("Failed to get state for event: %s", pdu.event_id) yield self._process_received_pdu( @@ -461,7 +461,7 @@ class FederationHandler(BaseHandler): def check_match(id): try: return server_name == get_domain_from_id(id) - except: + except Exception: return False # Parses mapping `event_id -> (type, state_key) -> state event_id` @@ -499,7 +499,7 @@ class FederationHandler(BaseHandler): continue try: domain = get_domain_from_id(ev.state_key) - except: + except Exception: continue if domain != server_name: @@ -738,7 +738,7 @@ class FederationHandler(BaseHandler): joined_domains[dom] = min(d, old_d) else: joined_domains[dom] = d - except: + except Exception: pass return sorted(joined_domains.items(), key=lambda d: d[1]) @@ -940,7 +940,7 @@ class FederationHandler(BaseHandler): room_creator_user_id="", is_public=False ) - except: + except Exception: # FIXME pass @@ -1775,7 +1775,7 @@ class FederationHandler(BaseHandler): [e_id for e_id, _ in event.auth_events] ) seen_events = set(have_events.keys()) - except: + except Exception: # FIXME: logger.exception("Failed to get auth chain") @@ -1899,7 +1899,7 @@ class FederationHandler(BaseHandler): except AuthError: pass - except: + except Exception: # FIXME: logger.exception("Failed to query auth chain") @@ -1966,7 +1966,7 @@ class FederationHandler(BaseHandler): def get_next(it, opt=None): try: return it.next() - except: + except Exception: return opt current_local = get_next(local_iter) diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index 10f5f35a69..9718d4abc5 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -214,7 +214,7 @@ class InitialSyncHandler(BaseHandler): }) d["account_data"] = account_data_events - except: + except Exception: logger.exception("Failed to get snapshot") yield concurrently_execute(handle_room, room_list, 10) diff --git a/synapse/handlers/message.py 
b/synapse/handlers/message.py index 28792788d9..21f1717dd2 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -563,7 +563,7 @@ class MessageHandler(BaseHandler): try: dump = ujson.dumps(unfreeze(event.content)) ujson.loads(dump) - except: + except Exception: logger.exception("Failed to encode content: %r", event.content) raise diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index c7c0b0a1e2..fa96ea69cd 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -364,7 +364,7 @@ class PresenceHandler(object): ) preserve_fn(self._update_states)(changes) - except: + except Exception: logger.exception("Exception in _handle_timeouts loop") @defer.inlineCallbacks diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index e56e0a52bf..62b9bd503e 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -118,7 +118,7 @@ class ProfileHandler(BaseHandler): logger.exception("Failed to get displayname") raise - except: + except Exception: logger.exception("Failed to get displayname") else: defer.returnValue(result["displayname"]) @@ -165,7 +165,7 @@ class ProfileHandler(BaseHandler): if e.code != 404: logger.exception("Failed to get avatar_url") raise - except: + except Exception: logger.exception("Failed to get avatar_url") defer.returnValue(result["avatar_url"]) @@ -266,7 +266,7 @@ class ProfileHandler(BaseHandler): }, ignore_backoff=True, ) - except: + except Exception: logger.exception("Failed to get avatar_url") yield self.store.update_remote_profile_cache( diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 52aa9964d9..49dc33c147 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -289,7 +289,7 @@ class RegistrationHandler(BaseHandler): try: identity_handler = self.hs.get_handlers().identity_handler threepid = yield identity_handler.threepid_from_creds(c) - except: + except Exception: logger.exception("Couldn't validate 3pid") raise RegistrationError(400, "Couldn't validate 3pid") diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index e945bd35bc..496f1fc39b 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -108,7 +108,7 @@ class RoomCreationHandler(BaseHandler): for i in invite_list: try: UserID.from_string(i) - except: + except Exception: raise SynapseError(400, "Invalid user_id: %s" % (i,)) invite_3pid_list = config.get("invite_3pid", []) diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py index df75d70fac..9772ed1a0e 100644 --- a/synapse/handlers/search.py +++ b/synapse/handlers/search.py @@ -61,7 +61,7 @@ class SearchHandler(BaseHandler): assert batch_group is not None assert batch_group_key is not None assert batch_token is not None - except: + except Exception: raise SynapseError(400, "Invalid batch") try: diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 8c8b7fa656..833496b72d 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -550,7 +550,7 @@ class MatrixFederationHttpClient(object): length = yield _readBodyToFile( response, output_stream, max_size ) - except: + except Exception: logger.exception("Failed to download body") raise diff --git a/synapse/http/server.py b/synapse/http/server.py index 8a27e3b422..3ca1c9947c 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -130,7 +130,7 @@ def wrap_request_handler(request_handler, include_metrics=False): 
pretty_print=_request_user_agent_is_curl(request), version_string=self.version_string, ) - except: + except Exception: logger.exception( "Failed handle request %s.%s on %r: %r", request_handler.__module__, diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py index 9a4c36ad5d..8118ee7cc2 100644 --- a/synapse/http/servlet.py +++ b/synapse/http/servlet.py @@ -48,7 +48,7 @@ def parse_integer_from_args(args, name, default=None, required=False): if name in args: try: return int(args[name][0]) - except: + except Exception: message = "Query parameter %r must be an integer" % (name,) raise SynapseError(400, message) else: @@ -88,7 +88,7 @@ def parse_boolean_from_args(args, name, default=None, required=False): "true": True, "false": False, }[args[name][0]] - except: + except Exception: message = ( "Boolean query parameter %r must be one of" " ['true', 'false']" @@ -162,7 +162,7 @@ def parse_json_value_from_request(request): """ try: content_bytes = request.content.read() - except: + except Exception: raise SynapseError(400, "Error reading JSON content.") try: diff --git a/synapse/http/site.py b/synapse/http/site.py index 4b09d7ee66..cd1492b1c3 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -67,7 +67,7 @@ class SynapseRequest(Request): ru_utime, ru_stime = context.get_resource_usage() db_txn_count = context.db_txn_count db_txn_duration = context.db_txn_duration - except: + except Exception: ru_utime, ru_stime = (0, 0) db_txn_count, db_txn_duration = (0, 0) diff --git a/synapse/notifier.py b/synapse/notifier.py index 385208b574..626da778cd 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -289,7 +289,7 @@ class Notifier(object): for user_stream in user_streams: try: user_stream.notify(stream_key, new_token, time_now_ms) - except: + except Exception: logger.exception("Failed to notify listener") self.notify_replication() diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py index a69dda7b09..58df98a793 100644 --- a/synapse/push/emailpusher.py +++ b/synapse/push/emailpusher.py @@ -121,7 +121,7 @@ class EmailPusher(object): starting_max_ordering = self.max_stream_ordering try: yield self._unsafe_process() - except: + except Exception: logger.exception("Exception processing notifs") if self.max_stream_ordering == starting_max_ordering: break diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index 62c41cd9db..74c0bc462c 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -131,7 +131,7 @@ class HttpPusher(object): starting_max_ordering = self.max_stream_ordering try: yield self._unsafe_process() - except: + except Exception: logger.exception("Exception processing notifs") if self.max_stream_ordering == starting_max_ordering: break @@ -314,7 +314,7 @@ class HttpPusher(object): defer.returnValue([]) try: resp = yield self.http_client.post_json_get_json(self.url, notification_dict) - except: + except Exception: logger.warn("Failed to push %s ", self.url) defer.returnValue(False) rejected = [] @@ -345,7 +345,7 @@ class HttpPusher(object): } try: resp = yield self.http_client.post_json_get_json(self.url, d) - except: + except Exception: logger.exception("Failed to push %s ", self.url) defer.returnValue(False) rejected = [] diff --git a/synapse/push/pusher.py b/synapse/push/pusher.py index 491f27bded..71576330a9 100644 --- a/synapse/push/pusher.py +++ b/synapse/push/pusher.py @@ -27,7 +27,7 @@ logger = logging.getLogger(__name__) try: from synapse.push.emailpusher import EmailPusher from synapse.push.mailer import 
Mailer, load_jinja2_templates -except: +except Exception: pass diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index 43cb6e9c01..7c069b662e 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -137,7 +137,7 @@ class PusherPool: ) yield preserve_context_over_deferred(defer.gatherResults(deferreds)) - except: + except Exception: logger.exception("Exception in pusher on_new_notifications") @defer.inlineCallbacks @@ -162,7 +162,7 @@ class PusherPool: ) yield preserve_context_over_deferred(defer.gatherResults(deferreds)) - except: + except Exception: logger.exception("Exception in pusher on_new_receipts") @defer.inlineCallbacks @@ -188,7 +188,7 @@ class PusherPool: for pusherdict in pushers: try: p = self.pusher_factory.create_pusher(pusherdict) - except: + except Exception: logger.exception("Couldn't start a pusher: caught Exception") continue if p: diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py index 6c1beca4e3..1d03e79b85 100644 --- a/synapse/replication/tcp/resource.py +++ b/synapse/replication/tcp/resource.py @@ -162,7 +162,7 @@ class ReplicationStreamer(object): ) try: updates, current_token = yield stream.get_updates() - except: + except Exception: logger.info("Failed to handle stream %s", stream.NAME) raise diff --git a/synapse/rest/client/v1/directory.py b/synapse/rest/client/v1/directory.py index f15aa5c13f..1c3933380f 100644 --- a/synapse/rest/client/v1/directory.py +++ b/synapse/rest/client/v1/directory.py @@ -93,7 +93,7 @@ class ClientDirectoryServer(ClientV1RestServlet): ) except SynapseError as e: raise e - except: + except Exception: logger.exception("Failed to create association") raise except AuthError: diff --git a/synapse/rest/client/v1/presence.py b/synapse/rest/client/v1/presence.py index 47b2dc45e7..4a73813c58 100644 --- a/synapse/rest/client/v1/presence.py +++ b/synapse/rest/client/v1/presence.py @@ -78,7 +78,7 @@ class PresenceStatusRestServlet(ClientV1RestServlet): raise KeyError() except SynapseError as e: raise e - except: + except Exception: raise SynapseError(400, "Unable to parse state") yield self.presence_handler.set_state(user, state) diff --git a/synapse/rest/client/v1/profile.py b/synapse/rest/client/v1/profile.py index d7edc34245..e4e3611a14 100644 --- a/synapse/rest/client/v1/profile.py +++ b/synapse/rest/client/v1/profile.py @@ -52,7 +52,7 @@ class ProfileDisplaynameRestServlet(ClientV1RestServlet): try: new_name = content["displayname"] - except: + except Exception: defer.returnValue((400, "Unable to parse name")) yield self.profile_handler.set_displayname( @@ -94,7 +94,7 @@ class ProfileAvatarURLRestServlet(ClientV1RestServlet): content = parse_json_object_from_request(request) try: new_name = content["avatar_url"] - except: + except Exception: defer.returnValue((400, "Unable to parse name")) yield self.profile_handler.set_avatar_url( diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 6c379d53ac..75b735b47d 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -238,7 +238,7 @@ class JoinRoomAliasServlet(ClientV1RestServlet): try: content = parse_json_object_from_request(request) - except: + except Exception: # Turns out we used to ignore the body entirely, and some clients # cheekily send invalid bodies. 
content = {} @@ -247,7 +247,7 @@ class JoinRoomAliasServlet(ClientV1RestServlet): room_id = room_identifier try: remote_room_hosts = request.args["server_name"] - except: + except Exception: remote_room_hosts = None elif RoomAlias.is_valid(room_identifier): handler = self.handlers.room_member_handler @@ -587,7 +587,7 @@ class RoomMembershipRestServlet(ClientV1RestServlet): try: content = parse_json_object_from_request(request) - except: + except Exception: # Turns out we used to ignore the body entirely, and some clients # cheekily send invalid bodies. content = {} diff --git a/synapse/rest/client/v2_alpha/filter.py b/synapse/rest/client/v2_alpha/filter.py index d2b2fd66e6..1b9dc4528d 100644 --- a/synapse/rest/client/v2_alpha/filter.py +++ b/synapse/rest/client/v2_alpha/filter.py @@ -50,7 +50,7 @@ class GetFilterRestServlet(RestServlet): try: filter_id = int(filter_id) - except: + except Exception: raise SynapseError(400, "Invalid filter_id") try: diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py index a1e0e53b33..a0a8e4b8e4 100644 --- a/synapse/rest/client/v2_alpha/sync.py +++ b/synapse/rest/client/v2_alpha/sync.py @@ -125,7 +125,7 @@ class SyncRestServlet(RestServlet): filter_object = json.loads(filter_id) set_timeline_upper_limit(filter_object, self.hs.config.filter_timeline_limit) - except: + except Exception: raise SynapseError(400, "Invalid filter JSON") self.filtering.check_valid_filter(filter_object) filter = FilterCollection(filter_object) diff --git a/synapse/rest/client/v2_alpha/user_directory.py b/synapse/rest/client/v2_alpha/user_directory.py index 6e012da4aa..2d4a43c353 100644 --- a/synapse/rest/client/v2_alpha/user_directory.py +++ b/synapse/rest/client/v2_alpha/user_directory.py @@ -65,7 +65,7 @@ class UserDirectorySearchRestServlet(RestServlet): try: search_term = body["search_term"] - except: + except Exception: raise SynapseError(400, "`search_term` is required field") results = yield self.user_directory_handler.search_users( diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py index 9fe2013657..cc2842aa72 100644 --- a/synapse/rest/key/v2/remote_key_resource.py +++ b/synapse/rest/key/v2/remote_key_resource.py @@ -213,7 +213,7 @@ class RemoteKey(Resource): ) except KeyLookupError as e: logger.info("Failed to fetch key: %s", e) - except: + except Exception: logger.exception("Failed to get key for %r", server_name) yield self.query_keys( request, query, query_remote_on_cache_miss=False diff --git a/synapse/rest/media/v1/_base.py b/synapse/rest/media/v1/_base.py index bce077becf..95fa95fce3 100644 --- a/synapse/rest/media/v1/_base.py +++ b/synapse/rest/media/v1/_base.py @@ -45,7 +45,7 @@ def parse_media_id(request): except UnicodeDecodeError: pass return server_name, media_id, file_name - except: + except Exception: raise SynapseError( 404, "Invalid media id token %r" % (request.postpath,), diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index 6b50b45b1f..eed9056a2f 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -310,7 +310,7 @@ class MediaRepository(object): media_length=length, filesystem_id=file_id, ) - except: + except Exception: os.remove(fname) raise diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index 2a3e37fdf4..80114fca0d 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ 
b/synapse/rest/media/v1/preview_url_resource.py @@ -367,7 +367,7 @@ class PreviewUrlResource(Resource): dirs = self.filepaths.url_cache_filepath_dirs_to_delete(media_id) for dir in dirs: os.rmdir(dir) - except: + except Exception: pass yield self.store.delete_url_cache(removed_media) @@ -397,7 +397,7 @@ class PreviewUrlResource(Resource): dirs = self.filepaths.url_cache_filepath_dirs_to_delete(media_id) for dir in dirs: os.rmdir(dir) - except: + except Exception: pass thumbnail_dir = self.filepaths.url_cache_thumbnail_directory(media_id) @@ -415,7 +415,7 @@ class PreviewUrlResource(Resource): dirs = self.filepaths.url_cache_thumbnail_dirs_to_delete(media_id) for dir in dirs: os.rmdir(dir) - except: + except Exception: pass yield self.store.delete_url_cache_media(removed_media) diff --git a/synapse/state.py b/synapse/state.py index dcdcdef65e..9e624b4937 100644 --- a/synapse/state.py +++ b/synapse/state.py @@ -560,7 +560,7 @@ def _resolve_with_state(unconflicted_state_ids, conflicted_state_ds, auth_event_ resolved_state = _resolve_state_events( conflicted_state, auth_events ) - except: + except Exception: logger.exception("Failed to resolve state") raise diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 5124a833a5..6caf7b3356 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -103,7 +103,7 @@ class LoggingTransaction(object): "[SQL values] {%s} %r", self.name, args[0] ) - except: + except Exception: # Don't let logging failures stop SQL from working pass diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index 7157fb1dfb..a6e6f52a6a 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -98,7 +98,7 @@ class BackgroundUpdateStore(SQLBaseStore): result = yield self.do_next_background_update( self.BACKGROUND_UPDATE_DURATION_MS ) - except: + except Exception: logger.exception("Error doing update") else: if result is None: diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 637640ec2a..4298d8baf1 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -1481,7 +1481,7 @@ class EventsStore(SQLBaseStore): for i in ids if i in res ]) - except: + except Exception: logger.exception("Failed to callback") with PreserveLoggingContext(): reactor.callFromThread(fire, event_list, row_dict) diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index ccaaabcfa0..817c2185c8 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -66,7 +66,7 @@ def prepare_database(db_conn, database_engine, config): cur.close() db_conn.commit() - except: + except Exception: db_conn.rollback() raise diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index a0fc9a6867..3fa8019eb7 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -636,7 +636,7 @@ class RoomMemberStore(SQLBaseStore): room_id = row["room_id"] try: content = json.loads(row["content"]) - except: + except Exception: continue display_name = content.get("displayname", None) diff --git a/synapse/storage/schema/delta/30/as_users.py b/synapse/storage/schema/delta/30/as_users.py index 5b7d8d1ab5..c53e53c94f 100644 --- a/synapse/storage/schema/delta/30/as_users.py +++ b/synapse/storage/schema/delta/30/as_users.py @@ -22,7 +22,7 @@ def run_create(cur, database_engine, *args, **kwargs): # NULL indicates user was not registered by an appservice. 
try: cur.execute("ALTER TABLE users ADD COLUMN appservice_id TEXT") - except: + except Exception: # Maybe we already added the column? Hope so... pass diff --git a/synapse/storage/search.py b/synapse/storage/search.py index 8f2b3c4435..05d4ef586e 100644 --- a/synapse/storage/search.py +++ b/synapse/storage/search.py @@ -81,7 +81,7 @@ class SearchStore(BackgroundUpdateStore): etype = row["type"] try: content = json.loads(row["content"]) - except: + except Exception: continue if etype == "m.room.message": @@ -407,7 +407,7 @@ class SearchStore(BackgroundUpdateStore): origin_server_ts, stream = pagination_token.split(",") origin_server_ts = int(origin_server_ts) stream = int(stream) - except: + except Exception: raise SynapseError(400, "Invalid pagination token") clauses.append( diff --git a/synapse/streams/config.py b/synapse/streams/config.py index 4f089bfb94..ca78e551cb 100644 --- a/synapse/streams/config.py +++ b/synapse/streams/config.py @@ -80,13 +80,13 @@ class PaginationConfig(object): from_tok = None # For backwards compat. elif from_tok: from_tok = StreamToken.from_string(from_tok) - except: + except Exception: raise SynapseError(400, "'from' paramater is invalid") try: if to_tok: to_tok = StreamToken.from_string(to_tok) - except: + except Exception: raise SynapseError(400, "'to' paramater is invalid") limit = get_param("limit", None) @@ -98,7 +98,7 @@ class PaginationConfig(object): try: return PaginationConfig(from_tok, to_tok, direction, limit) - except: + except Exception: logger.exception("Failed to create pagination config") raise SynapseError(400, "Invalid request.") diff --git a/synapse/types.py b/synapse/types.py index 1eeda0b72f..6e76c016d9 100644 --- a/synapse/types.py +++ b/synapse/types.py @@ -127,7 +127,7 @@ class DomainSpecificString( try: cls.from_string(s) return True - except: + except Exception: return False __str__ = to_string @@ -213,7 +213,7 @@ class StreamToken( # i.e. 
old token from before receipt_key keys.append("0") return cls(*keys) - except: + except Exception: raise SynapseError(400, "Invalid Token") def to_string(self): @@ -299,7 +299,7 @@ class RoomStreamToken(namedtuple("_StreamToken", "topological stream")): if string[0] == 't': parts = string[1:].split('-', 1) return cls(topological=int(parts[0]), stream=int(parts[1])) - except: + except Exception: pass raise SynapseError(400, "Invalid token %r" % (string,)) @@ -308,7 +308,7 @@ class RoomStreamToken(namedtuple("_StreamToken", "topological stream")): try: if string[0] == 's': return cls(topological=None, stream=int(string[1:])) - except: + except Exception: pass raise SynapseError(400, "Invalid token %r" % (string,)) diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index 2a2360ab5d..cd1ce62bdb 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -82,7 +82,7 @@ class Clock(object): def cancel_call_later(self, timer, ignore_errs=False): try: timer.cancel() - except: + except Exception: if not ignore_errs: raise @@ -97,12 +97,12 @@ class Clock(object): try: ret_deferred.errback(e) - except: + except Exception: pass try: given_deferred.cancel() - except: + except Exception: pass timer = None @@ -110,7 +110,7 @@ class Clock(object): def cancel(res): try: self.cancel_call_later(timer) - except: + except Exception: pass return res @@ -119,7 +119,7 @@ class Clock(object): def success(res): try: ret_deferred.callback(res) - except: + except Exception: pass return res @@ -127,7 +127,7 @@ class Clock(object): def err(res): try: ret_deferred.errback(res) - except: + except Exception: pass given_deferred.addCallbacks(callback=success, errback=err) diff --git a/synapse/util/async.py b/synapse/util/async.py index a0a9039475..1a884e96ee 100644 --- a/synapse/util/async.py +++ b/synapse/util/async.py @@ -73,7 +73,7 @@ class ObservableDeferred(object): try: # TODO: Handle errors here. self._observers.pop().callback(r) - except: + except Exception: pass return r @@ -83,7 +83,7 @@ class ObservableDeferred(object): try: # TODO: Handle errors here. self._observers.pop().errback(f) - except: + except Exception: pass if consumeErrors: @@ -205,7 +205,7 @@ class Linearizer(object): try: with PreserveLoggingContext(): yield current_defer - except: + except Exception: logger.exception("Unexpected exception in Linearizer") logger.info("Acquired linearizer lock %r for key %r", self.name, diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py index 990216145e..9683cc7265 100644 --- a/synapse/util/logcontext.py +++ b/synapse/util/logcontext.py @@ -42,7 +42,7 @@ try: def get_thread_resource_usage(): return resource.getrusage(RUSAGE_THREAD) -except: +except Exception: # If the system doesn't support resource.getrusage(RUSAGE_THREAD) then we # won't track resource usage by returning None. 
def get_thread_resource_usage(): diff --git a/synapse/util/retryutils.py b/synapse/util/retryutils.py index 4fa9d1a03c..1adedbb361 100644 --- a/synapse/util/retryutils.py +++ b/synapse/util/retryutils.py @@ -189,7 +189,7 @@ class RetryDestinationLimiter(object): yield self.store.set_destination_retry_timings( self.destination, retry_last_ts, self.retry_interval ) - except: + except Exception: logger.exception( "Failed to store set_destination_retry_timings", ) diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py index 9e98d0e330..79f569e787 100644 --- a/tests/storage/test_appservice.py +++ b/tests/storage/test_appservice.py @@ -65,7 +65,7 @@ class ApplicationServiceStoreTestCase(unittest.TestCase): for f in self.as_yaml_files: try: os.remove(f) - except: + except Exception: pass def _add_appservice(self, as_token, id, url, hs_token, sender): diff --git a/tests/utils.py b/tests/utils.py index 3c81a3e16d..d2ebce4b2e 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -184,7 +184,7 @@ class MockHttpResource(HttpServer): mock_request.args = urlparse.parse_qs(path.split('?')[1]) mock_request.path = path.split('?')[0] path = mock_request.path - except: + except Exception: pass for (method, pattern, func) in self.callbacks: @@ -364,13 +364,13 @@ class MemoryDataStore(object): return { "name": self.tokens_to_users[token], } - except: + except Exception: raise StoreError(400, "User does not exist.") def get_room(self, room_id): try: return self.rooms[room_id] - except: + except Exception: return None def store_room(self, room_id, room_creator_user_id, is_public): @@ -499,7 +499,7 @@ class DeferredMockCallable(object): for _, _, d in self.expectations: try: d.errback(failure) - except: + except Exception: pass raise failure From 0be99858f34165258af5d2865818f5baee35118b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 23 Oct 2017 15:56:38 +0100 Subject: [PATCH 0326/1637] fix vars named `l` MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit E741 says "do not use variables named ‘l’, ‘O’, or ‘I’". --- synapse/util/__init__.py | 6 +++--- synapse/util/wheel_timer.py | 5 +---- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index cd1ce62bdb..756d8ffa32 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -59,9 +59,9 @@ class Clock(object): f(function): The function to call repeatedly. msec(float): How long to wait between calls in milliseconds. 
""" - l = task.LoopingCall(f) - l.start(msec / 1000.0, now=False) - return l + call = task.LoopingCall(f) + call.start(msec / 1000.0, now=False) + return call def call_later(self, delay, callback, *args, **kwargs): """Call something later diff --git a/synapse/util/wheel_timer.py b/synapse/util/wheel_timer.py index 7412fc57a4..b70f9a6b0a 100644 --- a/synapse/util/wheel_timer.py +++ b/synapse/util/wheel_timer.py @@ -91,7 +91,4 @@ class WheelTimer(object): return ret def __len__(self): - l = 0 - for entry in self.entries: - l += len(entry.queue) - return l + return sum(len(entry.queue) for entry in self.entries) From 6ba4fabdb9feadec7d83bf3854e427482ca05762 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 24 Oct 2017 14:15:27 +0100 Subject: [PATCH 0327/1637] Bump version and changelog --- CHANGES.rst | 8 ++++++++ synapse/__init__.py | 2 +- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index 80518b7bae..4911cfa284 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,11 @@ +Changes in synapse v0.24.1 (2017-10-24) +======================================= + +Bug fixes: + +* Fix updating group profiles over federation (PR #2567) + + Changes in synapse v0.24.0 (2017-10-23) ======================================= diff --git a/synapse/__init__.py b/synapse/__init__.py index c867d1cfd8..e74abe0130 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -16,4 +16,4 @@ """ This is a reference implementation of a Matrix home server. """ -__version__ = "0.24.0" +__version__ = "0.24.1" From efd0f5a3c58b62344c6981c4076eb23873ad57e3 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Tue, 24 Oct 2017 18:49:44 +0100 Subject: [PATCH 0328/1637] tip for generating tls_fingerprints --- synapse/config/tls.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/synapse/config/tls.py b/synapse/config/tls.py index 247f18f454..4748f71c2f 100644 --- a/synapse/config/tls.py +++ b/synapse/config/tls.py @@ -109,6 +109,12 @@ class TlsConfig(Config): # key. It may be necessary to publish the fingerprints of a new # certificate and wait until the "valid_until_ts" of the previous key # responses have passed before deploying it. + # + # You can calculate a fingerprint from a given TLS listener via: + # openssl s_client -connect $host:$port < /dev/null 2> /dev/null | + # openssl x509 -outform DER | openssl sha256 -binary | base64 | tr -d '=' + # or by checking matrix.org/federationtester/api/report?server_name=$host + # tls_fingerprints: [] # tls_fingerprints: [{"sha256": ""}] """ % locals() From 33a9026cdf19dc13a3492e838f0893755d380981 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 25 Oct 2017 10:26:06 +0100 Subject: [PATCH 0329/1637] Add logging and fix log contexts for publicRooms --- synapse/handlers/room_list.py | 2 ++ synapse/util/caches/response_cache.py | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py index 41e1781df7..ae7c611170 100644 --- a/synapse/handlers/room_list.py +++ b/synapse/handlers/room_list.py @@ -70,6 +70,7 @@ class RoomListHandler(BaseHandler): if search_filter: # We explicitly don't bother caching searches or requests for # appservice specific lists. 
+ logger.info("Bypassing cache as search request.") return self._get_public_room_list( limit, since_token, search_filter, network_tuple=network_tuple, ) @@ -77,6 +78,7 @@ class RoomListHandler(BaseHandler): key = (limit, since_token, network_tuple) result = self.response_cache.get(key) if not result: + logger.info("No cached result, calculating one.") result = self.response_cache.set( key, self._get_public_room_list( diff --git a/synapse/util/caches/response_cache.py b/synapse/util/caches/response_cache.py index 00af539880..df0ae099c8 100644 --- a/synapse/util/caches/response_cache.py +++ b/synapse/util/caches/response_cache.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +from synapse.util import logcontext from synapse.util.async import ObservableDeferred @@ -52,4 +53,4 @@ class ResponseCache(object): return r result.addBoth(remove) - return result.observe() + return logcontext.make_deferred_yieldable(result.observe()) From 2a7e9faeec9af8723b9613fef3b3059b1fe777f5 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 25 Oct 2017 15:21:08 +0100 Subject: [PATCH 0330/1637] Do logcontexts outside ResponseCache --- synapse/appservice/api.py | 10 +++++++--- synapse/federation/federation_server.py | 8 +++++--- synapse/handlers/room_list.py | 7 +++++-- synapse/handlers/sync.py | 6 +++--- synapse/util/caches/response_cache.py | 3 +-- 5 files changed, 21 insertions(+), 13 deletions(-) diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py index 6893610e71..40c433d7ae 100644 --- a/synapse/appservice/api.py +++ b/synapse/appservice/api.py @@ -18,6 +18,7 @@ from synapse.api.constants import ThirdPartyEntityKind from synapse.api.errors import CodeMessageException from synapse.http.client import SimpleHttpClient from synapse.events.utils import serialize_event +from synapse.util.logcontext import preserve_fn, make_deferred_yieldable from synapse.util.caches.response_cache import ResponseCache from synapse.types import ThirdPartyInstanceID @@ -192,9 +193,12 @@ class ApplicationServiceApi(SimpleHttpClient): defer.returnValue(None) key = (service.id, protocol) - return self.protocol_meta_cache.get(key) or ( - self.protocol_meta_cache.set(key, _get()) - ) + result = self.protocol_meta_cache.get(key) + if not result: + result = self.protocol_meta_cache.set( + key, preserve_fn(_get)() + ) + return make_deferred_yieldable(result) @defer.inlineCallbacks def push_bulk(self, service, events, txn_id=None): diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index e15228e70b..a2327f24b6 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -18,6 +18,7 @@ from .federation_base import FederationBase from .units import Transaction, Edu from synapse.util import async +from synapse.util.logcontext import make_deferred_yieldable, preserve_fn from synapse.util.logutils import log_function from synapse.util.caches.response_cache import ResponseCache from synapse.events import FrozenEvent @@ -253,12 +254,13 @@ class FederationServer(FederationBase): result = self._state_resp_cache.get((room_id, event_id)) if not result: with (yield self._server_linearizer.queue((origin, room_id))): - resp = yield self._state_resp_cache.set( + d = self._state_resp_cache.set( (room_id, event_id), - self._on_context_state_request_compute(room_id, event_id) + preserve_fn(self._on_context_state_request_compute)(room_id, event_id) ) + resp = yield 
make_deferred_yieldable(d) else: - resp = yield result + resp = yield make_deferred_yieldable(result) defer.returnValue((200, resp)) diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py index ae7c611170..5a47254b56 100644 --- a/synapse/handlers/room_list.py +++ b/synapse/handlers/room_list.py @@ -20,6 +20,7 @@ from ._base import BaseHandler from synapse.api.constants import ( EventTypes, JoinRules, ) +from synapse.util.logcontext import make_deferred_yieldable, preserve_fn from synapse.util.async import concurrently_execute from synapse.util.caches.descriptors import cachedInlineCallbacks from synapse.util.caches.response_cache import ResponseCache @@ -81,11 +82,13 @@ class RoomListHandler(BaseHandler): logger.info("No cached result, calculating one.") result = self.response_cache.set( key, - self._get_public_room_list( + preserve_fn(self._get_public_room_list)( limit, since_token, network_tuple=network_tuple ) ) - return result + else: + logger.info("Using cached result.") + return make_deferred_yieldable(result) @defer.inlineCallbacks def _get_public_room_list(self, limit=None, since_token=None, diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 219529936f..b12988f3c9 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -15,7 +15,7 @@ from synapse.api.constants import Membership, EventTypes from synapse.util.async import concurrently_execute -from synapse.util.logcontext import LoggingContext +from synapse.util.logcontext import LoggingContext, make_deferred_yieldable, preserve_fn from synapse.util.metrics import Measure, measure_func from synapse.util.caches.response_cache import ResponseCache from synapse.push.clientformat import format_push_rules_for_user @@ -184,11 +184,11 @@ class SyncHandler(object): if not result: result = self.response_cache.set( sync_config.request_key, - self._wait_for_sync_for_user( + preserve_fn(self._wait_for_sync_for_user)( sync_config, since_token, timeout, full_state ) ) - return result + return make_deferred_yieldable(result) @defer.inlineCallbacks def _wait_for_sync_for_user(self, sync_config, since_token, timeout, diff --git a/synapse/util/caches/response_cache.py b/synapse/util/caches/response_cache.py index df0ae099c8..00af539880 100644 --- a/synapse/util/caches/response_cache.py +++ b/synapse/util/caches/response_cache.py @@ -13,7 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from synapse.util import logcontext from synapse.util.async import ObservableDeferred @@ -53,4 +52,4 @@ class ResponseCache(object): return r result.addBoth(remove) - return logcontext.make_deferred_yieldable(result.observe()) + return result.observe() From 5287e57c86b180671c956802861fec9fcd843e39 Mon Sep 17 00:00:00 2001 From: Maxime Vaillancourt Date: Wed, 25 Oct 2017 20:44:34 -0400 Subject: [PATCH 0331/1637] Ignore noscript tags when generating URL previews --- synapse/rest/media/v1/preview_url_resource.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index 80114fca0d..7907a9d17a 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -520,7 +520,14 @@ def _calc_og(tree, media_uri): from lxml import etree TAGS_TO_REMOVE = ( - "header", "nav", "aside", "footer", "script", "style", etree.Comment + "header", + "nav", + "aside", + "footer", + "script", + "noscript", + "style", + etree.Comment ) # Split all the text nodes into paragraphs (by splitting on new From 37d766aeddf63450fc12fd2078bcaabe98042dd8 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 26 Oct 2017 09:59:50 +0100 Subject: [PATCH 0332/1637] Fix port script We changed _simple_update_one_txn to use _simple_update_txn but didn't yank it out in the port script. Fixes #2565 --- scripts/synapse_port_db | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db index dc7fe940e8..a7a50e4d36 100755 --- a/scripts/synapse_port_db +++ b/scripts/synapse_port_db @@ -112,6 +112,7 @@ class Store(object): _simple_update_one = SQLBaseStore.__dict__["_simple_update_one"] _simple_update_one_txn = SQLBaseStore.__dict__["_simple_update_one_txn"] + _simple_update_txn = SQLBaseStore.__dict__["_simple_update_txn"] def runInteraction(self, desc, func, *args, **kwargs): def r(conn): From 351cc35342cc1edbb567b929da05c47d59baa2d1 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 26 Oct 2017 10:28:41 +0100 Subject: [PATCH 0333/1637] code_style.rst: a couple of tidyups --- docs/code_style.rst | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/docs/code_style.rst b/docs/code_style.rst index 8d73d17beb..38d52abd47 100644 --- a/docs/code_style.rst +++ b/docs/code_style.rst @@ -1,5 +1,5 @@ -Basically, PEP8 - +- Everything should comply with PEP8. Code should pass + ``pep8 --max-line-length=100`` without any warnings. - NEVER tabs. 4 spaces to indent. - Max line width: 79 chars (with flexibility to overflow by a "few chars" if the overflowing content is not semantically significant and avoids an @@ -43,10 +43,10 @@ Basically, PEP8 together, or want to deliberately extend or preserve vertical/horizontal space) -Comments should follow the `google code style `_. -This is so that we can generate documentation with -`sphinx `_. See the -`examples `_ -in the sphinx documentation. - -Code should pass pep8 --max-line-length=100 without any warnings. +- Comments should follow the `google code style + `_. + This is so that we can generate documentation with `sphinx + `_. See the + `examples + `_ + in the sphinx documentation. 
From 566e21eac80f1bf921da5876a0fa03ecc412d551 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 26 Oct 2017 11:39:54 +0100 Subject: [PATCH 0334/1637] Update room_list.py --- synapse/handlers/room_list.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py index 5a47254b56..2cf34e51cb 100644 --- a/synapse/handlers/room_list.py +++ b/synapse/handlers/room_list.py @@ -87,7 +87,7 @@ class RoomListHandler(BaseHandler): ) ) else: - logger.info("Using cached result.") + logger.info("Using cached deferred result.") return make_deferred_yieldable(result) @defer.inlineCallbacks From f7f6bfaae45c0ac01132ea99b15008d70a7cd52f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 26 Oct 2017 10:42:06 +0100 Subject: [PATCH 0335/1637] code_style: more formatting --- docs/code_style.rst | 93 ++++++++++++++++++++++++++++----------------- 1 file changed, 58 insertions(+), 35 deletions(-) diff --git a/docs/code_style.rst b/docs/code_style.rst index 38d52abd47..a7a71686ba 100644 --- a/docs/code_style.rst +++ b/docs/code_style.rst @@ -1,49 +1,72 @@ - Everything should comply with PEP8. Code should pass ``pep8 --max-line-length=100`` without any warnings. -- NEVER tabs. 4 spaces to indent. -- Max line width: 79 chars (with flexibility to overflow by a "few chars" if + +- **Indenting**: + + - NEVER tabs. 4 spaces to indent. + + - follow PEP8; either hanging indent or multiline-visual indent depending + on the size and shape of the arguments and what makes more sense to the + author. In other words, both this:: + + print("I am a fish %s" % "moo") + + and this:: + + print("I am a fish %s" % + "moo") + + and this:: + + print( + "I am a fish %s" % + "moo", + ) + + ...are valid, although given each one takes up 2x more vertical space than + the previous, it's up to the author's discretion as to which layout makes + most sense for their function invocation. (e.g. if they want to add + comments per-argument, or put expressions in the arguments, or group + related arguments together, or want to deliberately extend or preserve + vertical/horizontal space) + +- **Line length**: + + Max line length is 79 chars (with flexibility to overflow by a "few chars" if the overflowing content is not semantically significant and avoids an explosion of vertical whitespace). -- Use camel case for class and type names -- Use underscores for functions and variables. -- Use double quotes. -- Use parentheses instead of '\\' for line continuation where ever possible - (which is pretty much everywhere) -- There should be max a single new line between: + + Use parentheses instead of ``\`` for line continuation where ever possible + (which is pretty much everywhere). + +- **Naming**: + + - Use camel case for class and type names + - Use underscores for functions and variables. + +- Use double quotes ``"foo"`` rather than single quotes ``'foo'``. + +- **Blank lines**: + + - There should be max a single new line between: + - statements - functions in a class -- There should be two new lines between: + + - There should be two new lines between: + - definitions in a module (e.g., between different classes) -- There should be spaces where spaces should be and not where there shouldn't be: - - a single space after a comma - - a single space before and after for '=' when used as assignment - - no spaces before and after for '=' for default values and keyword arguments. 
-- Indenting must follow PEP8; either hanging indent or multiline-visual indent - depending on the size and shape of the arguments and what makes more sense to - the author. In other words, both this:: - print("I am a fish %s" % "moo") +- **Whitespace**: - and this:: + There should be spaces where spaces should be and not where there shouldn't + be: - print("I am a fish %s" % - "moo") + - a single space after a comma + - a single space before and after for '=' when used as assignment + - no spaces before and after for '=' for default values and keyword arguments. - and this:: - - print( - "I am a fish %s" % - "moo" - ) - - ...are valid, although given each one takes up 2x more vertical space than - the previous, it's up to the author's discretion as to which layout makes most - sense for their function invocation. (e.g. if they want to add comments - per-argument, or put expressions in the arguments, or group related arguments - together, or want to deliberately extend or preserve vertical/horizontal - space) - -- Comments should follow the `google code style +- **Comments**: should follow the `google code style `_. This is so that we can generate documentation with `sphinx `_. See the From 1eb300e1fcc2ec05c33420033f1d2acdf46d7e20 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 26 Oct 2017 10:58:34 +0100 Subject: [PATCH 0336/1637] Document import rules --- docs/code_style.rst | 44 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/docs/code_style.rst b/docs/code_style.rst index a7a71686ba..9c52cb3182 100644 --- a/docs/code_style.rst +++ b/docs/code_style.rst @@ -73,3 +73,47 @@ `examples `_ in the sphinx documentation. + +- **Imports**: + + - Prefer to import classes and functions than packages or modules. + + Example:: + + from synapse.types import UserID + ... + user_id = UserID(local, server) + + is preferred over:: + + from synapse import types + ... + user_id = types.UserID(local, server) + + (or any other variant). + + This goes against the advice in the Google style guide, but it means that + errors in the name are caught early (at import time). + + - Multiple imports from the same package can be combined onto one line:: + + from synapse.types import GroupID, RoomID, UserID + + An effort should be made to keep the individual imports in alphabetical + order. + + If the list becomes long, wrap it with parentheses and split it over + multiple lines. + + - As per `PEP-8 `_, + imports should be grouped in the following order, with a blank line between + each group: + + 1. standard library imports + 2. related third party imports + 3. local application/library specific imports + + - Imports within each group should be sorted alphabetically by module name. + + - Avoid wildcard imports (``from synapse.types import *``) and relative + imports (``from .types import UserID``). 
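To make the import conventions above concrete, a module header that follows them might look like this (a hypothetical sketch written for illustration only; the particular modules and names are assumptions, not code taken from any patch in this series):

    # Standard library imports first, alphabetically ordered.
    import itertools
    import logging

    # Related third-party imports second.
    from twisted.internet import defer

    # Local application imports last. Classes and functions are imported
    # directly, and multiple imports from one package share a line.
    from synapse.api.errors import SynapseError
    from synapse.types import GroupID, RoomID, UserID

    logger = logging.getLogger(__name__)

Importing names directly (``UserID`` rather than ``synapse.types``) means a misspelt name fails at import time rather than at call time, which is the rationale the style guide gives for preferring it.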
From 9b436c8b4ce0967c47da117d12f6b406f08d3172 Mon Sep 17 00:00:00 2001 From: Krombel Date: Thu, 26 Oct 2017 15:22:50 +0200 Subject: [PATCH 0337/1637] register some /unstable endpoints in /r0 as well --- synapse/rest/client/v2_alpha/devices.py | 7 +++---- synapse/rest/client/v2_alpha/keys.py | 18 ++++-------------- synapse/rest/client/v2_alpha/notifications.py | 2 +- synapse/rest/client/v2_alpha/sendtodevice.py | 2 +- 4 files changed, 9 insertions(+), 20 deletions(-) diff --git a/synapse/rest/client/v2_alpha/devices.py b/synapse/rest/client/v2_alpha/devices.py index b57ba95d24..2a2438b7dc 100644 --- a/synapse/rest/client/v2_alpha/devices.py +++ b/synapse/rest/client/v2_alpha/devices.py @@ -25,7 +25,7 @@ logger = logging.getLogger(__name__) class DevicesRestServlet(servlet.RestServlet): - PATTERNS = client_v2_patterns("/devices$", releases=[], v2_alpha=False) + PATTERNS = client_v2_patterns("/devices$", v2_alpha=False) def __init__(self, hs): """ @@ -51,7 +51,7 @@ class DeleteDevicesRestServlet(servlet.RestServlet): API for bulk deletion of devices. Accepts a JSON object with a devices key which lists the device_ids to delete. Requires user interactive auth. """ - PATTERNS = client_v2_patterns("/delete_devices", releases=[], v2_alpha=False) + PATTERNS = client_v2_patterns("/delete_devices", v2_alpha=False) def __init__(self, hs): super(DeleteDevicesRestServlet, self).__init__() @@ -93,8 +93,7 @@ class DeleteDevicesRestServlet(servlet.RestServlet): class DeviceRestServlet(servlet.RestServlet): - PATTERNS = client_v2_patterns("/devices/(?P<device_id>[^/]*)$", - releases=[], v2_alpha=False) + PATTERNS = client_v2_patterns("/devices/(?P<device_id>[^/]*)$", v2_alpha=False) def __init__(self, hs): """ diff --git a/synapse/rest/client/v2_alpha/keys.py b/synapse/rest/client/v2_alpha/keys.py index 943e87e7fd..3cc87ea63f 100644 --- a/synapse/rest/client/v2_alpha/keys.py +++ b/synapse/rest/client/v2_alpha/keys.py @@ -53,8 +53,7 @@ class KeyUploadServlet(RestServlet): }, } """ - PATTERNS = client_v2_patterns("/keys/upload(/(?P<device_id>[^/]+))?$", - releases=()) + PATTERNS = client_v2_patterns("/keys/upload(/(?P<device_id>[^/]+))?$") def __init__(self, hs): """ @@ -128,10 +127,7 @@ class KeyQueryServlet(RestServlet): } } } } } } """ - PATTERNS = client_v2_patterns( - "/keys/query$", - releases=() - ) + PATTERNS = client_v2_patterns("/keys/query$") def __init__(self, hs): """ @@ -160,10 +156,7 @@ class KeyChangesServlet(RestServlet): 200 OK { "changed": ["@foo:example.com"] } """ - PATTERNS = client_v2_patterns( - "/keys/changes$", - releases=() - ) + PATTERNS = client_v2_patterns("/keys/changes$") def __init__(self, hs): """ @@ -213,10 +206,7 @@ class OneTimeKeyServlet(RestServlet): } } } } """ - PATTERNS = client_v2_patterns( - "/keys/claim$", - releases=() - ) + PATTERNS = client_v2_patterns("/keys/claim$") def __init__(self, hs): super(OneTimeKeyServlet, self).__init__() diff --git a/synapse/rest/client/v2_alpha/notifications.py b/synapse/rest/client/v2_alpha/notifications.py index fd2a3d69d4..ec170109fe 100644 --- a/synapse/rest/client/v2_alpha/notifications.py +++ b/synapse/rest/client/v2_alpha/notifications.py @@ -30,7 +30,7 @@ logger = logging.getLogger(__name__) class NotificationsServlet(RestServlet): - PATTERNS = client_v2_patterns("/notifications$", releases=()) + PATTERNS = client_v2_patterns("/notifications$") def __init__(self, hs): super(NotificationsServlet, self).__init__() diff --git a/synapse/rest/client/v2_alpha/sendtodevice.py b/synapse/rest/client/v2_alpha/sendtodevice.py index d607bd2970..90bdb1db15 100644 ---
a/synapse/rest/client/v2_alpha/sendtodevice.py +++ b/synapse/rest/client/v2_alpha/sendtodevice.py @@ -29,7 +29,7 @@ logger = logging.getLogger(__name__) class SendToDeviceRestServlet(servlet.RestServlet): PATTERNS = client_v2_patterns( "/sendToDevice/(?P<message_type>[^/]*)/(?P<txn_id>[^/]*)$", - releases=[], v2_alpha=False + v2_alpha=False ) def __init__(self, hs): From 8299b323eee11dbfebb7c97bfcd16281b874be1d Mon Sep 17 00:00:00 2001 From: Krombel Date: Thu, 26 Oct 2017 16:58:20 +0200 Subject: [PATCH 0338/1637] add release endpoints for /thirdparty --- synapse/rest/client/v2_alpha/thirdparty.py | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/synapse/rest/client/v2_alpha/thirdparty.py b/synapse/rest/client/v2_alpha/thirdparty.py index 6fceb23e26..6773b9ba60 100644 --- a/synapse/rest/client/v2_alpha/thirdparty.py +++ b/synapse/rest/client/v2_alpha/thirdparty.py @@ -26,7 +26,7 @@ logger = logging.getLogger(__name__) class ThirdPartyProtocolsServlet(RestServlet): - PATTERNS = client_v2_patterns("/thirdparty/protocols", releases=()) + PATTERNS = client_v2_patterns("/thirdparty/protocols") def __init__(self, hs): super(ThirdPartyProtocolsServlet, self).__init__() @@ -43,8 +43,7 @@ class ThirdPartyProtocolsServlet(RestServlet): class ThirdPartyProtocolServlet(RestServlet): - PATTERNS = client_v2_patterns("/thirdparty/protocol/(?P<protocol>[^/]+)$", - releases=()) + PATTERNS = client_v2_patterns("/thirdparty/protocol/(?P<protocol>[^/]+)$") def __init__(self, hs): super(ThirdPartyProtocolServlet, self).__init__() @@ -66,8 +65,7 @@ class ThirdPartyProtocolServlet(RestServlet): class ThirdPartyUserServlet(RestServlet): - PATTERNS = client_v2_patterns("/thirdparty/user(/(?P<protocol>[^/]+))?$", - releases=()) + PATTERNS = client_v2_patterns("/thirdparty/user(/(?P<protocol>[^/]+))?$") def __init__(self, hs): super(ThirdPartyUserServlet, self).__init__() @@ -90,8 +88,7 @@ class ThirdPartyUserServlet(RestServlet): class ThirdPartyLocationServlet(RestServlet): - PATTERNS = client_v2_patterns("/thirdparty/location(/(?P<protocol>[^/]+))?$", - releases=()) + PATTERNS = client_v2_patterns("/thirdparty/location(/(?P<protocol>[^/]+))?$") def __init__(self, hs): super(ThirdPartyLocationServlet, self).__init__() From f7f90e0c8da613a833e5dcd3fa130a986fa5475c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 26 Oct 2017 16:45:20 +0100 Subject: [PATCH 0339/1637] Fix error when running synapse with no logfile Fixes 'UnboundLocalError: local variable 'sighup' referenced before assignment' --- synapse/config/logger.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/synapse/config/logger.py b/synapse/config/logger.py index 2dbeafa9dd..a1d6e4d4f7 100644 --- a/synapse/config/logger.py +++ b/synapse/config/logger.py @@ -148,8 +148,8 @@ def setup_logging(config, use_worker_options=False): "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s" " - %(message)s" ) - if log_config is None: + if log_config is None: level = logging.INFO level_for_storage = logging.INFO if config.verbosity: @@ -176,6 +176,10 @@ def setup_logging(config, use_worker_options=False): logger.info("Opened new log file due to SIGHUP") else: handler = logging.StreamHandler() + + def sighup(signum, stack): + pass + handler.setFormatter(formatter) handler.addFilter(LoggingContextFilter(request="")) From 9b2feef9eb9502bf07d51378c75fc6b690a15676 Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Thu, 26 Oct 2017 16:51:32 +0100 Subject: [PATCH 0340/1637] Add is_public to groups table to allow for private groups Prevent group API access to non-members for private
groups Also make all the group code paths consistent with `requester_user_id` always being the User ID of the requesting user. --- synapse/groups/groups_server.py | 114 +++++++++--------- synapse/rest/client/v2_alpha/groups.py | 80 ++++++------ .../storage/schema/delta/46/group_server.sql | 17 +++ 3 files changed, 116 insertions(+), 95 deletions(-) create mode 100644 synapse/storage/schema/delta/46/group_server.sql diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 23beb3187e..91c0b26107 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -49,7 +49,7 @@ class GroupsServerHandler(object): hs.get_groups_attestation_renewer() @defer.inlineCallbacks - def check_group_is_ours(self, group_id, and_exists=False, and_is_admin=None): + def check_group_is_ours(self, group_id, requester_user_id, and_exists=False, and_is_admin=None): """Check that the group is ours, and optionally if it exists. If group does exist then return group. @@ -67,6 +67,10 @@ class GroupsServerHandler(object): if and_exists and not group: raise SynapseError(404, "Unknown group") + is_user_in_group = yield self.store.is_user_in_group(requester_user_id, group_id) + if group and not is_user_in_group and not group.is_public: + raise SynapseError(404, "Unknown group") + if and_is_admin: is_admin = yield self.store.is_user_admin_in_group(group_id, and_is_admin) if not is_admin: @@ -84,7 +88,7 @@ class GroupsServerHandler(object): A user/room may appear in multiple roles/categories. """ - yield self.check_group_is_ours(group_id, and_exists=True) + yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True) is_user_in_group = yield self.store.is_user_in_group(requester_user_id, group_id) @@ -153,10 +157,10 @@ class GroupsServerHandler(object): }) @defer.inlineCallbacks - def update_group_summary_room(self, group_id, user_id, room_id, category_id, content): + def update_group_summary_room(self, group_id, requester_user_id, room_id, category_id, content): """Add/update a room to the group summary """ - yield self.check_group_is_ours(group_id, and_exists=True, and_is_admin=user_id) + yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id) RoomID.from_string(room_id) # Ensure valid room id @@ -175,10 +179,10 @@ class GroupsServerHandler(object): defer.returnValue({}) @defer.inlineCallbacks - def delete_group_summary_room(self, group_id, user_id, room_id, category_id): + def delete_group_summary_room(self, group_id, requester_user_id, room_id, category_id): """Remove a room from the summary """ - yield self.check_group_is_ours(group_id, and_exists=True, and_is_admin=user_id) + yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id) yield self.store.remove_room_from_summary( group_id=group_id, @@ -189,10 +193,10 @@ class GroupsServerHandler(object): defer.returnValue({}) @defer.inlineCallbacks - def get_group_categories(self, group_id, user_id): + def get_group_categories(self, group_id, requester_user_id): """Get all categories in a group (as seen by user) """ - yield self.check_group_is_ours(group_id, and_exists=True) + yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True) categories = yield self.store.get_group_categories( group_id=group_id, ) defer.returnValue({"categories": categories}) @defer.inlineCallbacks - def get_group_category(self, group_id, user_id, category_id): + def
get_group_category(self, group_id, requester_user_id, category_id): """Get a specific category in a group (as seen by user) """ - yield self.check_group_is_ours(group_id, and_exists=True) + yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True) res = yield self.store.get_group_category( group_id=group_id, @@ -213,10 +217,10 @@ class GroupsServerHandler(object): defer.returnValue(res) @defer.inlineCallbacks - def update_group_category(self, group_id, user_id, category_id, content): + def update_group_category(self, group_id, requester_user_id, category_id, content): """Add/Update a group category """ - yield self.check_group_is_ours(group_id, and_exists=True, and_is_admin=user_id) + yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id) is_public = _parse_visibility_from_contents(content) profile = content.get("profile") @@ -231,10 +235,10 @@ class GroupsServerHandler(object): defer.returnValue({}) @defer.inlineCallbacks - def delete_group_category(self, group_id, user_id, category_id): + def delete_group_category(self, group_id, requester_user_id, category_id): """Delete a group category """ - yield self.check_group_is_ours(group_id, and_exists=True, and_is_admin=user_id) + yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id) yield self.store.remove_group_category( group_id=group_id, @@ -244,10 +248,10 @@ class GroupsServerHandler(object): defer.returnValue({}) @defer.inlineCallbacks - def get_group_roles(self, group_id, user_id): + def get_group_roles(self, group_id, requester_user_id): """Get all roles in a group (as seen by user) """ - yield self.check_group_is_ours(group_id, and_exists=True) + yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True) roles = yield self.store.get_group_roles( group_id=group_id, @@ -255,10 +259,10 @@ class GroupsServerHandler(object): defer.returnValue({"roles": roles}) @defer.inlineCallbacks - def get_group_role(self, group_id, user_id, role_id): + def get_group_role(self, group_id, requester_user_id, role_id): """Get a specific role in a group (as seen by user) """ - yield self.check_group_is_ours(group_id, and_exists=True) + yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True) res = yield self.store.get_group_role( group_id=group_id, @@ -267,10 +271,10 @@ class GroupsServerHandler(object): defer.returnValue(res) @defer.inlineCallbacks - def update_group_role(self, group_id, user_id, role_id, content): + def update_group_role(self, group_id, requester_user_id, role_id, content): """Add/update a role in a group """ - yield self.check_group_is_ours(group_id, and_exists=True, and_is_admin=user_id) + yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id) is_public = _parse_visibility_from_contents(content) @@ -286,10 +290,10 @@ class GroupsServerHandler(object): defer.returnValue({}) @defer.inlineCallbacks - def delete_group_role(self, group_id, user_id, role_id): + def delete_group_role(self, group_id, requester_user_id, role_id): """Remove role from group """ - yield self.check_group_is_ours(group_id, and_exists=True, and_is_admin=user_id) + yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id) yield self.store.remove_group_role( group_id=group_id, @@ -304,7 +308,7 @@ class GroupsServerHandler(object): """Add/update a users entry in the group summary """ yield 
self.check_group_is_ours( - group_id, and_exists=True, and_is_admin=requester_user_id, + group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id, ) order = content.get("order", None) @@ -326,7 +330,7 @@ class GroupsServerHandler(object): """Remove a user from the group summary """ yield self.check_group_is_ours( - group_id, and_exists=True, and_is_admin=requester_user_id, + group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id, ) yield self.store.remove_user_from_summary( @@ -342,7 +346,7 @@ class GroupsServerHandler(object): """Get the group profile as seen by requester_user_id """ - yield self.check_group_is_ours(group_id) + yield self.check_group_is_ours(group_id, requester_user_id) group_description = yield self.store.get_group(group_id) @@ -356,7 +360,7 @@ class GroupsServerHandler(object): """Update the group profile """ yield self.check_group_is_ours( - group_id, and_exists=True, and_is_admin=requester_user_id, + group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id, ) profile = {} @@ -377,7 +381,7 @@ class GroupsServerHandler(object): The ordering is arbitrary at the moment """ - yield self.check_group_is_ours(group_id, and_exists=True) + yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True) is_user_in_group = yield self.store.is_user_in_group(requester_user_id, group_id) @@ -425,7 +429,7 @@ class GroupsServerHandler(object): The ordering is arbitrary at the moment """ - yield self.check_group_is_ours(group_id, and_exists=True) + yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True) is_user_in_group = yield self.store.is_user_in_group(requester_user_id, group_id) @@ -459,7 +463,7 @@ class GroupsServerHandler(object): This returns rooms in order of decreasing number of joined users """ - yield self.check_group_is_ours(group_id, and_exists=True) + yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True) is_user_in_group = yield self.store.is_user_in_group(requester_user_id, group_id) @@ -500,7 +504,7 @@ class GroupsServerHandler(object): RoomID.from_string(room_id) # Ensure valid room id yield self.check_group_is_ours( - group_id, and_exists=True, and_is_admin=requester_user_id + group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id ) is_public = _parse_visibility_from_contents(content) @@ -514,7 +518,7 @@ class GroupsServerHandler(object): """Remove room from group """ yield self.check_group_is_ours( - group_id, and_exists=True, and_is_admin=requester_user_id + group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id ) yield self.store.remove_room_from_group(group_id, room_id) @@ -527,7 +531,7 @@ class GroupsServerHandler(object): """ group = yield self.check_group_is_ours( - group_id, and_exists=True, and_is_admin=requester_user_id + group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id ) # TODO: Check if user knocked @@ -596,35 +600,35 @@ class GroupsServerHandler(object): raise SynapseError(502, "Unknown state returned by HS") @defer.inlineCallbacks - def accept_invite(self, group_id, user_id, content): + def accept_invite(self, group_id, requester_user_id, content): """User tries to accept an invite to the group. 
This is different from them asking to join, and so should error if no invite exists (and they're not a member of the group) """ - yield self.check_group_is_ours(group_id, and_exists=True) + yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True) - if not self.store.is_user_invited_to_local_group(group_id, user_id): + if not self.store.is_user_invited_to_local_group(group_id, requester_user_id): raise SynapseError(403, "User not invited to group") - if not self.hs.is_mine_id(user_id): + if not self.hs.is_mine_id(requester_user_id): remote_attestation = content["attestation"] yield self.attestations.verify_attestation( remote_attestation, - user_id=user_id, + user_id=requester_user_id, group_id=group_id, ) else: remote_attestation = None - local_attestation = self.attestations.create_attestation(group_id, user_id) + local_attestation = self.attestations.create_attestation(group_id, requester_user_id) is_public = _parse_visibility_from_contents(content) yield self.store.add_user_to_group( - group_id, user_id, + group_id, requester_user_id, is_admin=False, is_public=is_public, local_attestation=local_attestation, @@ -637,31 +641,31 @@ class GroupsServerHandler(object): }) @defer.inlineCallbacks - def knock(self, group_id, user_id, content): + def knock(self, group_id, requester_user_id, content): """A user requests becoming a member of the group """ - yield self.check_group_is_ours(group_id, and_exists=True) + yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True) raise NotImplementedError() @defer.inlineCallbacks - def accept_knock(self, group_id, user_id, content): + def accept_knock(self, group_id, requester_user_id, content): """Accept a users knock to the room. Errors if the user hasn't knocked, rather than inviting them. """ - yield self.check_group_is_ours(group_id, and_exists=True) + yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True) raise NotImplementedError() @defer.inlineCallbacks def remove_user_from_group(self, group_id, user_id, requester_user_id, content): - """Remove a user from the group; either a user is leaving or and admin - kicked htem. + """Remove a user from the group; either a user is leaving or an admin + kicked them. 
""" - yield self.check_group_is_ours(group_id, and_exists=True) + yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True) is_kick = False if requester_user_id != user_id: @@ -692,7 +696,7 @@ class GroupsServerHandler(object): defer.returnValue({}) @defer.inlineCallbacks - def create_group(self, group_id, user_id, content): + def create_group(self, group_id, requester_user_id, content): group = yield self.check_group_is_ours(group_id) logger.info("Attempting to create group with ID: %r", group_id) @@ -703,7 +707,7 @@ class GroupsServerHandler(object): if group: raise SynapseError(400, "Group already exists") - is_admin = yield self.auth.is_server_admin(UserID.from_string(user_id)) + is_admin = yield self.auth.is_server_admin(UserID.from_string(requester_user_id)) if not is_admin: if not self.hs.config.enable_group_creation: raise SynapseError( @@ -727,38 +731,38 @@ class GroupsServerHandler(object): yield self.store.create_group( group_id, - user_id, + requester_user_id, name=name, avatar_url=avatar_url, short_description=short_description, long_description=long_description, ) - if not self.hs.is_mine_id(user_id): + if not self.hs.is_mine_id(requester_user_id): remote_attestation = content["attestation"] yield self.attestations.verify_attestation( remote_attestation, - user_id=user_id, + user_id=requester_user_id, group_id=group_id, ) - local_attestation = self.attestations.create_attestation(group_id, user_id) + local_attestation = self.attestations.create_attestation(group_id, requester_user_id) else: local_attestation = None remote_attestation = None yield self.store.add_user_to_group( - group_id, user_id, + group_id, requester_user_id, is_admin=True, is_public=True, # TODO local_attestation=local_attestation, remote_attestation=remote_attestation, ) - if not self.hs.is_mine_id(user_id): + if not self.hs.is_mine_id(requester_user_id): yield self.store.add_remote_profile_cache( - user_id, + requester_user_id, displayname=user_profile.get("displayname"), avatar_url=user_profile.get("avatar_url"), ) diff --git a/synapse/rest/client/v2_alpha/groups.py b/synapse/rest/client/v2_alpha/groups.py index 100f47ca9e..05a40d6941 100644 --- a/synapse/rest/client/v2_alpha/groups.py +++ b/synapse/rest/client/v2_alpha/groups.py @@ -39,20 +39,20 @@ class GroupServlet(RestServlet): @defer.inlineCallbacks def on_GET(self, request, group_id): requester = yield self.auth.get_user_by_req(request) - user_id = requester.user.to_string() + requester_user_id = requester.user.to_string() - group_description = yield self.groups_handler.get_group_profile(group_id, user_id) + group_description = yield self.groups_handler.get_group_profile(group_id, requester_user_id) defer.returnValue((200, group_description)) @defer.inlineCallbacks def on_POST(self, request, group_id): requester = yield self.auth.get_user_by_req(request) - user_id = requester.user.to_string() + requester_user_id = requester.user.to_string() content = parse_json_object_from_request(request) yield self.groups_handler.update_group_profile( - group_id, user_id, content, + group_id, requester_user_id, content, ) defer.returnValue((200, {})) @@ -72,9 +72,9 @@ class GroupSummaryServlet(RestServlet): @defer.inlineCallbacks def on_GET(self, request, group_id): requester = yield self.auth.get_user_by_req(request) - user_id = requester.user.to_string() + requester_user_id = requester.user.to_string() - get_group_summary = yield self.groups_handler.get_group_summary(group_id, user_id) + get_group_summary = yield 
self.groups_handler.get_group_summary(group_id, requester_user_id) defer.returnValue((200, get_group_summary)) @@ -101,11 +101,11 @@ class GroupSummaryRoomsCatServlet(RestServlet): @defer.inlineCallbacks def on_PUT(self, request, group_id, category_id, room_id): requester = yield self.auth.get_user_by_req(request) - user_id = requester.user.to_string() + requester_user_id = requester.user.to_string() content = parse_json_object_from_request(request) resp = yield self.groups_handler.update_group_summary_room( - group_id, user_id, + group_id, requester_user_id, room_id=room_id, category_id=category_id, content=content, @@ -116,10 +116,10 @@ class GroupSummaryRoomsCatServlet(RestServlet): @defer.inlineCallbacks def on_DELETE(self, request, group_id, category_id, room_id): requester = yield self.auth.get_user_by_req(request) - user_id = requester.user.to_string() + requester_user_id = requester.user.to_string() resp = yield self.groups_handler.delete_group_summary_room( - group_id, user_id, + group_id, requester_user_id, room_id=room_id, category_id=category_id, ) @@ -143,10 +143,10 @@ class GroupCategoryServlet(RestServlet): @defer.inlineCallbacks def on_GET(self, request, group_id, category_id): requester = yield self.auth.get_user_by_req(request) - user_id = requester.user.to_string() + requester_user_id = requester.user.to_string() category = yield self.groups_handler.get_group_category( - group_id, user_id, + group_id, requester_user_id, category_id=category_id, ) @@ -155,11 +155,11 @@ class GroupCategoryServlet(RestServlet): @defer.inlineCallbacks def on_PUT(self, request, group_id, category_id): requester = yield self.auth.get_user_by_req(request) - user_id = requester.user.to_string() + requester_user_id = requester.user.to_string() content = parse_json_object_from_request(request) resp = yield self.groups_handler.update_group_category( - group_id, user_id, + group_id, requester_user_id, category_id=category_id, content=content, ) @@ -169,10 +169,10 @@ class GroupCategoryServlet(RestServlet): @defer.inlineCallbacks def on_DELETE(self, request, group_id, category_id): requester = yield self.auth.get_user_by_req(request) - user_id = requester.user.to_string() + requester_user_id = requester.user.to_string() resp = yield self.groups_handler.delete_group_category( - group_id, user_id, + group_id, requester_user_id, category_id=category_id, ) @@ -195,10 +195,10 @@ class GroupCategoriesServlet(RestServlet): @defer.inlineCallbacks def on_GET(self, request, group_id): requester = yield self.auth.get_user_by_req(request) - user_id = requester.user.to_string() + requester_user_id = requester.user.to_string() category = yield self.groups_handler.get_group_categories( - group_id, user_id, + group_id, requester_user_id, ) defer.returnValue((200, category)) @@ -220,10 +220,10 @@ class GroupRoleServlet(RestServlet): @defer.inlineCallbacks def on_GET(self, request, group_id, role_id): requester = yield self.auth.get_user_by_req(request) - user_id = requester.user.to_string() + requester_user_id = requester.user.to_string() category = yield self.groups_handler.get_group_role( - group_id, user_id, + group_id, requester_user_id, role_id=role_id, ) @@ -232,11 +232,11 @@ class GroupRoleServlet(RestServlet): @defer.inlineCallbacks def on_PUT(self, request, group_id, role_id): requester = yield self.auth.get_user_by_req(request) - user_id = requester.user.to_string() + requester_user_id = requester.user.to_string() content = parse_json_object_from_request(request) resp = yield 
self.groups_handler.update_group_role( - group_id, user_id, + group_id, requester_user_id, role_id=role_id, content=content, ) @@ -246,10 +246,10 @@ class GroupRoleServlet(RestServlet): @defer.inlineCallbacks def on_DELETE(self, request, group_id, role_id): requester = yield self.auth.get_user_by_req(request) - user_id = requester.user.to_string() + requester_user_id = requester.user.to_string() resp = yield self.groups_handler.delete_group_role( - group_id, user_id, + group_id, requester_user_id, role_id=role_id, ) @@ -272,10 +272,10 @@ class GroupRolesServlet(RestServlet): @defer.inlineCallbacks def on_GET(self, request, group_id): requester = yield self.auth.get_user_by_req(request) - user_id = requester.user.to_string() + requester_user_id = requester.user.to_string() category = yield self.groups_handler.get_group_roles( - group_id, user_id, + group_id, requester_user_id, ) defer.returnValue((200, category)) @@ -343,9 +343,9 @@ class GroupRoomServlet(RestServlet): @defer.inlineCallbacks def on_GET(self, request, group_id): requester = yield self.auth.get_user_by_req(request) - user_id = requester.user.to_string() + requester_user_id = requester.user.to_string() - result = yield self.groups_handler.get_rooms_in_group(group_id, user_id) + result = yield self.groups_handler.get_rooms_in_group(group_id, requester_user_id) defer.returnValue((200, result)) @@ -364,9 +364,9 @@ class GroupUsersServlet(RestServlet): @defer.inlineCallbacks def on_GET(self, request, group_id): requester = yield self.auth.get_user_by_req(request) - user_id = requester.user.to_string() + requester_user_id = requester.user.to_string() - result = yield self.groups_handler.get_users_in_group(group_id, user_id) + result = yield self.groups_handler.get_users_in_group(group_id, requester_user_id) defer.returnValue((200, result)) @@ -385,9 +385,9 @@ class GroupInvitedUsersServlet(RestServlet): @defer.inlineCallbacks def on_GET(self, request, group_id): requester = yield self.auth.get_user_by_req(request) - user_id = requester.user.to_string() + requester_user_id = requester.user.to_string() - result = yield self.groups_handler.get_invited_users_in_group(group_id, user_id) + result = yield self.groups_handler.get_invited_users_in_group(group_id, requester_user_id) defer.returnValue((200, result)) @@ -407,14 +407,14 @@ class GroupCreateServlet(RestServlet): @defer.inlineCallbacks def on_POST(self, request): requester = yield self.auth.get_user_by_req(request) - user_id = requester.user.to_string() + requester_user_id = requester.user.to_string() # TODO: Create group on remote server content = parse_json_object_from_request(request) localpart = content.pop("localpart") group_id = GroupID(localpart, self.server_name).to_string() - result = yield self.groups_handler.create_group(group_id, user_id, content) + result = yield self.groups_handler.create_group(group_id, requester_user_id, content) defer.returnValue((200, result)) @@ -435,11 +435,11 @@ class GroupAdminRoomsServlet(RestServlet): @defer.inlineCallbacks def on_PUT(self, request, group_id, room_id): requester = yield self.auth.get_user_by_req(request) - user_id = requester.user.to_string() + requester_user_id = requester.user.to_string() content = parse_json_object_from_request(request) result = yield self.groups_handler.add_room_to_group( - group_id, user_id, room_id, content, + group_id, requester_user_id, room_id, content, ) defer.returnValue((200, result)) @@ -447,10 +447,10 @@ class GroupAdminRoomsServlet(RestServlet): @defer.inlineCallbacks def on_DELETE(self, 
request, group_id, room_id): requester = yield self.auth.get_user_by_req(request) - user_id = requester.user.to_string() + requester_user_id = requester.user.to_string() result = yield self.groups_handler.remove_room_from_group( - group_id, user_id, room_id, + group_id, requester_user_id, room_id, ) defer.returnValue((200, result)) @@ -685,9 +685,9 @@ class GroupsForUserServlet(RestServlet): @defer.inlineCallbacks def on_GET(self, request): requester = yield self.auth.get_user_by_req(request) - user_id = requester.user.to_string() + requester_user_id = requester.user.to_string() - result = yield self.groups_handler.get_joined_groups(user_id) + result = yield self.groups_handler.get_joined_groups(requester_user_id) defer.returnValue((200, result)) diff --git a/synapse/storage/schema/delta/46/group_server.sql b/synapse/storage/schema/delta/46/group_server.sql new file mode 100644 index 0000000000..23ee1194d3 --- /dev/null +++ b/synapse/storage/schema/delta/46/group_server.sql @@ -0,0 +1,17 @@ +/* Copyright 2017 Vector Creations Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- whether non-members can access group APIs +ALTER TABLE groups ADD COLUMN is_public BOOL DEFAULT 1 NOT NULL; From 595fe67f01d73f3a1ccddaf9922ac8c7e7e367cb Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Thu, 26 Oct 2017 17:20:24 +0100 Subject: [PATCH 0341/1637] delint --- synapse/groups/groups_server.py | 61 +++++++++++++++++++++----- synapse/rest/client/v2_alpha/groups.py | 21 +++++++-- 2 files changed, 67 insertions(+), 15 deletions(-) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 91c0b26107..75634febd0 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -49,7 +49,8 @@ class GroupsServerHandler(object): hs.get_groups_attestation_renewer() @defer.inlineCallbacks - def check_group_is_ours(self, group_id, requester_user_id, and_exists=False, and_is_admin=None): + def check_group_is_ours(self, group_id, requester_user_id, + and_exists=False, and_is_admin=None): """Check that the group is ours, and optionally if it exists. If group does exist then return group. 
@@ -157,10 +158,16 @@ class GroupsServerHandler(object): }) @defer.inlineCallbacks - def update_group_summary_room(self, group_id, requester_user_id, room_id, category_id, content): + def update_group_summary_room(self, group_id, requester_user_id, + room_id, category_id, content): """Add/update a room to the group summary """ - yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id) + yield self.check_group_is_ours( + group_id, + requester_user_id, + and_exists=True, + and_is_admin=requester_user_id, + ) RoomID.from_string(room_id) # Ensure valid room id @@ -179,10 +186,16 @@ class GroupsServerHandler(object): defer.returnValue({}) @defer.inlineCallbacks - def delete_group_summary_room(self, group_id, requester_user_id, room_id, category_id): + def delete_group_summary_room(self, group_id, requester_user_id, + room_id, category_id): """Remove a room from the summary """ - yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id) + yield self.check_group_is_ours( + group_id, + requester_user_id, + and_exists=True, + and_is_admin=requester_user_id, + ) yield self.store.remove_room_from_summary( group_id=group_id, @@ -220,7 +233,12 @@ class GroupsServerHandler(object): def update_group_category(self, group_id, requester_user_id, category_id, content): """Add/Update a group category """ - yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id) + yield self.check_group_is_ours( + group_id, + requester_user_id, + and_exists=True, + and_is_admin=requester_user_id, + ) is_public = _parse_visibility_from_contents(content) profile = content.get("profile") @@ -238,7 +256,12 @@ class GroupsServerHandler(object): def delete_group_category(self, group_id, requester_user_id, category_id): """Delete a group category """ - yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id) + yield self.check_group_is_ours( + group_id, + requester_user_id, + and_exists=True, + and_is_admin=requester_user_id + ) yield self.store.remove_group_category( group_id=group_id, @@ -274,7 +297,12 @@ class GroupsServerHandler(object): def update_group_role(self, group_id, requester_user_id, role_id, content): """Add/update a role in a group """ - yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id) + yield self.check_group_is_ours( + group_id, + requester_user_id, + and_exists=True, + and_is_admin=requester_user_id, + ) is_public = _parse_visibility_from_contents(content) @@ -293,7 +321,12 @@ class GroupsServerHandler(object): def delete_group_role(self, group_id, requester_user_id, role_id): """Remove role from group """ - yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id) + yield self.check_group_is_ours( + group_id, + requester_user_id, + and_exists=True, + and_is_admin=requester_user_id, + ) yield self.store.remove_group_role( group_id=group_id, @@ -623,7 +656,10 @@ class GroupsServerHandler(object): else: remote_attestation = None - local_attestation = self.attestations.create_attestation(group_id, requester_user_id) + local_attestation = self.attestations.create_attestation( + group_id, + requester_user_id, + ) is_public = _parse_visibility_from_contents(content) @@ -747,7 +783,10 @@ class GroupsServerHandler(object): group_id=group_id, ) - local_attestation = 
self.attestations.create_attestation(group_id, requester_user_id) + local_attestation = self.attestations.create_attestation( + group_id, + requester_user_id, + ) else: local_attestation = None remote_attestation = None diff --git a/synapse/rest/client/v2_alpha/groups.py b/synapse/rest/client/v2_alpha/groups.py index 05a40d6941..c97885cfc7 100644 --- a/synapse/rest/client/v2_alpha/groups.py +++ b/synapse/rest/client/v2_alpha/groups.py @@ -41,7 +41,10 @@ class GroupServlet(RestServlet): requester = yield self.auth.get_user_by_req(request) requester_user_id = requester.user.to_string() - group_description = yield self.groups_handler.get_group_profile(group_id, requester_user_id) + group_description = yield self.groups_handler.get_group_profile( + group_id, + requester_user_id, + ) defer.returnValue((200, group_description)) @@ -74,7 +77,10 @@ class GroupSummaryServlet(RestServlet): requester = yield self.auth.get_user_by_req(request) requester_user_id = requester.user.to_string() - get_group_summary = yield self.groups_handler.get_group_summary(group_id, requester_user_id) + get_group_summary = yield self.groups_handler.get_group_summary( + group_id, + requester_user_id, + ) defer.returnValue((200, get_group_summary)) @@ -387,7 +393,10 @@ class GroupInvitedUsersServlet(RestServlet): requester = yield self.auth.get_user_by_req(request) requester_user_id = requester.user.to_string() - result = yield self.groups_handler.get_invited_users_in_group(group_id, requester_user_id) + result = yield self.groups_handler.get_invited_users_in_group( + group_id, + requester_user_id, + ) defer.returnValue((200, result)) @@ -414,7 +423,11 @@ class GroupCreateServlet(RestServlet): localpart = content.pop("localpart") group_id = GroupID(localpart, self.server_name).to_string() - result = yield self.groups_handler.create_group(group_id, requester_user_id, content) + result = yield self.groups_handler.create_group( + group_id, + requester_user_id, + content, + ) defer.returnValue((200, result)) From cfa4e658e0cf0ba3286116a3f71635a2142496d8 Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Thu, 26 Oct 2017 17:23:49 +0100 Subject: [PATCH 0342/1637] Bump schema version to 46 --- synapse/storage/prepare_database.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index 817c2185c8..a4e08e6757 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -25,7 +25,7 @@ logger = logging.getLogger(__name__) # Remember to update this number every time a change is made to database # schema files, so the users will be informed on server restarts. 
-SCHEMA_VERSION = 45 +SCHEMA_VERSION = 46 dir_path = os.path.abspath(os.path.dirname(__file__)) From e86cefcb6f594bf66bde577899e996b5c75fc63f Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Thu, 26 Oct 2017 17:24:54 +0100 Subject: [PATCH 0343/1637] Add groups table to BOOLEAN_COLUMNS in synapse_port_db --- scripts/synapse_port_db | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db index a7a50e4d36..d6d8ee50cb 100755 --- a/scripts/synapse_port_db +++ b/scripts/synapse_port_db @@ -42,6 +42,7 @@ BOOLEAN_COLUMNS = { "public_room_list_stream": ["visibility"], "device_lists_outbound_pokes": ["sent"], "users_who_share_rooms": ["share_private"], + "groups": ["is_public"], } From 713e60b9b6658d611b399d80a6ae429946713689 Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Thu, 26 Oct 2017 17:38:14 +0100 Subject: [PATCH 0344/1637] Awful hack to get default true --- synapse/storage/schema/delta/46/group_server.sql | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/synapse/storage/schema/delta/46/group_server.sql b/synapse/storage/schema/delta/46/group_server.sql index 23ee1194d3..a892cff7e5 100644 --- a/synapse/storage/schema/delta/46/group_server.sql +++ b/synapse/storage/schema/delta/46/group_server.sql @@ -14,4 +14,5 @@ */ -- whether non-members can access group APIs -ALTER TABLE groups ADD COLUMN is_public BOOL DEFAULT 1 NOT NULL; +-- NB: awful hack to get the default to be true on postgres and 1 on sqlite +ALTER TABLE groups ADD COLUMN is_public BOOL DEFAULT (1=1) NOT NULL; From 007cd48af67576df23e988ea8a4abcbc64396c6a Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Thu, 26 Oct 2017 17:55:22 +0100 Subject: [PATCH 0345/1637] Recreate groups table instead of adding column Adding a column with non-constant default not possible in sqlite3 --- .../storage/schema/delta/46/group_server.sql | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/synapse/storage/schema/delta/46/group_server.sql b/synapse/storage/schema/delta/46/group_server.sql index a892cff7e5..e754b554f8 100644 --- a/synapse/storage/schema/delta/46/group_server.sql +++ b/synapse/storage/schema/delta/46/group_server.sql @@ -13,6 +13,20 @@ * limitations under the License. 
*/ --- whether non-members can access group APIs +CREATE TABLE groups_new ( + group_id TEXT NOT NULL, + name TEXT, -- the display name of the group + avatar_url TEXT, + short_description TEXT, + long_description TEXT, + is_public BOOL NOT NULL -- whether non-members can access group APIs +); + -- NB: awful hack to get the default to be true on postgres and 1 on sqlite +INSERT INTO groups_new + SELECT group_id, name, avatar_url, short_description, long_description, (1=1) FROM groups; + +DROP TABLE groups; +ALTER TABLE groups_new RENAME TO groups; + +CREATE UNIQUE INDEX groups_idx ON groups(group_id); From 69e8a05f355f24bb1377b7d39812f98ea9f28bb4 Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Thu, 26 Oct 2017 17:55:58 +0100 Subject: [PATCH 0346/1637] Make it work --- synapse/groups/groups_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 75634febd0..eac2f41768 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -69,7 +69,7 @@ class GroupsServerHandler(object): raise SynapseError(404, "Unknown group") is_user_in_group = yield self.store.is_user_in_group(requester_user_id, group_id) - if is_user_in_group or not group.is_public: + if not is_user_in_group or not group.is_public: raise SynapseError(404, "Unknown group") if and_is_admin: From 12ef02dc3d9d9244447e8ef073dcd7cae67f85e5 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 26 Oct 2017 17:59:50 +0100 Subject: [PATCH 0347/1637] SimpleHTTPClient: add support for headers Sometimes we need to pass headers into these methods --- synapse/http/client.py | 97 ++++++++++++++++++++++++++++++------------ 1 file changed, 69 insertions(+), 28 deletions(-) diff --git a/synapse/http/client.py b/synapse/http/client.py index 9eba046bbf..e96c027d75 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -114,19 +114,23 @@ class SimpleHttpClient(object): raise e @defer.inlineCallbacks - def post_urlencoded_get_json(self, uri, args={}): + def post_urlencoded_get_json(self, uri, args={}, headers=None): # TODO: Do we ever want to log message contents?
logger.debug("post_urlencoded_get_json args: %s", args) query_bytes = urllib.urlencode(encode_urlencode_args(args), True) + actual_headers = { + b"Content-Type": [b"application/x-www-form-urlencoded"], + b"User-Agent": [self.user_agent], + } + if headers: + actual_headers.update(headers) + response = yield self.request( "POST", uri.encode("ascii"), - headers=Headers({ - b"Content-Type": [b"application/x-www-form-urlencoded"], - b"User-Agent": [self.user_agent], - }), + headers=Headers(actual_headers), bodyProducer=FileBodyProducer(StringIO(query_bytes)) ) @@ -135,18 +139,33 @@ class SimpleHttpClient(object): defer.returnValue(json.loads(body)) @defer.inlineCallbacks - def post_json_get_json(self, uri, post_json): + def post_json_get_json(self, uri, post_json, headers=None): + """ + + Args: + uri (str): + post_json (object): + headers (dict[str, List[str]]|None): If not None, a map from + header name to a list of values for that header + + Returns: + Deferred[object]: parsed json + """ json_str = encode_canonical_json(post_json) logger.debug("HTTP POST %s -> %s", json_str, uri) + actual_headers = { + b"Content-Type": [b"application/json"], + b"User-Agent": [self.user_agent], + } + if headers: + actual_headers.update(headers) + response = yield self.request( "POST", uri.encode("ascii"), - headers=Headers({ - b"Content-Type": [b"application/json"], - b"User-Agent": [self.user_agent], - }), + headers=Headers(actual_headers), bodyProducer=FileBodyProducer(StringIO(json_str)) ) @@ -160,7 +179,7 @@ class SimpleHttpClient(object): defer.returnValue(json.loads(body)) @defer.inlineCallbacks - def get_json(self, uri, args={}): + def get_json(self, uri, args={}, headers=None): """ Gets some json from the given URI. Args: @@ -169,6 +188,8 @@ class SimpleHttpClient(object): None. **Note**: The value of each key is assumed to be an iterable and *not* a string. + headers (dict[str, List[str]]|None): If not None, a map from + header name to a list of values for that header Returns: Deferred: Succeeds when we get *any* 2xx HTTP response, with the HTTP body as JSON. @@ -177,13 +198,13 @@ class SimpleHttpClient(object): error message. """ try: - body = yield self.get_raw(uri, args) + body = yield self.get_raw(uri, args, headers=headers) defer.returnValue(json.loads(body)) except CodeMessageException as e: raise self._exceptionFromFailedRequest(e.code, e.msg) @defer.inlineCallbacks - def put_json(self, uri, json_body, args={}): + def put_json(self, uri, json_body, args={}, headers=None): """ Puts some json to the given URI. Args: @@ -193,6 +214,8 @@ class SimpleHttpClient(object): None. **Note**: The value of each key is assumed to be an iterable and *not* a string. + headers (dict[str, List[str]]|None): If not None, a map from + header name to a list of values for that header Returns: Deferred: Succeeds when we get *any* 2xx HTTP response, with the HTTP body as JSON. 
@@ -205,13 +228,17 @@ class SimpleHttpClient(object): json_str = encode_canonical_json(json_body) + actual_headers = { + b"Content-Type": [b"application/json"], + b"User-Agent": [self.user_agent], + } + if headers: + actual_headers.update(headers) + response = yield self.request( "PUT", uri.encode("ascii"), - headers=Headers({ - b"User-Agent": [self.user_agent], - "Content-Type": ["application/json"] - }), + headers=Headers(actual_headers), bodyProducer=FileBodyProducer(StringIO(json_str)) ) @@ -226,7 +253,7 @@ class SimpleHttpClient(object): raise CodeMessageException(response.code, body) @defer.inlineCallbacks - def get_raw(self, uri, args={}): + def get_raw(self, uri, args={}, headers=None): """ Gets raw text from the given URI. Args: @@ -235,6 +262,8 @@ class SimpleHttpClient(object): None. **Note**: The value of each key is assumed to be an iterable and *not* a string. + headers (dict[str, List[str]]|None): If not None, a map from + header name to a list of values for that header Returns: Deferred: Succeeds when we get *any* 2xx HTTP response, with the HTTP body at text. @@ -246,12 +275,16 @@ class SimpleHttpClient(object): query_bytes = urllib.urlencode(args, True) uri = "%s?%s" % (uri, query_bytes) + actual_headers = { + b"User-Agent": [self.user_agent], + } + if headers: + actual_headers.update(headers) + response = yield self.request( "GET", uri.encode("ascii"), - headers=Headers({ - b"User-Agent": [self.user_agent], - }) + headers=Headers(actual_headers), ) body = yield preserve_context_over_fn(readBody, response) @@ -274,27 +307,33 @@ class SimpleHttpClient(object): # The two should be factored out. @defer.inlineCallbacks - def get_file(self, url, output_stream, max_size=None): + def get_file(self, url, output_stream, max_size=None, headers=None): """GETs a file from a given URL Args: url (str): The URL to GET output_stream (file): File to write the response body to. + headers (dict[str, List[str]]|None): If not None, a map from + header name to a list of values for that header Returns: A (int,dict,string,int) tuple of the file length, dict of the response headers, absolute URI of the response and HTTP response code. """ + actual_headers = { + b"User-Agent": [self.user_agent], + } + if headers: + actual_headers.update(headers) + response = yield self.request( "GET", url.encode("ascii"), - headers=Headers({ - b"User-Agent": [self.user_agent], - }) + headers=Headers(actual_headers), ) - headers = dict(response.headers.getAllRawHeaders()) + resp_headers = dict(response.headers.getAllRawHeaders()) - if 'Content-Length' in headers and headers['Content-Length'] > max_size: + if 'Content-Length' in resp_headers and resp_headers['Content-Length'] > max_size: logger.warn("Requested URL is too large > %r bytes" % (self.max_size,)) raise SynapseError( 502, @@ -327,7 +366,9 @@ class SimpleHttpClient(object): Codes.UNKNOWN, ) - defer.returnValue((length, headers, response.request.absoluteURI, response.code)) + defer.returnValue( + (length, resp_headers, response.request.absoluteURI, response.code), + ) # XXX: FIXME: This is horribly copy-pasted from matrixfederationclient. 
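For illustration, a caller can now attach extra headers when POSTing JSON. A minimal sketch, assuming an existing SimpleHttpClient instance named `client` and an access token in hand (neither is part of this patch); note that each header maps to a *list* of values, matching twisted.web.http_headers.Headers:

from twisted.internet import defer


@defer.inlineCallbacks
def post_with_auth(client, uri, body, access_token):
    # `client` is a SimpleHttpClient; the new `headers` kwarg takes a map
    # from header name to a list of values for that header.
    result = yield client.post_json_get_json(
        uri,
        body,
        headers={"Authorization": ["Bearer %s" % (access_token,)]},
    )
    defer.returnValue(result)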
From 0d8e3ad48b606eac938d09f00b007822d9d11632 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 26 Oct 2017 18:00:47 +0100 Subject: [PATCH 0348/1637] Fix logcontext leaks in httpclient `preserve_context_over_fn` is borked --- synapse/http/client.py | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/synapse/http/client.py b/synapse/http/client.py index 9eba046bbf..6c7be57b16 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -18,7 +18,7 @@ from OpenSSL.SSL import VERIFY_NONE from synapse.api.errors import ( CodeMessageException, MatrixCodeMessageException, SynapseError, Codes, ) -from synapse.util.logcontext import preserve_context_over_fn +from synapse.util.logcontext import make_deferred_yieldable from synapse.util import logcontext import synapse.metrics from synapse.http.endpoint import SpiderEndpoint @@ -130,7 +130,7 @@ class SimpleHttpClient(object): bodyProducer=FileBodyProducer(StringIO(query_bytes)) ) - body = yield preserve_context_over_fn(readBody, response) + body = yield make_deferred_yieldable(readBody(response)) defer.returnValue(json.loads(body)) @@ -150,7 +150,7 @@ class SimpleHttpClient(object): bodyProducer=FileBodyProducer(StringIO(json_str)) ) - body = yield preserve_context_over_fn(readBody, response) + body = yield make_deferred_yieldable(readBody(response)) if 200 <= response.code < 300: defer.returnValue(json.loads(body)) @@ -215,7 +215,7 @@ class SimpleHttpClient(object): bodyProducer=FileBodyProducer(StringIO(json_str)) ) - body = yield preserve_context_over_fn(readBody, response) + body = yield make_deferred_yieldable(readBody(response)) if 200 <= response.code < 300: defer.returnValue(json.loads(body)) @@ -254,7 +254,7 @@ class SimpleHttpClient(object): }) ) - body = yield preserve_context_over_fn(readBody, response) + body = yield make_deferred_yieldable(readBody(response)) if 200 <= response.code < 300: defer.returnValue(body) @@ -315,10 +315,9 @@ class SimpleHttpClient(object): # straight back in again try: - length = yield preserve_context_over_fn( - _readBodyToFile, - response, output_stream, max_size - ) + length = yield make_deferred_yieldable(_readBodyToFile( + response, output_stream, max_size, + )) except Exception as e: logger.exception("Failed to download body") raise SynapseError( @@ -395,7 +394,7 @@ class CaptchaServerHttpClient(SimpleHttpClient): ) try: - body = yield preserve_context_over_fn(readBody, response) + body = yield make_deferred_yieldable(readBody(response)) defer.returnValue(body) except PartialDownloadError as e: # twisted dislikes google's response, no content length. 
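The substitution above applies anywhere a Deferred is created outside the current logcontext and then yielded on. A minimal sketch of the idiom, under the assumption that `read_response_body` is an illustrative helper name rather than anything in the patch:

from twisted.internet import defer
from twisted.web.client import readBody

from synapse.util.logcontext import make_deferred_yieldable


@defer.inlineCallbacks
def read_response_body(response):
    # readBody() starts I/O whose Deferred fires outside our logcontext;
    # make_deferred_yieldable restores the calling logcontext once the
    # result arrives, avoiding the leaks that preserve_context_over_fn
    # caused.
    body = yield make_deferred_yieldable(readBody(response))
    defer.returnValue(body)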
From 0a5866bec9a06eaed11201ad4506b09f5cd73310 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 26 Oct 2017 18:13:21 +0100 Subject: [PATCH 0349/1637] Support /keys/upload on /r0 as well as /unstable (So that we can stop riot relying on it in /unstable) --- synapse/app/frontend_proxy.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py index bee4c47498..3f8c3feeb5 100644 --- a/synapse/app/frontend_proxy.py +++ b/synapse/app/frontend_proxy.py @@ -50,8 +50,7 @@ logger = logging.getLogger("synapse.app.frontend_proxy") class KeyUploadServlet(RestServlet): - PATTERNS = client_v2_patterns("/keys/upload(/(?P<device_id>[^/]+))?$", - releases=()) + PATTERNS = client_v2_patterns("/keys/upload(/(?P<device_id>[^/]+))?$") def __init__(self, hs): """ From 54a2525133fdd5cb8de6d7af648c13186969e018 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 26 Oct 2017 18:14:57 +0100 Subject: [PATCH 0350/1637] Front-end proxy: pass through auth header So that access-token-in-an-auth-header works. --- synapse/app/frontend_proxy.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py index bee4c47498..d2fb3bc454 100644 --- a/synapse/app/frontend_proxy.py +++ b/synapse/app/frontend_proxy.py @@ -89,9 +89,16 @@ class KeyUploadServlet(RestServlet): if body: # They're actually trying to upload something, proxy to main synapse. + # Pass through the auth headers, if any, in case the access token + # is there. + auth_headers = request.requestHeaders.getRawHeaders("Authorization", []) + headers = { + "Authorization": auth_headers, + } result = yield self.http_client.post_json_get_json( self.main_uri + request.uri, body, + headers=headers, ) defer.returnValue((200, result)) From 785bd7fd75ffd944f6257185fe3129495e5fa6e7 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 27 Oct 2017 00:01:00 +0100 Subject: [PATCH 0351/1637] Allow ASes to deactivate their own users --- synapse/handlers/auth.py | 2 +- synapse/rest/client/v2_alpha/account.py | 48 ++++++++++++++++--------- 2 files changed, 33 insertions(+), 17 deletions(-) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 9cef9d184b..acae4d9e0d 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -82,7 +82,7 @@ class AuthHandler(BaseHandler): def check_auth(self, flows, clientdict, clientip): """ Takes a dictionary sent by the client in the login / registration - protocol and handles the login flow. + protocol and handles the User-Interactive Auth flow. As a side effect, this function fills in the 'creds' key on the user's session with a map, which maps each auth-type (str) to the relevant diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index 4990b22b9f..1a0d57a04a 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -13,22 +13,21 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
+import logging from twisted.internet import defer +from synapse.api.auth import has_access_token from synapse.api.constants import LoginType -from synapse.api.errors import LoginError, SynapseError, Codes +from synapse.api.errors import Codes, LoginError, SynapseError from synapse.http.servlet import ( - RestServlet, parse_json_object_from_request, assert_params_in_request + RestServlet, assert_params_in_request, + parse_json_object_from_request, ) from synapse.util.async import run_on_reactor from synapse.util.msisdn import phone_number_to_msisdn - from ._base import client_v2_patterns import logging logger = logging.getLogger(__name__) @@ -172,6 +171,18 @@ class DeactivateAccountRestServlet(RestServlet): def on_POST(self, request): body = parse_json_object_from_request(request) + # if the caller provides an access token, it ought to be valid. + requester = None + if has_access_token(request): + requester = yield self.auth.get_user_by_req( + request, + ) # type: synapse.types.Requester + + # allow ASes to deactivate their own users + if requester and requester.app_service: + yield self._deactivate_account(requester.user.to_string()) + defer.returnValue((200, {})) + authed, result, params, _ = yield self.auth_handler.check_auth([ [LoginType.PASSWORD], ], body, self.hs.get_ip_from_request(request)) @@ -179,27 +190,32 @@ class DeactivateAccountRestServlet(RestServlet): if not authed: defer.returnValue((401, result)) - user_id = None - requester = None - if LoginType.PASSWORD in result: + user_id = result[LoginType.PASSWORD] # if using password, they should also be logged in - requester = yield self.auth.get_user_by_req(request) - user_id = requester.user.to_string() - if user_id != result[LoginType.PASSWORD]: + if requester is None: + raise SynapseError( + 400, + "Deactivate account requires an access_token", + errcode=Codes.MISSING_TOKEN + ) + if requester.user.to_string() != user_id: raise LoginError(400, "", Codes.UNKNOWN) else: logger.error("Auth succeeded but no known type!", result.keys()) raise SynapseError(500, "", Codes.UNKNOWN) - # FIXME: Theoretically there is a race here wherein user resets password - # using threepid. + yield self._deactivate_account(user_id) + defer.returnValue((200, {})) + + @defer.inlineCallbacks + def _deactivate_account(self, user_id): + # FIXME: Theoretically there is a race here wherein user resets + # password using threepid. yield self.store.user_delete_access_tokens(user_id) yield self.store.user_delete_threepids(user_id) yield self.store.user_set_password_hash(user_id, None) - defer.returnValue((200, {})) - class EmailThreepidRequestTokenRestServlet(RestServlet): PATTERNS = client_v2_patterns("/account/3pid/email/requestToken$") From 92f680889dc7eac4dc1c1c28318d0bacd4b57c87 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Fri, 27 Oct 2017 00:02:22 +0100 Subject: [PATCH 0352/1637] spell out need for libxml2 for lxml to work --- README.rst | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.rst b/README.rst index 9da8c7f7a8..6f146b63b1 100644 --- a/README.rst +++ b/README.rst @@ -823,7 +823,9 @@ spidering 'internal' URLs on your network. At the very least we recommend that your loopback and RFC1918 IP addresses are blacklisted. This also requires the optional lxml and netaddr python dependencies to be -installed. +installed. This in turn requires the libxml2 library to be available - on +Debian/Ubuntu this means ``apt-get install libxml2-dev``, or equivalent for +your OS.
Password reset From 7a6546228b92723a891758d20c22c11beee0c9f9 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 27 Oct 2017 00:04:31 +0100 Subject: [PATCH 0353/1637] Device deletion: check UI auth matches access token (otherwise there's no point in the UI auth) --- synapse/rest/client/v2_alpha/devices.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/synapse/rest/client/v2_alpha/devices.py b/synapse/rest/client/v2_alpha/devices.py index 2a2438b7dc..5321e5abbb 100644 --- a/synapse/rest/client/v2_alpha/devices.py +++ b/synapse/rest/client/v2_alpha/devices.py @@ -117,6 +117,8 @@ class DeviceRestServlet(servlet.RestServlet): @defer.inlineCallbacks def on_DELETE(self, request, device_id): + requester = yield self.auth.get_user_by_req(request) + try: body = servlet.parse_json_object_from_request(request) @@ -135,11 +137,12 @@ class DeviceRestServlet(servlet.RestServlet): if not authed: defer.returnValue((401, result)) - requester = yield self.auth.get_user_by_req(request) - yield self.device_handler.delete_device( - requester.user.to_string(), - device_id, - ) + # check that the UI auth matched the access token + user_id = result[constants.LoginType.PASSWORD] + if user_id != requester.user.to_string(): + raise errors.AuthError(403, "Invalid auth") + + yield self.device_handler.delete_device(user_id, device_id) defer.returnValue((200, {})) @defer.inlineCallbacks From 585972b51a033d7082b3fba4013ad2ca544c846b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 27 Oct 2017 09:44:34 +0100 Subject: [PATCH 0354/1637] Don't generate group attestations for local users --- synapse/groups/groups_server.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 23beb3187e..96f112b580 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -609,6 +609,8 @@ class GroupsServerHandler(object): raise SynapseError(403, "User not invited to group") if not self.hs.is_mine_id(user_id): + local_attestation = self.attestations.create_attestation(group_id, user_id) + remote_attestation = content["attestation"] yield self.attestations.verify_attestation( @@ -617,10 +619,9 @@ class GroupsServerHandler(object): group_id=group_id, ) else: + local_attestation = None remote_attestation = None - local_attestation = self.attestations.create_attestation(group_id, user_id) - is_public = _parse_visibility_from_contents(content) yield self.store.add_user_to_group( From d8dde19f04799270186723f7f35dc32217dda33e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 27 Oct 2017 09:55:01 +0100 Subject: [PATCH 0355/1637] Log if we try to do attestations for our own user and group --- synapse/groups/attestations.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/synapse/groups/attestations.py b/synapse/groups/attestations.py index b751cf5e43..2e252b66a7 100644 --- a/synapse/groups/attestations.py +++ b/synapse/groups/attestations.py @@ -130,10 +130,16 @@ class GroupAttestionRenewer(object): def _renew_attestation(group_id, user_id): attestation = self.attestations.create_attestation(group_id, user_id) - if self.is_mine_id(group_id): + if not self.is_mine_id(group_id): + destination = get_domain_from_id(group_id) + else not self.is_mine_id(user_id): destination = get_domain_from_id(user_id) else: - destination = get_domain_from_id(group_id) + logger.warn( + "Incorrectly trying to do attestations for user: %r in %r", + user_id, group_id, + ) + 
return yield self.transport_client.renew_group_attestation( destination, group_id, user_id, From 195abfe7a5ec3b0d52812a3d7a04264f97376771 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 27 Oct 2017 09:58:13 +0100 Subject: [PATCH 0356/1637] Remove incorrect attestations --- synapse/groups/attestations.py | 1 + synapse/storage/group_server.py | 18 ++++++++++++++++++ 2 files changed, 19 insertions(+) diff --git a/synapse/groups/attestations.py b/synapse/groups/attestations.py index 2e252b66a7..0bd73b6a61 100644 --- a/synapse/groups/attestations.py +++ b/synapse/groups/attestations.py @@ -139,6 +139,7 @@ class GroupAttestionRenewer(object): "Incorrectly trying to do attestations for user: %r in %r", user_id, group_id, ) + yield self.store.remove_attestation_renewal(group_id, user_id) return yield self.transport_client.renew_group_attestation( diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index 9e63db5c6c..ed2ee61ad2 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -1086,6 +1086,24 @@ class GroupServerStore(SQLBaseStore): desc="update_remote_attestion", ) + def remove_attestation_renewal(self, group_id, user_id): + """Remove an attestation that we thought we should renew, but actually + shouldn't. Ideally this would never get called as we would never + incorrectly try and do attestations for local users on local groups. + + Args: + group_id (str) + user_id (str) + """ + return self._simple_update_one( + table="_simple_delete", + keyvalues={ + "group_id": group_id, + "user_id": user_id, + }, + desc="remove_attestation_renewal", + ) + @defer.inlineCallbacks def get_remote_attestation(self, group_id, user_id): """Get the attestation that proves the remote agrees that the user is From 82d8c1bacb085588b59021d21cd4df56b0d8411a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 27 Oct 2017 10:30:21 +0100 Subject: [PATCH 0357/1637] Fixup --- synapse/groups/attestations.py | 6 +++--- synapse/storage/group_server.py | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/synapse/groups/attestations.py b/synapse/groups/attestations.py index 0bd73b6a61..4656e854f0 100644 --- a/synapse/groups/attestations.py +++ b/synapse/groups/attestations.py @@ -128,11 +128,9 @@ class GroupAttestionRenewer(object): @defer.inlineCallbacks def _renew_attestation(group_id, user_id): - attestation = self.attestations.create_attestation(group_id, user_id) - if not self.is_mine_id(group_id): destination = get_domain_from_id(group_id) - else not self.is_mine_id(user_id): + elif not self.is_mine_id(user_id): destination = get_domain_from_id(user_id) else: logger.warn( @@ -142,6 +140,8 @@ class GroupAttestionRenewer(object): yield self.store.remove_attestation_renewal(group_id, user_id) return + attestation = self.attestations.create_attestation(group_id, user_id) + yield self.transport_client.renew_group_attestation( destination, group_id, user_id, content={"attestation": attestation}, diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index ed2ee61ad2..ba3f5617fa 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -1095,8 +1095,8 @@ class GroupServerStore(SQLBaseStore): group_id (str) user_id (str) """ - return self._simple_update_one( - table="_simple_delete", + return self._simple_delete( + table="group_attestations_renewals", keyvalues={ "group_id": group_id, "user_id": user_id, From 2ca46c7afcb0e0fe780e2ef2d8cefd34669fb1a9 Mon Sep 17 00:00:00 2001 From: Luke Barnard 
Date: Fri, 27 Oct 2017 10:48:01 +0100 Subject: [PATCH 0358/1637] Correct logic for checking private group membership --- synapse/groups/groups_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index eac2f41768..054d56abec 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -69,7 +69,7 @@ class GroupsServerHandler(object): raise SynapseError(404, "Unknown group") is_user_in_group = yield self.store.is_user_in_group(requester_user_id, group_id) - if not is_user_in_group or not group.is_public: + if group and not is_user_in_group and not group.is_public: raise SynapseError(404, "Unknown group") if and_is_admin: From e27b76d11728ba0fa2cbbd99ac50d33dee95da63 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 27 Oct 2017 10:54:02 +0100 Subject: [PATCH 0359/1637] Import logger --- synapse/groups/attestations.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/synapse/groups/attestations.py b/synapse/groups/attestations.py index 4656e854f0..c060cff5dd 100644 --- a/synapse/groups/attestations.py +++ b/synapse/groups/attestations.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +import logging + from twisted.internet import defer from synapse.api.errors import SynapseError @@ -22,6 +24,9 @@ from synapse.util.logcontext import preserve_fn from signedjson.sign import sign_json +logger = logging.getLogger(__name__) + + # Default validity duration for new attestations we create DEFAULT_ATTESTATION_LENGTH_MS = 3 * 24 * 60 * 60 * 1000 From c7d9f25d2242db2a5674a76f074858dbcf216d04 Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Fri, 27 Oct 2017 10:57:20 +0100 Subject: [PATCH 0360/1637] Fix create_group to pass requester_user_id --- synapse/groups/groups_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 054d56abec..175ff433a1 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -733,7 +733,7 @@ class GroupsServerHandler(object): @defer.inlineCallbacks def create_group(self, group_id, requester_user_id, content): - group = yield self.check_group_is_ours(group_id) + group = yield self.check_group_is_ours(group_id, requester_user_id) logger.info("Attempting to create group with ID: %r", group_id) From 173567a7f2fadb96eb580f4e2a5b51bbb2949baa Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 27 Oct 2017 10:59:50 +0100 Subject: [PATCH 0361/1637] Docstring for post_urlencoded_get_json --- synapse/http/client.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/synapse/http/client.py b/synapse/http/client.py index e96c027d75..24830a1526 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -115,6 +115,17 @@ class SimpleHttpClient(object): @defer.inlineCallbacks def post_urlencoded_get_json(self, uri, args={}, headers=None): + """ + Args: + uri (str): + args (dict[str, str|List[str]]): query params + headers (dict[str, List[str]]|None): If not None, a map from + header name to a list of values for that header + + Returns: + Deferred[object]: parsed json + """ + # TODO: Do we ever want to log message contents? 
logger.debug("post_urlencoded_get_json args: %s", args) From 6362298fa5cf6d0b80b199372bc6682d3a6b8101 Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Fri, 27 Oct 2017 11:04:20 +0100 Subject: [PATCH 0362/1637] Create groups with is_public = True --- synapse/storage/group_server.py | 1 + 1 file changed, 1 insertion(+) diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index 9e63db5c6c..d2437ff9c5 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -1026,6 +1026,7 @@ class GroupServerStore(SQLBaseStore): "avatar_url": avatar_url, "short_description": short_description, "long_description": long_description, + "is_public": True, }, desc="create_group", ) From 124314672fdc984255277e504215889ebd1de0ed Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Fri, 27 Oct 2017 11:08:19 +0100 Subject: [PATCH 0363/1637] group is dict --- synapse/groups/groups_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 175ff433a1..4f9e459136 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -69,7 +69,7 @@ class GroupsServerHandler(object): raise SynapseError(404, "Unknown group") is_user_in_group = yield self.store.is_user_in_group(requester_user_id, group_id) - if group and not is_user_in_group and not group.is_public: + if group and not is_user_in_group and not group["is_public"]: raise SynapseError(404, "Unknown group") if and_is_admin: From 5451cc77926750c7da73202cf3251a72c5a6d497 Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Fri, 27 Oct 2017 11:27:43 +0100 Subject: [PATCH 0364/1637] Request is_public from database --- synapse/storage/group_server.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index d2437ff9c5..095a3dd382 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -35,7 +35,9 @@ class GroupServerStore(SQLBaseStore): keyvalues={ "group_id": group_id, }, - retcols=("name", "short_description", "long_description", "avatar_url",), + retcols=( + "name", "short_description", "long_description", "avatar_url", "is_public" + ), allow_none=True, desc="is_user_in_group", ) From c067088747bea9b50afb1c1fad94e83bead754e3 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 27 Oct 2017 11:28:12 +0100 Subject: [PATCH 0365/1637] Add comment about attestations --- synapse/groups/attestations.py | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/synapse/groups/attestations.py b/synapse/groups/attestations.py index b751cf5e43..c52e020989 100644 --- a/synapse/groups/attestations.py +++ b/synapse/groups/attestations.py @@ -13,6 +13,28 @@ # See the License for the specific language governing permissions and # limitations under the License. +"""Attestations ensure that users and groups can't lie about their memberships. + +When a user joins a group the HS and GS swap attestations, which allow them +both to independently prove to third parties their membership.These +attestations have a validity period so need to be periodically renewed. + +If a user leaves (or gets kicked out of) a group, either side can still use +their attestation to "prove" their membership, until the attestation expires. +Therefore attestations shouldn't be relied on to prove membership in important +cases, but can for less important situtations, e.g. 
showing a user's membership
+of groups on their profile, showing flairs, etc.
+
+An attestation is a signed blob of json that looks like:
+
+    {
+        "user_id": "@foo:a.example.com",
+        "group_id": "+bar:b.example.com",
+        "valid_until_ms": 1507994728530,
+        "signatures":{"matrix.org":{"ed25519:auto":"..."}}
+    }
+"""
+
 from twisted.internet import defer

 from synapse.api.errors import SynapseError

From ca571b0ec3674cd477e9da5f8e9d20c4dfcaf58b Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 27 Oct 2017 11:57:27 +0100
Subject: [PATCH 0366/1637] Add jitter to validity period of attestations

This helps ensure that the renewals of attestations are spread out more
evenly.
---
 synapse/groups/attestations.py | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

diff --git a/synapse/groups/attestations.py b/synapse/groups/attestations.py
index b751cf5e43..fc5f92121e 100644
--- a/synapse/groups/attestations.py
+++ b/synapse/groups/attestations.py
@@ -13,6 +13,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+import random
+
 from twisted.internet import defer

 from synapse.api.errors import SynapseError
@@ -25,6 +27,11 @@ from signedjson.sign import sign_json
 # Default validity duration for new attestations we create
 DEFAULT_ATTESTATION_LENGTH_MS = 3 * 24 * 60 * 60 * 1000

+# We add some jitter to the validity duration of attestations so that if we
+# add lots of users at once we don't need to renew them all at once.
+# The jitter is a multiplier picked randomly between the first and second number
+DEFAULT_ATTESTATION_JITTER = (0.9, 1.3)
+
 # Start trying to update our attestations when they come this close to expiring
 UPDATE_ATTESTATION_TIME_MS = 1 * 24 * 60 * 60 * 1000

@@ -73,10 +80,14 @@ class GroupAttestationSigning(object):
         """Create an attestation for the group_id and user_id with default
         validity length.
         """
+        validity_period = DEFAULT_ATTESTATION_LENGTH_MS
+        validity_period *= random.uniform(*DEFAULT_ATTESTATION_JITTER)
+        valid_until_ms = int(self.clock.time_msec() + validity_period)
+
         return sign_json({
             "group_id": group_id,
             "user_id": user_id,
-            "valid_until_ms": self.clock.time_msec() + DEFAULT_ATTESTATION_LENGTH_MS,
+            "valid_until_ms": valid_until_ms,
         }, self.server_name, self.signing_key)

From af92f5b00fd9cbd8b9124f09384e718fac33a261 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 27 Oct 2017 15:07:21 +0100
Subject: [PATCH 0367/1637] Revert "Add jitter to validity period of attestations"

---
 synapse/groups/attestations.py | 13 +------------
 1 file changed, 1 insertion(+), 12 deletions(-)

diff --git a/synapse/groups/attestations.py b/synapse/groups/attestations.py
index fc5f92121e..b751cf5e43 100644
--- a/synapse/groups/attestations.py
+++ b/synapse/groups/attestations.py
@@ -13,8 +13,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import random
-
 from twisted.internet import defer

 from synapse.api.errors import SynapseError
@@ -27,11 +25,6 @@ from signedjson.sign import sign_json
 # Default validity duration for new attestations we create
 DEFAULT_ATTESTATION_LENGTH_MS = 3 * 24 * 60 * 60 * 1000

-# We add some jitter to the validity duration of attestations so that if we
-# add lots of users at once we don't need to renew them all at once.
-# The jitter is a multiplier picked randomly between the first and second number -DEFAULT_ATTESTATION_JITTER = (0.9, 1.3) - # Start trying to update our attestations when they come this close to expiring UPDATE_ATTESTATION_TIME_MS = 1 * 24 * 60 * 60 * 1000 @@ -80,14 +73,10 @@ class GroupAttestationSigning(object): """Create an attestation for the group_id and user_id with default validity length. """ - validity_period = DEFAULT_ATTESTATION_LENGTH_MS - validity_period *= random.uniform(*DEFAULT_ATTESTATION_JITTER) - valid_until_ms = int(self.clock.time_msec() + validity_period) - return sign_json({ "group_id": group_id, "user_id": user_id, - "valid_until_ms": valid_until_ms, + "valid_until_ms": self.clock.time_msec() + DEFAULT_ATTESTATION_LENGTH_MS, }, self.server_name, self.signing_key) From 977078f06d173771cae66836b23ee76ef1a58e26 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 27 Oct 2017 15:10:50 +0100 Subject: [PATCH 0368/1637] Fix bad merge --- synapse/groups/groups_server.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 7406f67d07..b021b7f77f 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -646,7 +646,9 @@ class GroupsServerHandler(object): raise SynapseError(403, "User not invited to group") if not self.hs.is_mine_id(requester_user_id): - local_attestation = self.attestations.create_attestation(group_id, user_id) + local_attestation = self.attestations.create_attestation( + group_id, requester_user_id, + ) remote_attestation = content["attestation"] yield self.attestations.verify_attestation( From d0abb4e8e6d6577bbe07465f8568b4eccef2c9f3 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 27 Oct 2017 16:57:19 +0100 Subject: [PATCH 0369/1637] Fix typo when checking if user is invited to group --- synapse/groups/groups_server.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index b021b7f77f..cb2ff76a0d 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -642,7 +642,10 @@ class GroupsServerHandler(object): yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True) - if not self.store.is_user_invited_to_local_group(group_id, requester_user_id): + is_invited = yield self.store.is_user_invited_to_local_group( + group_id, requester_user_id, + ) + if not is_invited: raise SynapseError(403, "User not invited to group") if not self.hs.is_mine_id(requester_user_id): From e51c2bcaef4b15a1e24a31b7edbfefbf93b7c425 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sun, 29 Oct 2017 20:47:06 +0000 Subject: [PATCH 0370/1637] move url_previews to MD as RST does my head in --- docs/{url_previews.rst => url_previews.md} | 2 ++ 1 file changed, 2 insertions(+) rename docs/{url_previews.rst => url_previews.md} (99%) diff --git a/docs/url_previews.rst b/docs/url_previews.md similarity index 99% rename from docs/url_previews.rst rename to docs/url_previews.md index 634d9d907f..665554e165 100644 --- a/docs/url_previews.rst +++ b/docs/url_previews.md @@ -56,6 +56,7 @@ As a first cut, let's do #2 and have the receiver hit the API to calculate its o API --- +``` GET /_matrix/media/r0/preview_url?url=http://wherever.com 200 OK { @@ -66,6 +67,7 @@ GET /_matrix/media/r0/preview_url?url=http://wherever.com "og:description" : "“Synapse 0.12 is out! 
Lots of polishing, performance &amp; bugfixes: /sync API, /r0 prefix, fulltext search, 3PID invites https://t.co/5alhXLLEGP”" "og:site_name" : "Twitter" } +``` * Downloads the URL * If HTML, just stores it in RAM and parses it for OG meta tags From 208a6647f13ed508309523aa0ed7b0250c97f886 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sun, 29 Oct 2017 20:54:20 +0000 Subject: [PATCH 0371/1637] fix typo --- synapse/config/cas.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/config/cas.py b/synapse/config/cas.py index 938f6f25f8..8109e5f95e 100644 --- a/synapse/config/cas.py +++ b/synapse/config/cas.py @@ -41,7 +41,7 @@ class CasConfig(Config): #cas_config: # enabled: true # server_url: "https://cas-server.com" - # service_url: "https://homesever.domain.com:8448" + # service_url: "https://homeserver.domain.com:8448" # #required_attributes: # # name: value """ From 9bc17fc5fb8189eb954881b90f4b2f502f303067 Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Mon, 30 Oct 2017 15:17:23 +0000 Subject: [PATCH 0372/1637] Fix wording on group creation error --- synapse/groups/groups_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index cb2ff76a0d..dedc9cc7fd 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -749,7 +749,7 @@ class GroupsServerHandler(object): if not is_admin: if not self.hs.config.enable_group_creation: raise SynapseError( - 403, "Only server admin can create group on this server", + 403, "Only a server admin can create groups on this server", ) localpart = group_id_obj.localpart if not localpart.startswith(self.hs.config.group_creation_prefix): From ffc574a6f9697d035051b75edf78896747b4c02f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 27 Oct 2017 17:07:24 +0100 Subject: [PATCH 0373/1637] Clean up backwards-compat hacks for ldap try to make the backwards-compat flows follow the same code paths as the modern impl. This commit should be non-functional. --- synapse/config/password_auth_providers.py | 39 +++++++++++------------ 1 file changed, 19 insertions(+), 20 deletions(-) diff --git a/synapse/config/password_auth_providers.py b/synapse/config/password_auth_providers.py index 90824cab7f..e9828fac17 100644 --- a/synapse/config/password_auth_providers.py +++ b/synapse/config/password_auth_providers.py @@ -13,41 +13,40 @@ # See the License for the specific language governing permissions and # limitations under the License. -from ._base import Config, ConfigError +from ._base import Config from synapse.util.module_loader import load_module +LDAP_PROVIDER = 'ldap_auth_provider.LdapAuthProvider' + class PasswordAuthProviderConfig(Config): def read_config(self, config): self.password_providers = [] - - provider_config = None + providers = [] # We want to be backwards compatible with the old `ldap_config` # param. 
ldap_config = config.get("ldap_config", {})
-        self.ldap_enabled = ldap_config.get("enabled", False)
-        if self.ldap_enabled:
-            from ldap_auth_provider import LdapAuthProvider
-            parsed_config = LdapAuthProvider.parse_config(ldap_config)
-            self.password_providers.append((LdapAuthProvider, parsed_config))
+        if ldap_config.get("enabled", False):
+            providers.append({
+                'module': LDAP_PROVIDER,
+                'config': ldap_config,
+            })

-        providers = config.get("password_providers", [])
+        providers.extend(config.get("password_providers", []))
         for provider in providers:
+            mod_name = provider['module']
+
             # This is for backwards compat when the ldap auth provider resided
             # in this package.
-            if provider['module'] == "synapse.util.ldap_auth_provider.LdapAuthProvider":
-                from ldap_auth_provider import LdapAuthProvider
-                provider_class = LdapAuthProvider
-                try:
-                    provider_config = provider_class.parse_config(provider["config"])
-                except Exception as e:
-                    raise ConfigError(
-                        "Failed to parse config for %r: %r" % (provider['module'], e)
-                    )
-            else:
-                (provider_class, provider_config) = load_module(provider)
+            if mod_name == "synapse.util.ldap_auth_provider.LdapAuthProvider":
+                mod_name = LDAP_PROVIDER
+
+            (provider_class, provider_config) = load_module({
+                "module": mod_name,
+                "config": provider['config'],
+            })

             self.password_providers.append((provider_class, provider_config))

From ebda45de4c04d4a3d569e4f2969b798699bd5b16 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Mon, 30 Oct 2017 14:41:35 +0000
Subject: [PATCH 0374/1637] Start some documentation on password providers

Document the existing interface, before I start adding new stuff.
---
 docs/password_auth_providers.rst | 39 ++++++++++++++++++++++
 1 file changed, 39 insertions(+)
 create mode 100644 docs/password_auth_providers.rst

diff --git a/docs/password_auth_providers.rst b/docs/password_auth_providers.rst
new file mode 100644
index 0000000000..3da1a67844
--- /dev/null
+++ b/docs/password_auth_providers.rst
@@ -0,0 +1,39 @@
+Password auth provider modules
+==============================
+
+Password auth providers offer a way for server administrators to integrate
+their Synapse installation with an existing authentication system.
+
+A password auth provider is a Python class which is dynamically loaded into
+Synapse, and provides a number of methods by which it can integrate with the
+authentication system.
+
+This document serves as a reference for those looking to implement their own
+password auth providers.
+
+Required methods
+----------------
+
+Password auth provider classes must provide the following methods:
+
+*class* ``SomeProvider.parse_config``\(*config*)
+
+    This method is passed the ``config`` object for this module from the
+    homeserver configuration file.
+
+    It should perform any appropriate sanity checks on the provided
+    configuration, and return an object which is then passed into ``__init__``.
+
+*class* ``SomeProvider``\(*config*, *account_handler*)
+
+    The constructor is passed the config object returned by ``parse_config``,
+    and a ``synapse.handlers.auth._AccountHandler`` object which allows the
+    password provider to check if accounts exist and/or create new ones.
+
+``someprovider.check_password``\(*user_id*, *password*)
+
+    This is the method that actually does the work. It is passed a qualified
+    ``@localpart:domain`` user id, and the password provided by the user.
+
+    The method should return a Twisted ``Deferred`` object, which resolves to
+    ``True`` if authentication is successful, and ``False`` if not.
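To make the documented interface concrete, here is a minimal sketch of a conforming provider. The class name, the `users` config key, and the auto-registration step are illustrative assumptions rather than anything Synapse ships; only the three methods and the `account_handler` calls (`check_user_exists`, `register`) follow the interface described above.

```python
# A minimal sketch of a password auth provider, assuming a hypothetical
# "users" mapping in its config block; not part of Synapse itself.
from twisted.internet import defer


class DemoPasswordProvider(object):
    @classmethod
    def parse_config(cls, config):
        # Sanity-check this module's config from homeserver.yaml; whatever is
        # returned here is passed back into __init__.
        if not isinstance(config.get("users", {}), dict):
            raise Exception("'users' must be a dict of user_id to password")
        return config

    def __init__(self, config, account_handler):
        self.users = config.get("users", {})
        self.account_handler = account_handler

    @defer.inlineCallbacks
    def check_password(self, user_id, password):
        # user_id arrives fully qualified, e.g. "@alice:example.com".
        if self.users.get(user_id) != password:
            defer.returnValue(False)

        # Optionally create the account on first successful login, using the
        # account_handler passed to the constructor.
        exists = yield self.account_handler.check_user_exists(user_id)
        if not exists:
            localpart = user_id.split(":", 1)[0][1:]
            yield self.account_handler.register(localpart=localpart)

        defer.returnValue(True)
```

Because ``check_password`` is decorated with ``inlineCallbacks``, it returns a ``Deferred`` resolving to ``True`` or ``False``, as the interface requires.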
From 1b65ae00ac7e15767c2e61036ba349170d0e91b7 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Tue, 31 Oct 2017 10:38:40 +0000
Subject: [PATCH 0375/1637] Refactor some logic from LoginRestServlet into AuthHandler

I'm going to need some more flexibility in handling login types in password
auth providers, so as a first step, move some stuff from LoginRestServlet
into AuthHandler.

In particular, we pass everything other than SAML, JWT and token logins down
to the AuthHandler, which now has responsibility for checking the login type
and fishing the password out of the login dictionary, as well as qualifying
the user_id if need be. Ideally SAML, JWT and token would go that way too,
but there's no real need for it right now and I'm trying to minimise impact.

This commit *should* be non-functional.
---
 synapse/handlers/auth.py        | 82 +++++++++++++++++++++------------
 synapse/rest/client/v1/login.py | 55 +++++++++++-----------
 2 files changed, 79 insertions(+), 58 deletions(-)

diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index acae4d9e0d..93d8ac0e04 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -77,6 +77,12 @@ class AuthHandler(BaseHandler):
         self.hs = hs  # FIXME better possibility to access registrationHandler later?
         self.device_handler = hs.get_device_handler()
         self.macaroon_gen = hs.get_macaroon_generator()
+        self._password_enabled = hs.config.password_enabled
+
+        login_types = set()
+        if self._password_enabled:
+            login_types.add(LoginType.PASSWORD)
+        self._supported_login_types = frozenset(login_types)

     @defer.inlineCallbacks
     def check_auth(self, flows, clientdict, clientip):
@@ -266,10 +272,11 @@ class AuthHandler(BaseHandler):
         user_id = authdict["user"]
         password = authdict["password"]
-        if not user_id.startswith('@'):
-            user_id = UserID(user_id, self.hs.hostname).to_string()

-        return self._check_password(user_id, password)
+        return self.validate_login(user_id, {
+            "type": LoginType.PASSWORD,
+            "password": password,
+        })

     @defer.inlineCallbacks
     def _check_recaptcha(self, authdict, clientip):
@@ -398,23 +405,6 @@ class AuthHandler(BaseHandler):

         return self.sessions[session_id]

-    def validate_password_login(self, user_id, password):
-        """
-        Authenticates the user with their username and password.
-
-        Used only by the v1 login API.
-
-        Args:
-            user_id (str): complete @user:id
-            password (str): Password
-        Returns:
-            defer.Deferred: (str) canonical user id
-        Raises:
-            StoreError if there was a problem accessing the database
-            LoginError if there was an authentication problem.
-        """
-        return self._check_password(user_id, password)
-
     @defer.inlineCallbacks
     def get_access_token_for_user_id(self, user_id, device_id=None,
                                      initial_display_name=None):
@@ -501,26 +491,60 @@ class AuthHandler(BaseHandler):
         )
         defer.returnValue(result)

-    @defer.inlineCallbacks
-    def _check_password(self, user_id, password):
-        """Authenticate a user against the LDAP and local databases.
+    def get_supported_login_types(self):
+        """Get the login types supported for the /login API

-        user_id is checked case insensitively against the local database, but
-        will throw if there are multiple inexact matches.
+
+        By default this is just 'm.login.password' (unless password_enabled is
+        False in the config file), but password auth providers can provide
+        other login types.
+ + Returns: + Iterable[str]: login types + """ + return self._supported_login_types + + @defer.inlineCallbacks + def validate_login(self, user_id, login_submission): + """Authenticates the user for the /login API + + Also used by the user-interactive auth flow to validate + m.login.password auth types. Args: - user_id (str): complete @user:id + user_id (str): user_id supplied by the user + login_submission (dict): the whole of the login submission + (including 'type' and other relevant fields) Returns: - (str) the canonical_user_id + Deferred[str]: canonical user id Raises: - LoginError if login fails + StoreError if there was a problem accessing the database + SynapseError if there was a problem with the request + LoginError if there was an authentication problem. """ + + if not user_id.startswith('@'): + user_id = UserID( + user_id, self.hs.hostname + ).to_string() + + login_type = login_submission.get("type") + + if login_type != LoginType.PASSWORD: + raise SynapseError(400, "Bad login type.") + if not self._password_enabled: + raise SynapseError(400, "Password login has been disabled.") + if "password" not in login_submission: + raise SynapseError(400, "Missing parameter: password") + + password = login_submission["password"] for provider in self.password_providers: is_valid = yield provider.check_password(user_id, password) if is_valid: defer.returnValue(user_id) - canonical_user_id = yield self._check_local_password(user_id, password) + canonical_user_id = yield self._check_local_password( + user_id, password, + ) if canonical_user_id: defer.returnValue(canonical_user_id) diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index 9536e8ade6..d24590011b 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -85,7 +85,6 @@ def login_id_thirdparty_from_phone(identifier): class LoginRestServlet(ClientV1RestServlet): PATTERNS = client_path_patterns("/login$") - PASS_TYPE = "m.login.password" SAML2_TYPE = "m.login.saml2" CAS_TYPE = "m.login.cas" TOKEN_TYPE = "m.login.token" @@ -94,7 +93,6 @@ class LoginRestServlet(ClientV1RestServlet): def __init__(self, hs): super(LoginRestServlet, self).__init__(hs) self.idp_redirect_url = hs.config.saml2_idp_redirect_url - self.password_enabled = hs.config.password_enabled self.saml2_enabled = hs.config.saml2_enabled self.jwt_enabled = hs.config.jwt_enabled self.jwt_secret = hs.config.jwt_secret @@ -121,8 +119,10 @@ class LoginRestServlet(ClientV1RestServlet): # fall back to the fallback API if they don't understand one of the # login flow types returned. 
flows.append({"type": LoginRestServlet.TOKEN_TYPE}) - if self.password_enabled: - flows.append({"type": LoginRestServlet.PASS_TYPE}) + + flows.extend(( + {"type": t} for t in self.auth_handler.get_supported_login_types() + )) return (200, {"flows": flows}) @@ -133,14 +133,8 @@ class LoginRestServlet(ClientV1RestServlet): def on_POST(self, request): login_submission = parse_json_object_from_request(request) try: - if login_submission["type"] == LoginRestServlet.PASS_TYPE: - if not self.password_enabled: - raise SynapseError(400, "Password login has been disabled.") - - result = yield self.do_password_login(login_submission) - defer.returnValue(result) - elif self.saml2_enabled and (login_submission["type"] == - LoginRestServlet.SAML2_TYPE): + if self.saml2_enabled and (login_submission["type"] == + LoginRestServlet.SAML2_TYPE): relay_state = "" if "relay_state" in login_submission: relay_state = "&RelayState=" + urllib.quote( @@ -157,15 +151,21 @@ class LoginRestServlet(ClientV1RestServlet): result = yield self.do_token_login(login_submission) defer.returnValue(result) else: - raise SynapseError(400, "Bad login type.") + result = yield self._do_other_login(login_submission) + defer.returnValue(result) except KeyError: raise SynapseError(400, "Missing JSON keys.") @defer.inlineCallbacks - def do_password_login(self, login_submission): - if "password" not in login_submission: - raise SynapseError(400, "Missing parameter: password") + def _do_other_login(self, login_submission): + """Handle non-token/saml/jwt logins + Args: + login_submission: + + Returns: + (int, object): HTTP code/response + """ login_submission_legacy_convert(login_submission) if "identifier" not in login_submission: @@ -208,25 +208,22 @@ class LoginRestServlet(ClientV1RestServlet): if "user" not in identifier: raise SynapseError(400, "User identifier is missing 'user' key") - user_id = identifier["user"] - - if not user_id.startswith('@'): - user_id = UserID( - user_id, self.hs.hostname - ).to_string() - auth_handler = self.auth_handler - user_id = yield auth_handler.validate_password_login( - user_id=user_id, - password=login_submission["password"], + canonical_user_id = yield auth_handler.validate_login( + identifier["user"], + login_submission, + ) + + device_id = yield self._register_device( + canonical_user_id, login_submission, ) - device_id = yield self._register_device(user_id, login_submission) access_token = yield auth_handler.get_access_token_for_user_id( - user_id, device_id, + canonical_user_id, device_id, login_submission.get("initial_device_display_name"), ) + result = { - "user_id": user_id, # may have changed + "user_id": canonical_user_id, "access_token": access_token, "home_server": self.hs.hostname, "device_id": device_id, From 1650eb584772dbad61d74c2b3c9c932a52fe1979 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 30 Oct 2017 15:16:21 +0000 Subject: [PATCH 0376/1637] DB schema interface for password auth providers Provide an interface by which password auth providers can register db schema files to be run at startup --- docs/password_auth_providers.rst | 12 ++++ synapse/storage/prepare_database.py | 70 +++++++++++++++++++++++ synapse/storage/schema/schema_version.sql | 7 +++ 3 files changed, 89 insertions(+) diff --git a/docs/password_auth_providers.rst b/docs/password_auth_providers.rst index 3da1a67844..ca05a76617 100644 --- a/docs/password_auth_providers.rst +++ b/docs/password_auth_providers.rst @@ -37,3 +37,15 @@ Password auth provider classes must provide the following methods: The 
method should return a Twisted ``Deferred`` object, which resolves to ``True`` if authentication is successful, and ``False`` if not. + +Optional methods +---------------- + +Password provider classes may optionally provide the following methods. + +*class* ``SomeProvider.get_db_schema_files()`` + + This method, if implemented, should return an Iterable of ``(name, + stream)`` pairs of database schema files. Each file is applied in turn at + initialisation, and a record is then made in the database so that it is + not re-applied on the next start. diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index a4e08e6757..d1691bbac2 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -44,6 +44,13 @@ def prepare_database(db_conn, database_engine, config): If `config` is None then prepare_database will assert that no upgrade is necessary, *or* will create a fresh database if the database is empty. + + Args: + db_conn: + database_engine: + config (synapse.config.homeserver.HomeServerConfig|None): + application config, or None if we are connecting to an existing + database which we expect to be configured already """ try: cur = db_conn.cursor() @@ -64,6 +71,10 @@ def prepare_database(db_conn, database_engine, config): else: _setup_new_database(cur, database_engine) + # check if any of our configured dynamic modules want a database + if config is not None: + _apply_module_schemas(cur, database_engine, config) + cur.close() db_conn.commit() except Exception: @@ -283,6 +294,65 @@ def _upgrade_existing_database(cur, current_version, applied_delta_files, ) +def _apply_module_schemas(txn, database_engine, config): + """Apply the module schemas for the dynamic modules, if any + + Args: + cur: database cursor + database_engine: synapse database engine class + config (synapse.config.homeserver.HomeServerConfig): + application config + """ + for (mod, _config) in config.password_providers: + if not hasattr(mod, 'get_db_schema_files'): + continue + modname = ".".join((mod.__module__, mod.__name__)) + _apply_module_schema_files( + txn, database_engine, modname, mod.get_db_schema_files(), + ) + + +def _apply_module_schema_files(cur, database_engine, modname, names_and_streams): + """Apply the module schemas for a single module + + Args: + cur: database cursor + database_engine: synapse database engine class + modname (str): fully qualified name of the module + names_and_streams (Iterable[(str, file)]): the names and streams of + schemas to be applied + """ + cur.execute( + database_engine.convert_param_style( + "SELECT file FROM applied_module_schemas WHERE module_name = ?" + ), + (modname,) + ) + applied_deltas = set(d for d, in cur) + for (name, stream) in names_and_streams: + if name in applied_deltas: + continue + + root_name, ext = os.path.splitext(name) + if ext != '.sql': + raise PrepareDatabaseException( + "only .sql files are currently supported for module schemas", + ) + + logger.info("applying schema %s for %s", name, modname) + for statement in get_statements(stream): + cur.execute(statement) + + # Mark as done. + cur.execute( + database_engine.convert_param_style( + "INSERT INTO applied_module_schemas (module_name, file)" + " VALUES (?,?)", + ), + (modname, name) + ) + + def get_statements(f): statement_buffer = "" in_comment = False # If we're in a /* ... 
*/ style comment diff --git a/synapse/storage/schema/schema_version.sql b/synapse/storage/schema/schema_version.sql index a7ade69986..42e5cb6df5 100644 --- a/synapse/storage/schema/schema_version.sql +++ b/synapse/storage/schema/schema_version.sql @@ -25,3 +25,10 @@ CREATE TABLE IF NOT EXISTS applied_schema_deltas( file TEXT NOT NULL, UNIQUE(version, file) ); + +-- a list of schema files we have loaded on behalf of dynamic modules +CREATE TABLE IF NOT EXISTS applied_module_schemas( + module_name TEXT NOT NULL, + file TEXT NOT NULL, + UNIQUE(module_name, file) +); From 9ded00f22173af2ce1fc72e2534cbbb172957373 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 31 Oct 2017 14:21:13 +0000 Subject: [PATCH 0377/1637] fix tests --- tests/utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/utils.py b/tests/utils.py index d2ebce4b2e..ed8a7360f5 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -310,6 +310,7 @@ class SQLiteMemoryDbPool(ConnectionPool, object): ) self.config = Mock() + self.config.password_providers = [] self.config.database_config = {"name": "sqlite3"} def prepare(self): From 9d419f48e635dbcc5ecc2e9fff7db85c320c0228 Mon Sep 17 00:00:00 2001 From: David Baker Date: Tue, 31 Oct 2017 16:58:49 +0000 Subject: [PATCH 0378/1637] Make the port script drop NUL values in all tables Postgres doesn't support NULs in strings so it makes the script throw an exception and stop if any values contain \0. Drop them with appropriate warning. --- scripts/synapse_port_db | 26 +++++++++++++++++++------- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db index d6d8ee50cb..3a8972efc3 100755 --- a/scripts/synapse_port_db +++ b/scripts/synapse_port_db @@ -320,7 +320,7 @@ class Porter(object): backward_chunk = min(row[0] for row in brows) - 1 rows = frows + brows - self._convert_rows(table, headers, rows) + rows = self._convert_rows(table, headers, rows) def insert(txn): self.postgres_store.insert_many_txn( @@ -556,17 +556,29 @@ class Porter(object): i for i, h in enumerate(headers) if h in bool_col_names ] + class BadValueException(Exception): + pass + def conv(j, col): if j in bool_cols: return bool(col) + elif isinstance(col, basestring) and "\0" in col: + logger.warn("DROPPING ROW: NUL value in table %s col %s: %r", table, headers[j], col) + raise BadValueException(); return col + outrows = [] for i, row in enumerate(rows): - rows[i] = tuple( - conv(j, col) - for j, col in enumerate(row) - if j > 0 - ) + try: + outrows.append(tuple( + conv(j, col) + for j, col in enumerate(row) + if j > 0 + )) + except BadValueException: + pass + + return outrows @defer.inlineCallbacks def _setup_sent_transactions(self): @@ -594,7 +606,7 @@ class Porter(object): "select", r, ) - self._convert_rows("sent_transactions", headers, rows) + rows = self._convert_rows("sent_transactions", headers, rows) inserted_rows = len(rows) if inserted_rows: From 20fe347906355e6eec2b890f5d9dc8e2c4534ce3 Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Tue, 31 Oct 2017 17:04:28 +0000 Subject: [PATCH 0379/1637] Modify group room association API to allow modification of is_public also includes renamings to make things more consistent. 
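Before the diff, it may help to see what the storage-level rename amounts to: the old insert becomes an upsert keyed on `(group_id, room_id)`, so repeated calls update the room's visibility instead of duplicating the association. A rough sketch of the equivalent transaction logic follows (illustrative only; the real code goes through Synapse's `_simple_upsert` helper, as the diff below shows):

```python
# Illustrative transaction only; the table and column names are those used
# in the diff below, but the actual implementation uses _simple_upsert.
def update_room_group_association_txn(txn, group_id, room_id, is_public):
    # Try to update an existing (group_id, room_id) association first...
    txn.execute(
        "UPDATE group_rooms SET is_public = ?"
        " WHERE group_id = ? AND room_id = ?",
        (is_public, group_id, room_id),
    )
    # ...and insert a fresh row only if nothing matched.
    if txn.rowcount == 0:
        txn.execute(
            "INSERT INTO group_rooms (group_id, room_id, is_public)"
            " VALUES (?, ?, ?)",
            (group_id, room_id, is_public),
        )
```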
--- synapse/federation/transport/client.py | 4 ++-- synapse/federation/transport/server.py | 4 ++-- synapse/groups/groups_server.py | 8 ++++---- synapse/handlers/groups_local.py | 4 ++-- synapse/rest/client/v2_alpha/groups.py | 4 ++-- synapse/storage/group_server.py | 20 +++++++++++++------- 6 files changed, 25 insertions(+), 19 deletions(-) diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index d25ae1b282..2fcbb7069b 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -531,7 +531,7 @@ class TransportLayerClient(object): ignore_backoff=True, ) - def add_room_to_group(self, destination, group_id, requester_user_id, room_id, + def update_room_group_association(self, destination, group_id, requester_user_id, room_id, content): """Add a room to a group """ @@ -545,7 +545,7 @@ class TransportLayerClient(object): ignore_backoff=True, ) - def remove_room_from_group(self, destination, group_id, requester_user_id, room_id): + def delete_room_group_association(self, destination, group_id, requester_user_id, room_id): """Remove a room from a group """ path = PREFIX + "/groups/%s/room/%s" % (group_id, room_id,) diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 8f3c14c303..ded6d4edc9 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -684,7 +684,7 @@ class FederationGroupsAddRoomsServlet(BaseFederationServlet): if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") - new_content = yield self.handler.add_room_to_group( + new_content = yield self.handler.update_room_group_association( group_id, requester_user_id, room_id, content ) @@ -696,7 +696,7 @@ class FederationGroupsAddRoomsServlet(BaseFederationServlet): if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") - new_content = yield self.handler.remove_room_from_group( + new_content = yield self.handler.delete_room_group_association( group_id, requester_user_id, room_id, ) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index dedc9cc7fd..c91d8e624f 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -531,7 +531,7 @@ class GroupsServerHandler(object): }) @defer.inlineCallbacks - def add_room_to_group(self, group_id, requester_user_id, room_id, content): + def update_room_group_association(self, group_id, requester_user_id, room_id, content): """Add room to group """ RoomID.from_string(room_id) # Ensure valid room id @@ -542,19 +542,19 @@ class GroupsServerHandler(object): is_public = _parse_visibility_from_contents(content) - yield self.store.add_room_to_group(group_id, room_id, is_public=is_public) + yield self.store.update_room_group_association(group_id, room_id, is_public=is_public) defer.returnValue({}) @defer.inlineCallbacks - def remove_room_from_group(self, group_id, requester_user_id, room_id): + def delete_room_group_association(self, group_id, requester_user_id, room_id): """Remove room from group """ yield self.check_group_is_ours( group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id ) - yield self.store.remove_room_from_group(group_id, room_id) + yield self.store.delete_room_group_association(group_id, room_id) defer.returnValue({}) diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index 6699d0888f..dabc2a3fbb 
100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -70,8 +70,8 @@ class GroupsLocalHandler(object): get_invited_users_in_group = _create_rerouter("get_invited_users_in_group") - add_room_to_group = _create_rerouter("add_room_to_group") - remove_room_from_group = _create_rerouter("remove_room_from_group") + update_room_group_association = _create_rerouter("update_room_group_association") + delete_room_group_association = _create_rerouter("delete_room_group_association") update_group_summary_room = _create_rerouter("update_group_summary_room") delete_group_summary_room = _create_rerouter("delete_group_summary_room") diff --git a/synapse/rest/client/v2_alpha/groups.py b/synapse/rest/client/v2_alpha/groups.py index c97885cfc7..792608cd48 100644 --- a/synapse/rest/client/v2_alpha/groups.py +++ b/synapse/rest/client/v2_alpha/groups.py @@ -451,7 +451,7 @@ class GroupAdminRoomsServlet(RestServlet): requester_user_id = requester.user.to_string() content = parse_json_object_from_request(request) - result = yield self.groups_handler.add_room_to_group( + result = yield self.groups_handler.update_room_group_association( group_id, requester_user_id, room_id, content, ) @@ -462,7 +462,7 @@ class GroupAdminRoomsServlet(RestServlet): requester = yield self.auth.get_user_by_req(request) requester_user_id = requester.user.to_string() - result = yield self.groups_handler.remove_room_from_group( + result = yield self.groups_handler.delete_room_group_association( group_id, requester_user_id, room_id, ) diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index 8c4ad0a9a9..a7a43de279 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -846,19 +846,25 @@ class GroupServerStore(SQLBaseStore): ) return self.runInteraction("remove_user_from_group", _remove_user_from_group_txn) - def add_room_to_group(self, group_id, room_id, is_public): - return self._simple_insert( + def update_room_group_association(self, group_id, room_id, is_public=True): + return self._simple_upsert( table="group_rooms", - values={ + keyvalues={ "group_id": group_id, "room_id": room_id, + }, + values={ "is_public": is_public, }, - desc="add_room_to_group", + insertion_values={ + "group_id": group_id, + "room_id": room_id, + }, + desc="update_room_group_association", ) - def remove_room_from_group(self, group_id, room_id): - def _remove_room_from_group_txn(txn): + def delete_room_group_association(self, group_id, room_id): + def _delete_room_group_association_txn(txn): self._simple_delete_txn( txn, table="group_rooms", @@ -877,7 +883,7 @@ class GroupServerStore(SQLBaseStore): }, ) return self.runInteraction( - "remove_room_from_group", _remove_room_from_group_txn, + "delete_room_group_association", _delete_room_group_association_txn, ) def get_publicised_groups_for_user(self, user_id): From 13b3d7b4a08d3456cf3adc0a68b2a0a2fc60098b Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Tue, 31 Oct 2017 17:20:11 +0000 Subject: [PATCH 0380/1637] Flake8 --- synapse/federation/transport/client.py | 7 ++++--- synapse/groups/groups_server.py | 7 +++++-- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 2fcbb7069b..d513c01b7e 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -531,8 +531,8 @@ class TransportLayerClient(object): ignore_backoff=True, ) - def update_room_group_association(self, destination, group_id, 
requester_user_id, room_id,
-                                      content):
+    def update_room_group_association(self, destination, group_id, requester_user_id,
+                                      room_id, content):
         """Add a room to a group
         """
         path = PREFIX + "/groups/%s/room/%s" % (group_id, room_id,)
@@ -545,7 +545,8 @@ class TransportLayerClient(object):
             ignore_backoff=True,
         )

-    def delete_room_group_association(self, destination, group_id, requester_user_id, room_id):
+    def delete_room_group_association(self, destination, group_id, requester_user_id,
+                                      room_id):
         """Remove a room from a group
         """
         path = PREFIX + "/groups/%s/room/%s" % (group_id, room_id,)

diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py
index c91d8e624f..69831eacbd 100644
--- a/synapse/groups/groups_server.py
+++ b/synapse/groups/groups_server.py
@@ -531,7 +531,8 @@ class GroupsServerHandler(object):
         })

     @defer.inlineCallbacks
-    def update_room_group_association(self, group_id, requester_user_id, room_id, content):
+    def update_room_group_association(self, group_id, requester_user_id, room_id,
+                                      content):
         """Add room to group
         """
         RoomID.from_string(room_id)  # Ensure valid room id
@@ -542,7 +543,9 @@ class GroupsServerHandler(object):

         is_public = _parse_visibility_from_contents(content)

-        yield self.store.update_room_group_association(group_id, room_id, is_public=is_public)
+        yield self.store.update_room_group_association(
+            group_id, room_id, is_public=is_public
+        )

         defer.returnValue({})

From 3e0aaad1903cb942920b06ba5eeb345d0256af19 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Tue, 31 Oct 2017 16:20:11 +0000
Subject: [PATCH 0381/1637] Let auth providers get to the database

Somewhat open to abuse, but also somewhat unavoidable :/
---
 synapse/handlers/auth.py | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index 93d8ac0e04..12c50f32f2 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -730,6 +730,7 @@ class _AccountHandler(object):
         self.hs = hs

         self._check_user_exists = check_user_exists
+        self._store = hs.get_datastore()

     def check_user_exists(self, user_id):
         """Check if user exists.
@@ -747,3 +748,18 @@ class _AccountHandler(object):
         """
         reg = self.hs.get_handlers().registration_handler
         return reg.register(localpart=localpart)
+
+    def run_db_interaction(self, desc, func, *args, **kwargs):
+        """Run a function with a database connection
+
+        Args:
+            desc (str): description for the transaction, for metrics etc
+            func (func): function to be run.
Passed a database cursor object + as well as *args and **kwargs + *args: positional args to be passed to func + **kwargs: named args to be passed to func + + Returns: + Deferred[object]: result of func + """ + return self._store.runInteraction(desc, func, *args, **kwargs) From 356bcafc4452d1cf5deea61c5084fc25a54a5ead Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 31 Oct 2017 20:35:58 +0000 Subject: [PATCH 0382/1637] Remove the last vestiges of refresh_tokens --- synapse/handlers/device.py | 2 -- synapse/storage/registration.py | 29 +++++++------------ .../schema/delta/23/refresh_tokens.sql | 21 -------------- .../delta/33/refreshtoken_device_index.sql | 17 ----------- .../drop_refresh_tokens.sql} | 5 ++-- 5 files changed, 14 insertions(+), 60 deletions(-) delete mode 100644 synapse/storage/schema/delta/23/refresh_tokens.sql delete mode 100644 synapse/storage/schema/delta/33/refreshtoken_device_index.sql rename synapse/storage/schema/delta/{33/refreshtoken_device.sql => 46/drop_refresh_tokens.sql} (81%) diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index dac4b3f4e0..94fc08446b 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -161,7 +161,6 @@ class DeviceHandler(BaseHandler): yield self.store.user_delete_access_tokens( user_id, device_id=device_id, - delete_refresh_tokens=True, ) yield self.store.delete_e2e_keys_by_device( @@ -196,7 +195,6 @@ class DeviceHandler(BaseHandler): for device_id in device_ids: yield self.store.user_delete_access_tokens( user_id, device_id=device_id, - delete_refresh_tokens=True, ) yield self.store.delete_e2e_keys_by_device( user_id=user_id, device_id=device_id diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 20acd58fcf..3d3bdba894 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -36,12 +36,15 @@ class RegistrationStore(background_updates.BackgroundUpdateStore): columns=["user_id", "device_id"], ) - self.register_background_index_update( - "refresh_tokens_device_index", - index_name="refresh_tokens_device_id", - table="refresh_tokens", - columns=["user_id", "device_id"], - ) + # we no longer use refresh tokens, but it's possible that some people + # might have a background update queued to build this index. Just + # clear the background update. + @defer.inlineCallbacks + def noop_update(progress, batch_size): + yield self._end_background_update("refresh_tokens_device_index") + defer.returnValue(1) + self.register_background_update_handler( + "refresh_tokens_device_index", noop_update) @defer.inlineCallbacks def add_access_token_to_user(self, user_id, token, device_id=None): @@ -238,10 +241,9 @@ class RegistrationStore(background_updates.BackgroundUpdateStore): @defer.inlineCallbacks def user_delete_access_tokens(self, user_id, except_token_id=None, - device_id=None, - delete_refresh_tokens=False): + device_id=None): """ - Invalidate access/refresh tokens belonging to a user + Invalidate access tokens belonging to a user Args: user_id (str): ID of user the tokens belong to @@ -250,8 +252,6 @@ class RegistrationStore(background_updates.BackgroundUpdateStore): device_id (str|None): ID of device the tokens are associated with. If None, tokens associated with any device (or no device) will be deleted - delete_refresh_tokens (bool): True to delete refresh tokens as - well as access tokens. 
Returns: defer.Deferred: """ @@ -262,13 +262,6 @@ class RegistrationStore(background_updates.BackgroundUpdateStore): if device_id is not None: keyvalues["device_id"] = device_id - if delete_refresh_tokens: - self._simple_delete_txn( - txn, - table="refresh_tokens", - keyvalues=keyvalues, - ) - items = keyvalues.items() where_clause = " AND ".join(k + " = ?" for k, _ in items) values = [v for _, v in items] diff --git a/synapse/storage/schema/delta/23/refresh_tokens.sql b/synapse/storage/schema/delta/23/refresh_tokens.sql deleted file mode 100644 index 34db0cf12b..0000000000 --- a/synapse/storage/schema/delta/23/refresh_tokens.sql +++ /dev/null @@ -1,21 +0,0 @@ -/* Copyright 2015, 2016 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -CREATE TABLE IF NOT EXISTS refresh_tokens( - id INTEGER PRIMARY KEY, - token TEXT NOT NULL, - user_id TEXT NOT NULL, - UNIQUE (token) -); diff --git a/synapse/storage/schema/delta/33/refreshtoken_device_index.sql b/synapse/storage/schema/delta/33/refreshtoken_device_index.sql deleted file mode 100644 index bb225dafbf..0000000000 --- a/synapse/storage/schema/delta/33/refreshtoken_device_index.sql +++ /dev/null @@ -1,17 +0,0 @@ -/* Copyright 2016 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -INSERT INTO background_updates (update_name, progress_json) VALUES - ('refresh_tokens_device_index', '{}'); diff --git a/synapse/storage/schema/delta/33/refreshtoken_device.sql b/synapse/storage/schema/delta/46/drop_refresh_tokens.sql similarity index 81% rename from synapse/storage/schema/delta/33/refreshtoken_device.sql rename to synapse/storage/schema/delta/46/drop_refresh_tokens.sql index 290bd6da86..68c48a89a9 100644 --- a/synapse/storage/schema/delta/33/refreshtoken_device.sql +++ b/synapse/storage/schema/delta/46/drop_refresh_tokens.sql @@ -1,4 +1,4 @@ -/* Copyright 2016 OpenMarket Ltd +/* Copyright 2017 New Vector Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,4 +13,5 @@ * limitations under the License. 
*/ -ALTER TABLE refresh_tokens ADD COLUMN device_id TEXT; +/* we no longer use (or create) the refresh_tokens table */ +DROP TABLE IF EXISTS refresh_tokens; From 207fabbc6abcd47d8d48bd1235e944177fa6521a Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Wed, 1 Nov 2017 09:35:15 +0000 Subject: [PATCH 0383/1637] Update docs for updating room group association --- synapse/federation/transport/client.py | 2 +- synapse/groups/groups_server.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index d513c01b7e..ed41dfc7ee 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -533,7 +533,7 @@ class TransportLayerClient(object): def update_room_group_association(self, destination, group_id, requester_user_id, room_id, content): - """Add a room to a group + """Add or update an association between room and group """ path = PREFIX + "/groups/%s/room/%s" % (group_id, room_id,) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 69831eacbd..e21ac8e49e 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -533,7 +533,7 @@ class GroupsServerHandler(object): @defer.inlineCallbacks def update_room_group_association(self, group_id, requester_user_id, room_id, content): - """Add room to group + """Add or update an association between room and group """ RoomID.from_string(room_id) # Ensure valid room id From 318a249c8b84ed930368fd3c154a88a17d666356 Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Wed, 1 Nov 2017 09:36:01 +0000 Subject: [PATCH 0384/1637] Leave `is_public` as required argument of update_room_group_association --- synapse/storage/group_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index a7a43de279..f6924e1a32 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -846,7 +846,7 @@ class GroupServerStore(SQLBaseStore): ) return self.runInteraction("remove_user_from_group", _remove_user_from_group_txn) - def update_room_group_association(self, group_id, room_id, is_public=True): + def update_room_group_association(self, group_id, room_id, is_public): return self._simple_upsert( table="group_rooms", keyvalues={ From 02237ce725fcdf2646f3e72d6be3ed6e2aa6519d Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 1 Nov 2017 10:18:59 +0000 Subject: [PATCH 0385/1637] Fix tests for refresh_token removal --- tests/storage/test_registration.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/storage/test_registration.py b/tests/storage/test_registration.py index 316ecdb32d..7c7b164ee6 100644 --- a/tests/storage/test_registration.py +++ b/tests/storage/test_registration.py @@ -86,7 +86,8 @@ class RegistrationStoreTestCase(unittest.TestCase): # now delete some yield self.store.user_delete_access_tokens( - self.user_id, device_id=self.device_id, delete_refresh_tokens=True) + self.user_id, device_id=self.device_id, + ) # check they were deleted user = yield self.store.get_user_by_access_token(self.tokens[1]) @@ -97,8 +98,7 @@ class RegistrationStoreTestCase(unittest.TestCase): self.assertEqual(self.user_id, user["name"]) # now delete the rest - yield self.store.user_delete_access_tokens( - self.user_id, delete_refresh_tokens=True) + yield self.store.user_delete_access_tokens(self.user_id) user = yield 
self.store.get_user_by_access_token(self.tokens[0]) self.assertIsNone(user, From 74c56f794cc33adb52746bf76c49ec5ca1edebed Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 1 Nov 2017 10:23:21 +0000 Subject: [PATCH 0386/1637] Break dependency of auth_handler on device_handler I'm going to need to make the device_handler depend on the auth_handler, so I need to break this dependency to avoid a cycle. It turns out that the auth_handler was only using the device_handler in one place which was an edge case which we can more elegantly handle by throwing an error rather than fixing it up. --- synapse/handlers/auth.py | 15 ++++++--------- synapse/rest/client/v1/login.py | 3 --- synapse/rest/client/v2_alpha/register.py | 1 - 3 files changed, 6 insertions(+), 13 deletions(-) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 93d8ac0e04..051995bcce 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -75,7 +75,6 @@ class AuthHandler(BaseHandler): logger.info("Extra password_providers: %r", self.password_providers) self.hs = hs # FIXME better possibility to access registrationHandler later? - self.device_handler = hs.get_device_handler() self.macaroon_gen = hs.get_macaroon_generator() self._password_enabled = hs.config.password_enabled @@ -406,8 +405,7 @@ class AuthHandler(BaseHandler): return self.sessions[session_id] @defer.inlineCallbacks - def get_access_token_for_user_id(self, user_id, device_id=None, - initial_display_name=None): + def get_access_token_for_user_id(self, user_id, device_id=None): """ Creates a new access token for the user with the given user ID. @@ -421,13 +419,10 @@ class AuthHandler(BaseHandler): device_id (str|None): the device ID to associate with the tokens. None to leave the tokens unassociated with a device (deprecated: we should always have a device ID) - initial_display_name (str): display name to associate with the - device if it needs re-registering Returns: The access token for the user's session. Raises: StoreError if there was a problem storing the token. - LoginError if there was an authentication problem. """ logger.info("Logging in user %s on device %s", user_id, device_id) access_token = yield self.issue_access_token(user_id, device_id) @@ -437,9 +432,11 @@ class AuthHandler(BaseHandler): # really don't want is active access_tokens without a record of the # device, so we double-check it here. 
if device_id is not None: - yield self.device_handler.check_device_registered( - user_id, device_id, initial_display_name - ) + try: + yield self.store.get_device(user_id, device_id) + except StoreError: + yield self.store.delete_access_token(access_token) + raise StoreError(400, "Login raced against device deletion") defer.returnValue(access_token) diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index d24590011b..55d2fb056e 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -219,7 +219,6 @@ class LoginRestServlet(ClientV1RestServlet): ) access_token = yield auth_handler.get_access_token_for_user_id( canonical_user_id, device_id, - login_submission.get("initial_device_display_name"), ) result = { @@ -241,7 +240,6 @@ class LoginRestServlet(ClientV1RestServlet): device_id = yield self._register_device(user_id, login_submission) access_token = yield auth_handler.get_access_token_for_user_id( user_id, device_id, - login_submission.get("initial_device_display_name"), ) result = { "user_id": user_id, # may have changed @@ -284,7 +282,6 @@ class LoginRestServlet(ClientV1RestServlet): ) access_token = yield auth_handler.get_access_token_for_user_id( registered_user_id, device_id, - login_submission.get("initial_device_display_name"), ) result = { diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index d9a8cdbbb5..a077146c89 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -566,7 +566,6 @@ class RegisterRestServlet(RestServlet): access_token = ( yield self.auth_handler.get_access_token_for_user_id( user_id, device_id=device_id, - initial_display_name=params.get("initial_device_display_name") ) ) From f8420d6279e47b13c08036ead20207e39b6c4b19 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Wed, 1 Nov 2017 13:15:41 +0000 Subject: [PATCH 0387/1637] automatically set default displayname on register to avoid leaking ugly MXIDs and cluttering up the timeline with displayname changes as well as membership joins for autojoin rooms (e.g. the status autojoin rooms), automatically set the displayname to match the localpart of the mxid upon registration. 
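For reference, the localpart used as the default displayname is simply the piece of the MXID between the leading `@` and the first `:`. A rough sketch of what `get_localpart_from_id` (imported from `synapse.types` in the diff below) presumably does, illustrative only:

```python
# Illustrative sketch only, not the actual synapse.types implementation.
def get_localpart_from_id(user_id):
    # "@alice:example.com" -> "alice"
    assert user_id.startswith("@"), "expected a fully-qualified MXID"
    return user_id[1:].split(":", 1)[0]
```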
--- synapse/rest/client/v2_alpha/register.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index d9a8cdbbb5..3f8f1d7f51 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -20,7 +20,7 @@ import synapse import synapse.types from synapse.api.auth import get_access_token_from_request, has_access_token from synapse.api.constants import LoginType -from synapse.types import RoomID, RoomAlias +from synapse.types import RoomID, RoomAlias, get_localpart_from_id from synapse.api.errors import SynapseError, Codes, UnrecognizedRequestError from synapse.http.servlet import ( RestServlet, parse_json_object_from_request, assert_params_in_request, parse_string @@ -343,6 +343,13 @@ class RegisterRestServlet(RestServlet): generate_token=False, ) + # before we auto-join, set a default displayname to avoid ugly race + # between the client joining rooms and trying to set a displayname + localpart = get_localpart_from_id(registered_user_id) + yield self.store.set_profile_displayname( + localpart, localpart + ) + # auto-join the user to any rooms we're supposed to dump them into fake_requester = synapse.types.create_requester(registered_user_id) for r in self.hs.config.auto_join_rooms: From 59e7e62c4ba24d245b5a4855cd08c583a997e968 Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 1 Nov 2017 13:58:01 +0000 Subject: [PATCH 0388/1637] Log login requests Carefully though, to avoid logging passwords --- synapse/rest/client/v1/login.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index d24590011b..7c8240a6d7 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -166,6 +166,16 @@ class LoginRestServlet(ClientV1RestServlet): Returns: (int, object): HTTP code/response """ + # Log the request we got, but only certain fields to minimise the chance of + # logging someone's password (even if they accidentally put it in the wrong + # field) + logger.info( + "Got login request with identifier: %r, medium: %r, address: %r, user: %r", + login_submission.get('identifier'), + login_submission.get('medium'), + login_submission.get('address'), + login_submission.get('user'), + ); login_submission_legacy_convert(login_submission) if "identifier" not in login_submission: From 0bb253f37b7f81944902c847db60c307edb7a4c6 Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 1 Nov 2017 14:02:52 +0000 Subject: [PATCH 0389/1637] Apparently this is python --- synapse/rest/client/v1/login.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index 7c8240a6d7..11a2aab84b 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -175,7 +175,7 @@ class LoginRestServlet(ClientV1RestServlet): login_submission.get('medium'), login_submission.get('address'), login_submission.get('user'), - ); + ) login_submission_legacy_convert(login_submission) if "identifier" not in login_submission: From dd13310fb8ca0cfce60e4fccdb93e90a16078609 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 1 Nov 2017 10:29:34 +0000 Subject: [PATCH 0390/1637] Move access token deletion into auth handler Also move duplicated deactivation code into the auth handler. 
I want to add some hooks when we deactivate an access token, so let's bring it all in here so that there's somewhere to put it. --- synapse/handlers/auth.py | 49 ++++++++++++++++++++++++- synapse/handlers/device.py | 5 ++- synapse/handlers/register.py | 3 +- synapse/rest/client/v1/admin.py | 9 +---- synapse/rest/client/v1/logout.py | 8 ++-- synapse/rest/client/v2_alpha/account.py | 15 ++------ 6 files changed, 62 insertions(+), 27 deletions(-) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 93d8ac0e04..1a90c10b01 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -608,13 +608,58 @@ class AuthHandler(BaseHandler): if e.code == 404: raise SynapseError(404, "Unknown user", Codes.NOT_FOUND) raise e - yield self.store.user_delete_access_tokens( - user_id, except_access_token_id + yield self.delete_access_tokens_for_user( + user_id, except_token_id=except_access_token_id, ) yield self.hs.get_pusherpool().remove_pushers_by_user( user_id, except_access_token_id ) + @defer.inlineCallbacks + def deactivate_account(self, user_id): + """Deactivate a user's account + + Args: + user_id (str): ID of user to be deactivated + + Returns: + Deferred + """ + # FIXME: Theoretically there is a race here wherein user resets + # password using threepid. + yield self.delete_access_tokens_for_user(user_id) + yield self.store.user_delete_threepids(user_id) + yield self.store.user_set_password_hash(user_id, None) + + def delete_access_token(self, access_token): + """Invalidate a single access token + + Args: + access_token (str): access token to be deleted + + Returns: + Deferred + """ + return self.store.delete_access_token(access_token) + + def delete_access_tokens_for_user(self, user_id, except_token_id=None, + device_id=None): + """Invalidate access tokens belonging to a user + + Args: + user_id (str): ID of user the tokens belong to + except_token_id (str|None): access_token ID which should *not* be + deleted + device_id (str|None): ID of device the tokens are associated with. + If None, tokens associated with any device (or no device) will + be deleted + Returns: + Deferred + """ + return self.store.user_delete_access_tokens( + user_id, except_token_id=except_token_id, device_id=device_id, + ) + @defer.inlineCallbacks def add_threepid(self, user_id, medium, address, validated_at): # 'Canonicalise' email addresses down to lower case. diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index dac4b3f4e0..5201e8be16 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -34,6 +34,7 @@ class DeviceHandler(BaseHandler): self.hs = hs self.state = hs.get_state_handler() + self._auth_handler = hs.get_auth_handler() self.federation_sender = hs.get_federation_sender() self.federation = hs.get_replication_layer() @@ -159,7 +160,7 @@ class DeviceHandler(BaseHandler): else: raise - yield self.store.user_delete_access_tokens( + yield self._auth_handler.delete_access_tokens_for_user( user_id, device_id=device_id, delete_refresh_tokens=True, ) @@ -194,7 +195,7 @@ class DeviceHandler(BaseHandler): # Delete access tokens and e2e keys for each device. Not optimised as it is not # considered as part of a critical path. 
for device_id in device_ids: - yield self.store.user_delete_access_tokens( + yield self._auth_handler.delete_access_tokens_for_user( user_id, device_id=device_id, delete_refresh_tokens=True, ) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 49dc33c147..f6e7e58563 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -36,6 +36,7 @@ class RegistrationHandler(BaseHandler): super(RegistrationHandler, self).__init__(hs) self.auth = hs.get_auth() + self._auth_handler = hs.get_auth_handler() self.profile_handler = hs.get_profile_handler() self.captcha_client = CaptchaServerHttpClient(hs) @@ -416,7 +417,7 @@ class RegistrationHandler(BaseHandler): create_profile_with_localpart=user.localpart, ) else: - yield self.store.user_delete_access_tokens(user_id=user_id) + yield self._auth_handler.delete_access_tokens_for_user(user_id) yield self.store.add_access_token_to_user(user_id=user_id, token=token) if displayname is not None: diff --git a/synapse/rest/client/v1/admin.py b/synapse/rest/client/v1/admin.py index 465b25033d..1197158fdc 100644 --- a/synapse/rest/client/v1/admin.py +++ b/synapse/rest/client/v1/admin.py @@ -137,7 +137,7 @@ class DeactivateAccountRestServlet(ClientV1RestServlet): PATTERNS = client_path_patterns("/admin/deactivate/(?P[^/]*)") def __init__(self, hs): - self.store = hs.get_datastore() + self._auth_handler = hs.get_auth_handler() super(DeactivateAccountRestServlet, self).__init__(hs) @defer.inlineCallbacks @@ -149,12 +149,7 @@ class DeactivateAccountRestServlet(ClientV1RestServlet): if not is_admin: raise AuthError(403, "You are not a server admin") - # FIXME: Theoretically there is a race here wherein user resets password - # using threepid. - yield self.store.user_delete_access_tokens(target_user_id) - yield self.store.user_delete_threepids(target_user_id) - yield self.store.user_set_password_hash(target_user_id, None) - + yield self._auth_handler.deactivate_account(target_user_id) defer.returnValue((200, {})) diff --git a/synapse/rest/client/v1/logout.py b/synapse/rest/client/v1/logout.py index 1358d0acab..6add754782 100644 --- a/synapse/rest/client/v1/logout.py +++ b/synapse/rest/client/v1/logout.py @@ -30,7 +30,7 @@ class LogoutRestServlet(ClientV1RestServlet): def __init__(self, hs): super(LogoutRestServlet, self).__init__(hs) - self.store = hs.get_datastore() + self._auth_handler = hs.get_auth_handler() def on_OPTIONS(self, request): return (200, {}) @@ -38,7 +38,7 @@ class LogoutRestServlet(ClientV1RestServlet): @defer.inlineCallbacks def on_POST(self, request): access_token = get_access_token_from_request(request) - yield self.store.delete_access_token(access_token) + yield self._auth_handler.delete_access_token(access_token) defer.returnValue((200, {})) @@ -47,8 +47,8 @@ class LogoutAllRestServlet(ClientV1RestServlet): def __init__(self, hs): super(LogoutAllRestServlet, self).__init__(hs) - self.store = hs.get_datastore() self.auth = hs.get_auth() + self._auth_handler = hs.get_auth_handler() def on_OPTIONS(self, request): return (200, {}) @@ -57,7 +57,7 @@ class LogoutAllRestServlet(ClientV1RestServlet): def on_POST(self, request): requester = yield self.auth.get_user_by_req(request) user_id = requester.user.to_string() - yield self.store.user_delete_access_tokens(user_id) + yield self._auth_handler.delete_access_tokens_for_user(user_id) defer.returnValue((200, {})) diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index 1a0d57a04a..3062e04c59 100644 --- 
a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -162,7 +162,6 @@ class DeactivateAccountRestServlet(RestServlet): def __init__(self, hs): self.hs = hs - self.store = hs.get_datastore() self.auth = hs.get_auth() self.auth_handler = hs.get_auth_handler() super(DeactivateAccountRestServlet, self).__init__() @@ -180,7 +179,9 @@ class DeactivateAccountRestServlet(RestServlet): # allow ASes to dectivate their own users if requester and requester.app_service: - yield self._deactivate_account(requester.user.to_string()) + yield self.auth_handler.deactivate_account( + requester.user.to_string() + ) defer.returnValue((200, {})) authed, result, params, _ = yield self.auth_handler.check_auth([ @@ -205,17 +206,9 @@ class DeactivateAccountRestServlet(RestServlet): logger.error("Auth succeeded but no known type!", result.keys()) raise SynapseError(500, "", Codes.UNKNOWN) - yield self._deactivate_account(user_id) + yield self.auth_handler.deactivate_account(user_id) defer.returnValue((200, {})) - @defer.inlineCallbacks - def _deactivate_account(self, user_id): - # FIXME: Theoretically there is a race here wherein user resets - # password using threepid. - yield self.store.user_delete_access_tokens(user_id) - yield self.store.user_delete_threepids(user_id) - yield self.store.user_set_password_hash(user_id, None) - class EmailThreepidRequestTokenRestServlet(RestServlet): PATTERNS = client_v2_patterns("/account/3pid/email/requestToken$") From 9f7a555b4e8c1d90a638365cf1d4acb3ce7f3db7 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Wed, 1 Nov 2017 15:51:25 +0000 Subject: [PATCH 0391/1637] switch to setting default displayname in the storage layer to avoid clobbering guest user displaynames on registration --- synapse/rest/client/v2_alpha/register.py | 9 +-------- synapse/storage/registration.py | 6 ++++-- 2 files changed, 5 insertions(+), 10 deletions(-) diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 3f8f1d7f51..d9a8cdbbb5 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -20,7 +20,7 @@ import synapse import synapse.types from synapse.api.auth import get_access_token_from_request, has_access_token from synapse.api.constants import LoginType -from synapse.types import RoomID, RoomAlias, get_localpart_from_id +from synapse.types import RoomID, RoomAlias from synapse.api.errors import SynapseError, Codes, UnrecognizedRequestError from synapse.http.servlet import ( RestServlet, parse_json_object_from_request, assert_params_in_request, parse_string @@ -343,13 +343,6 @@ class RegisterRestServlet(RestServlet): generate_token=False, ) - # before we auto-join, set a default displayname to avoid ugly race - # between the client joining rooms and trying to set a displayname - localpart = get_localpart_from_id(registered_user_id) - yield self.store.set_profile_displayname( - localpart, localpart - ) - # auto-join the user to any rooms we're supposed to dump them into fake_requester = synapse.types.create_requester(registered_user_id) for r in self.hs.config.auto_join_rooms: diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 20acd58fcf..3442af4b90 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -177,9 +177,11 @@ class RegistrationStore(background_updates.BackgroundUpdateStore): ) if create_profile_with_localpart: + # set a default displayname serverside to avoid ugly race + # between auto-joins 
and clients trying to set displaynames txn.execute( - "INSERT INTO profiles(user_id) VALUES (?)", - (create_profile_with_localpart,) + "INSERT INTO profiles(user_id, displayname) VALUES (?,?)", + (create_profile_with_localpart, create_profile_with_localpart) ) self._invalidate_cache_and_stream( From 3cd6b22c7bf0aa0108535bad5656a0d2d9e85634 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 31 Oct 2017 10:43:57 +0000 Subject: [PATCH 0392/1637] Let password auth providers handle arbitrary login types Provide a hook where password auth providers can say they know about other login types, and get passed the relevant parameters --- docs/password_auth_providers.rst | 53 +++++++++++--- synapse/handlers/auth.py | 120 +++++++++++++++++++++++++------ 2 files changed, 140 insertions(+), 33 deletions(-) diff --git a/docs/password_auth_providers.rst b/docs/password_auth_providers.rst index ca05a76617..2dbebcd72c 100644 --- a/docs/password_auth_providers.rst +++ b/docs/password_auth_providers.rst @@ -30,22 +30,55 @@ Password auth provider classes must provide the following methods: and a ``synapse.handlers.auth._AccountHandler`` object which allows the password provider to check if accounts exist and/or create new ones. -``someprovider.check_password``\(*user_id*, *password*) - - This is the method that actually does the work. It is passed a qualified - ``@localpart:domain`` user id, and the password provided by the user. - - The method should return a Twisted ``Deferred`` object, which resolves to - ``True`` if authentication is successful, and ``False`` if not. - Optional methods ---------------- -Password provider classes may optionally provide the following methods. +Password auth provider classes may optionally provide the following methods. -*class* ``SomeProvider.get_db_schema_files()`` +*class* ``SomeProvider.get_db_schema_files``\() This method, if implemented, should return an Iterable of ``(name, stream)`` pairs of database schema files. Each file is applied in turn at initialisation, and a record is then made in the database so that it is not re-applied on the next start. + +``someprovider.get_supported_login_types``\() + + This method, if implemented, should return a ``dict`` mapping from a login + type identifier (such as ``m.login.password``) to an iterable giving the + fields which must be provided by the user in the submission to the + ``/login`` api. These fields are passed in the ``login_dict`` dictionary + to ``check_auth``. + + For example, if a password auth provider wants to implement a custom login + type of ``com.example.custom_login``, where the client is expected to pass + the fields ``secret1`` and ``secret2``, the provider should implement this + method and return the following dict:: + + {"com.example.custom_login": ("secret1", "secret2")} + +``someprovider.check_auth``\(*username*, *login_type*, *login_dict*) + + This method is the one that does the real work. If implemented, it will be + called for each login attempt where the login type matches one of the keys + returned by ``get_supported_login_types``. + + It is passed the (possibly UNqualified) ``user`` provided by the client, + the login type, and a dictionary of login secrets passed by the client. + + The method should return a Twisted ``Deferred`` object, which resolves to + the canonical ``@localpart:domain`` user id if authentication is successful, + and ``None`` if not. 
+
+``someprovider.check_password``\(*user_id*, *password*)
+
+    This method provides a simpler interface than ``get_supported_login_types``
+    and ``check_auth`` for password auth providers that just want to provide a
+    mechanism for validating ``m.login.password`` logins.
+
+    If implemented, it will be called to check logins with an
+    ``m.login.password`` login type. It is passed a qualified
+    ``@localpart:domain`` user id, and the password provided by the user.
+
+    The method should return a Twisted ``Deferred`` object, which resolves to
+    ``True`` if authentication is successful, and ``False`` if not.
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index 93d8ac0e04..d5da27a3c3 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -82,6 +82,11 @@ class AuthHandler(BaseHandler):
         login_types = set()
         if self._password_enabled:
             login_types.add(LoginType.PASSWORD)
+        for provider in self.password_providers:
+            if hasattr(provider, "get_supported_login_types"):
+                login_types.update(
+                    provider.get_supported_login_types().keys()
+                )
         self._supported_login_types = frozenset(login_types)
 
     @defer.inlineCallbacks
@@ -504,14 +509,14 @@ class AuthHandler(BaseHandler):
         return self._supported_login_types
 
     @defer.inlineCallbacks
-    def validate_login(self, user_id, login_submission):
+    def validate_login(self, username, login_submission):
         """Authenticates the user for the /login API
 
         Also used by the user-interactive auth flow to validate
         m.login.password auth types.
 
         Args:
-            user_id (str): user_id supplied by the user
+            username (str): username supplied by the user
             login_submission (dict): the whole of the login submission
                 (including 'type' and other relevant fields)
         Returns:
@@ -522,32 +527,81 @@ class AuthHandler(BaseHandler):
             LoginError if there was an authentication problem.
""" - if not user_id.startswith('@'): - user_id = UserID( - user_id, self.hs.hostname + if username.startswith('@'): + qualified_user_id = username + else: + qualified_user_id = UserID( + username, self.hs.hostname ).to_string() login_type = login_submission.get("type") + known_login_type = False - if login_type != LoginType.PASSWORD: - raise SynapseError(400, "Bad login type.") - if not self._password_enabled: - raise SynapseError(400, "Password login has been disabled.") - if "password" not in login_submission: - raise SynapseError(400, "Missing parameter: password") + # special case to check for "password" for the check_password interface + # for the auth providers + password = login_submission.get("password") + if login_type == LoginType.PASSWORD: + if not self._password_enabled: + raise SynapseError(400, "Password login has been disabled.") + if not password: + raise SynapseError(400, "Missing parameter: password") - password = login_submission["password"] for provider in self.password_providers: - is_valid = yield provider.check_password(user_id, password) - if is_valid: - defer.returnValue(user_id) + if (hasattr(provider, "check_password") + and login_type == LoginType.PASSWORD): + known_login_type = True + is_valid = yield provider.check_password( + qualified_user_id, password, + ) + if is_valid: + defer.returnValue(qualified_user_id) - canonical_user_id = yield self._check_local_password( - user_id, password, - ) + if (not hasattr(provider, "get_supported_login_types") + or not hasattr(provider, "check_auth")): + # this password provider doesn't understand custom login types + continue - if canonical_user_id: - defer.returnValue(canonical_user_id) + supported_login_types = provider.get_supported_login_types() + if login_type not in supported_login_types: + # this password provider doesn't understand this login type + continue + + known_login_type = True + login_fields = supported_login_types[login_type] + + missing_fields = [] + login_dict = {} + for f in login_fields: + if f not in login_submission: + missing_fields.append(f) + else: + login_dict[f] = login_submission[f] + if missing_fields: + raise SynapseError( + 400, "Missing parameters for login type %s: %s" % ( + login_type, + missing_fields, + ), + ) + + returned_user_id = yield provider.check_auth( + username, login_type, login_dict, + ) + if returned_user_id: + defer.returnValue(returned_user_id) + + if login_type == LoginType.PASSWORD: + known_login_type = True + + canonical_user_id = yield self._check_local_password( + qualified_user_id, password, + ) + + if canonical_user_id: + defer.returnValue(canonical_user_id) + + if not known_login_type: + raise SynapseError(400, "Unknown login type %s" % login_type) # unknown username or invalid password. We raise a 403 here, but note # that if we're doing user-interactive login, it turns all LoginErrors @@ -731,11 +785,31 @@ class _AccountHandler(object): self._check_user_exists = check_user_exists - def check_user_exists(self, user_id): - """Check if user exissts. + def get_qualified_user_id(self, username): + """Qualify a user id, if necessary + + Takes a user id provided by the user and adds the @ and :domain to + qualify it, if necessary + + Args: + username (str): provided user id Returns: - Deferred(bool) + str: qualified @user:id + """ + if username.startswith('@'): + return username + return UserID(username, self.hs.hostname).to_string() + + def check_user_exists(self, user_id): + """Check if user exists. 
+ + Args: + user_id (str): Complete @user:id + + Returns: + Deferred[str|None]: Canonical (case-corrected) user_id, or None + if the user is not registered. """ return self._check_user_exists(user_id) From 4c8f94ac9433753464c4d8379aae650c3129500d Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 31 Oct 2017 15:15:51 +0000 Subject: [PATCH 0393/1637] Allow password_auth_providers to return a callback ... so that they have a way to record access tokens. --- docs/password_auth_providers.rst | 5 +++++ synapse/handlers/auth.py | 13 ++++++++----- synapse/rest/client/v1/login.py | 5 ++++- 3 files changed, 17 insertions(+), 6 deletions(-) diff --git a/docs/password_auth_providers.rst b/docs/password_auth_providers.rst index 2dbebcd72c..4ae4aeb53f 100644 --- a/docs/password_auth_providers.rst +++ b/docs/password_auth_providers.rst @@ -70,6 +70,11 @@ Password auth provider classes may optionally provide the following methods. the canonical ``@localpart:domain`` user id if authentication is successful, and ``None`` if not. + Alternatively, the ``Deferred`` can resolve to a ``(str, func)`` tuple, in + which case the second field is a callback which will be called with the + result from the ``/login`` call (including ``access_token``, ``device_id``, + etc.) + ``someprovider.check_password``\(*user_id*, *password*) This method provides a simpler interface than ``get_supported_login_types`` diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 9799461d26..5c89768c14 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -517,7 +517,8 @@ class AuthHandler(BaseHandler): login_submission (dict): the whole of the login submission (including 'type' and other relevant fields) Returns: - Deferred[str]: canonical user id + Deferred[str, func]: canonical user id, and optional callback + to be called once the access token and device id are issued Raises: StoreError if there was a problem accessing the database SynapseError if there was a problem with the request @@ -581,11 +582,13 @@ class AuthHandler(BaseHandler): ), ) - returned_user_id = yield provider.check_auth( + result = yield provider.check_auth( username, login_type, login_dict, ) - if returned_user_id: - defer.returnValue(returned_user_id) + if result: + if isinstance(result, str): + result = (result, None) + defer.returnValue(result) if login_type == LoginType.PASSWORD: known_login_type = True @@ -595,7 +598,7 @@ class AuthHandler(BaseHandler): ) if canonical_user_id: - defer.returnValue(canonical_user_id) + defer.returnValue((canonical_user_id, None)) if not known_login_type: raise SynapseError(400, "Unknown login type %s" % login_type) diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index d25a68e753..5669ecb724 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -219,7 +219,7 @@ class LoginRestServlet(ClientV1RestServlet): raise SynapseError(400, "User identifier is missing 'user' key") auth_handler = self.auth_handler - canonical_user_id = yield auth_handler.validate_login( + canonical_user_id, callback = yield auth_handler.validate_login( identifier["user"], login_submission, ) @@ -238,6 +238,9 @@ class LoginRestServlet(ClientV1RestServlet): "device_id": device_id, } + if callback is not None: + yield callback(result) + defer.returnValue((200, result)) @defer.inlineCallbacks From bc8a5c033097f719d6b2971660ad833ab8cb3838 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 1 Nov 2017 15:42:38 +0000 Subject: [PATCH 0394/1637] Notify 
auth providers on logout Provide a hook by which auth providers can be notified of logouts. --- docs/password_auth_providers.rst | 10 ++++++++++ synapse/handlers/auth.py | 26 ++++++++++++++++++++++++-- synapse/storage/registration.py | 13 ++++++++----- 3 files changed, 42 insertions(+), 7 deletions(-) diff --git a/docs/password_auth_providers.rst b/docs/password_auth_providers.rst index 2dbebcd72c..98019cea01 100644 --- a/docs/password_auth_providers.rst +++ b/docs/password_auth_providers.rst @@ -82,3 +82,13 @@ Password auth provider classes may optionally provide the following methods. The method should return a Twisted ``Deferred`` object, which resolves to ``True`` if authentication is successful, and ``False`` if not. + +``someprovider.on_logged_out``\(*user_id*, *device_id*, *access_token*) + + This method, if implemented, is called when a user logs out. It is passed + the qualified user ID, the ID of the deactivated device (if any: access + tokens are occasionally created without an associated device ID), and the + (now deactivated) access token. + + It may return a Twisted ``Deferred`` object; the logout request will wait + for the deferred to complete but the result is ignored. diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 9799461d26..cc667b6d8b 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -682,6 +682,7 @@ class AuthHandler(BaseHandler): yield self.store.user_delete_threepids(user_id) yield self.store.user_set_password_hash(user_id, None) + @defer.inlineCallbacks def delete_access_token(self, access_token): """Invalidate a single access token @@ -691,8 +692,19 @@ class AuthHandler(BaseHandler): Returns: Deferred """ - return self.store.delete_access_token(access_token) + user_info = yield self.auth.get_user_by_access_token(access_token) + yield self.store.delete_access_token(access_token) + # see if any of our auth providers want to know about this + for provider in self.password_providers: + if hasattr(provider, "on_logged_out"): + yield provider.on_logged_out( + user_id=str(user_info["user"]), + device_id=user_info["device_id"], + access_token=access_token, + ) + + @defer.inlineCallbacks def delete_access_tokens_for_user(self, user_id, except_token_id=None, device_id=None): """Invalidate access tokens belonging to a user @@ -707,10 +719,20 @@ class AuthHandler(BaseHandler): Returns: Deferred """ - return self.store.user_delete_access_tokens( + tokens_and_devices = yield self.store.user_delete_access_tokens( user_id, except_token_id=except_token_id, device_id=device_id, ) + # see if any of our auth providers want to know about this + for provider in self.password_providers: + if hasattr(provider, "on_logged_out"): + for token, device_id in tokens_and_devices: + yield provider.on_logged_out( + user_id=user_id, + device_id=device_id, + access_token=token, + ) + @defer.inlineCallbacks def add_threepid(self, user_id, medium, address, validated_at): # 'Canonicalise' email addresses down to lower case. 
diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 65ddefda92..9c4f61da76 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -255,7 +255,8 @@ class RegistrationStore(background_updates.BackgroundUpdateStore): If None, tokens associated with any device (or no device) will be deleted Returns: - defer.Deferred: + defer.Deferred[list[str, str|None]]: a list of the deleted tokens + and device IDs """ def f(txn): keyvalues = { @@ -272,14 +273,14 @@ class RegistrationStore(background_updates.BackgroundUpdateStore): values.append(except_token_id) txn.execute( - "SELECT token FROM access_tokens WHERE %s" % where_clause, + "SELECT token, device_id FROM access_tokens WHERE %s" % where_clause, values ) - rows = self.cursor_to_dict(txn) + tokens_and_devices = [(r[0], r[1]) for r in txn] - for row in rows: + for token, _ in tokens_and_devices: self._invalidate_cache_and_stream( - txn, self.get_user_by_access_token, (row["token"],) + txn, self.get_user_by_access_token, (token,) ) txn.execute( @@ -287,6 +288,8 @@ class RegistrationStore(background_updates.BackgroundUpdateStore): values ) + return tokens_and_devices + yield self.runInteraction( "user_delete_access_tokens", f, ) From 979eed43627e3d13a8a09d4904c80d84b0b6e609 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 1 Nov 2017 17:03:20 +0000 Subject: [PATCH 0395/1637] Fix user-interactive password auth this got broken in the previous commit --- synapse/handlers/auth.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 5c89768c14..11d2b804d4 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -270,6 +270,7 @@ class AuthHandler(BaseHandler): sess = self._get_session_info(session_id) return sess.setdefault('serverdict', {}).get(key, default) + @defer.inlineCallbacks def _check_password_auth(self, authdict, _): if "user" not in authdict or "password" not in authdict: raise LoginError(400, "", Codes.MISSING_PARAM) @@ -277,10 +278,11 @@ class AuthHandler(BaseHandler): user_id = authdict["user"] password = authdict["password"] - return self.validate_login(user_id, { + (canonical_id, callback) = yield self.validate_login(user_id, { "type": LoginType.PASSWORD, "password": password, }) + defer.returnValue(canonical_id) @defer.inlineCallbacks def _check_recaptcha(self, authdict, clientip): From 6650a07ede4e6cb24fba527ffba2f175cdc30584 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 2 Nov 2017 13:27:21 +0000 Subject: [PATCH 0396/1637] Factor out _configure_named_resource This was a bit of a code vomit, so let's factor it out to preserve some sanity --- synapse/app/homeserver.py | 110 ++++++++++++++++++++++---------------- 1 file changed, 64 insertions(+), 46 deletions(-) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 3adf72e141..97d2b01a5e 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -107,52 +107,9 @@ class SynapseHomeServer(HomeServer): resources = {} for res in listener_config["resources"]: for name in res["names"]: - if name == "client": - client_resource = ClientRestResource(self) - if res["compress"]: - client_resource = gz_wrap(client_resource) - - resources.update({ - "/_matrix/client/api/v1": client_resource, - "/_matrix/client/r0": client_resource, - "/_matrix/client/unstable": client_resource, - "/_matrix/client/v2_alpha": client_resource, - "/_matrix/client/versions": client_resource, - }) - - if name == 
"federation": - resources.update({ - FEDERATION_PREFIX: TransportLayerServer(self), - }) - - if name in ["static", "client"]: - resources.update({ - STATIC_PREFIX: File( - os.path.join(os.path.dirname(synapse.__file__), "static") - ), - }) - - if name in ["media", "federation", "client"]: - media_repo = MediaRepositoryResource(self) - resources.update({ - MEDIA_PREFIX: media_repo, - LEGACY_MEDIA_PREFIX: media_repo, - CONTENT_REPO_PREFIX: ContentRepoResource( - self, self.config.uploads_path - ), - }) - - if name in ["keys", "federation"]: - resources.update({ - SERVER_KEY_PREFIX: LocalKey(self), - SERVER_KEY_V2_PREFIX: KeyApiV2Resource(self), - }) - - if name == "webclient": - resources[WEB_CLIENT_PREFIX] = build_resource_for_web_client(self) - - if name == "metrics" and self.get_config().enable_metrics: - resources[METRICS_PREFIX] = MetricsResource(self) + resources.update(self._configure_named_resource( + name, res.get("compress", False), + )) if WEB_CLIENT_PREFIX in resources: root_resource = RootRedirect(WEB_CLIENT_PREFIX) @@ -188,6 +145,67 @@ class SynapseHomeServer(HomeServer): ) logger.info("Synapse now listening on port %d", port) + def _configure_named_resource(self, name, compress=False): + """Build a resource map for a named resource + + Args: + name (str): named resource: one of "client", "federation", etc + compress (bool): whether to enable gzip compression for this + resource + + Returns: + dict[str, Resource]: map from path to HTTP resource + """ + resources = {} + if name == "client": + client_resource = ClientRestResource(self) + if compress: + client_resource = gz_wrap(client_resource) + + resources.update({ + "/_matrix/client/api/v1": client_resource, + "/_matrix/client/r0": client_resource, + "/_matrix/client/unstable": client_resource, + "/_matrix/client/v2_alpha": client_resource, + "/_matrix/client/versions": client_resource, + }) + + if name == "federation": + resources.update({ + FEDERATION_PREFIX: TransportLayerServer(self), + }) + + if name in ["static", "client"]: + resources.update({ + STATIC_PREFIX: File( + os.path.join(os.path.dirname(synapse.__file__), "static") + ), + }) + + if name in ["media", "federation", "client"]: + media_repo = MediaRepositoryResource(self) + resources.update({ + MEDIA_PREFIX: media_repo, + LEGACY_MEDIA_PREFIX: media_repo, + CONTENT_REPO_PREFIX: ContentRepoResource( + self, self.config.uploads_path + ), + }) + + if name in ["keys", "federation"]: + resources.update({ + SERVER_KEY_PREFIX: LocalKey(self), + SERVER_KEY_V2_PREFIX: KeyApiV2Resource(self), + }) + + if name == "webclient": + resources[WEB_CLIENT_PREFIX] = build_resource_for_web_client(self) + + if name == "metrics" and self.get_config().enable_metrics: + resources[METRICS_PREFIX] = MetricsResource(self) + + return resources + def start_listening(self): config = self.get_config() From 1189be43a2479f5adf034613e8d10e3f4f452eb9 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 2 Nov 2017 14:13:25 +0000 Subject: [PATCH 0397/1637] Factor _AccountHandler proxy out to ModuleApi We're going to need to use this from places that aren't password auth, so let's move it to a proper class. 
--- docs/password_auth_providers.rst | 2 +- synapse/handlers/auth.py | 72 ++--------------------------- synapse/module_api/__init__.py | 79 ++++++++++++++++++++++++++++++++ 3 files changed, 83 insertions(+), 70 deletions(-) create mode 100644 synapse/module_api/__init__.py diff --git a/docs/password_auth_providers.rst b/docs/password_auth_providers.rst index 2842d187e5..d8a7b61cdc 100644 --- a/docs/password_auth_providers.rst +++ b/docs/password_auth_providers.rst @@ -27,7 +27,7 @@ Password auth provider classes must provide the following methods: *class* ``SomeProvider``\(*config*, *account_handler*) The constructor is passed the config object returned by ``parse_config``, - and a ``synapse.handlers.auth._AccountHandler`` object which allows the + and a ``synapse.module_api.ModuleApi`` object which allows the password provider to check if accounts exist and/or create new ones. Optional methods diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 0337be36c2..7a0ba6ef35 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -13,13 +13,13 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from twisted.internet import defer from ._base import BaseHandler from synapse.api.constants import LoginType -from synapse.types import UserID from synapse.api.errors import AuthError, LoginError, Codes, StoreError, SynapseError +from synapse.module_api import ModuleApi +from synapse.types import UserID from synapse.util.async import run_on_reactor from synapse.util.caches.expiringcache import ExpiringCache @@ -63,10 +63,7 @@ class AuthHandler(BaseHandler): reset_expiry_on_get=True, ) - account_handler = _AccountHandler( - hs, check_user_exists=self.check_user_exists - ) - + account_handler = ModuleApi(hs, self) self.password_providers = [ module(config=config, account_handler=account_handler) for module, config in hs.config.password_providers @@ -843,66 +840,3 @@ class MacaroonGeneartor(object): macaroon.add_first_party_caveat("gen = 1") macaroon.add_first_party_caveat("user_id = %s" % (user_id,)) return macaroon - - -class _AccountHandler(object): - """A proxy object that gets passed to password auth providers so they - can register new users etc if necessary. - """ - def __init__(self, hs, check_user_exists): - self.hs = hs - - self._check_user_exists = check_user_exists - self._store = hs.get_datastore() - - def get_qualified_user_id(self, username): - """Qualify a user id, if necessary - - Takes a user id provided by the user and adds the @ and :domain to - qualify it, if necessary - - Args: - username (str): provided user id - - Returns: - str: qualified @user:id - """ - if username.startswith('@'): - return username - return UserID(username, self.hs.hostname).to_string() - - def check_user_exists(self, user_id): - """Check if user exists. - - Args: - user_id (str): Complete @user:id - - Returns: - Deferred[str|None]: Canonical (case-corrected) user_id, or None - if the user is not registered. 
- """ - return self._check_user_exists(user_id) - - def register(self, localpart): - """Registers a new user with given localpart - - Returns: - Deferred: a 2-tuple of (user_id, access_token) - """ - reg = self.hs.get_handlers().registration_handler - return reg.register(localpart=localpart) - - def run_db_interaction(self, desc, func, *args, **kwargs): - """Run a function with a database connection - - Args: - desc (str): description for the transaction, for metrics etc - func (func): function to be run. Passed a database cursor object - as well as *args and **kwargs - *args: positional args to be passed to func - **kwargs: named args to be passed to func - - Returns: - Deferred[object]: result of func - """ - return self._store.runInteraction(desc, func, *args, **kwargs) diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py new file mode 100644 index 0000000000..9ccf6dfcd6 --- /dev/null +++ b/synapse/module_api/__init__.py @@ -0,0 +1,79 @@ +# -*- coding: utf-8 -*- +# Copyright 2017 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from synapse.types import UserID + + +class ModuleApi(object): + """A proxy object that gets passed to password auth providers so they + can register new users etc if necessary. + """ + def __init__(self, hs, auth_handler): + self.hs = hs + + self._store = hs.get_datastore() + self._auth_handler = auth_handler + + def get_qualified_user_id(self, username): + """Qualify a user id, if necessary + + Takes a user id provided by the user and adds the @ and :domain to + qualify it, if necessary + + Args: + username (str): provided user id + + Returns: + str: qualified @user:id + """ + if username.startswith('@'): + return username + return UserID(username, self.hs.hostname).to_string() + + def check_user_exists(self, user_id): + """Check if user exists. + + Args: + user_id (str): Complete @user:id + + Returns: + Deferred[str|None]: Canonical (case-corrected) user_id, or None + if the user is not registered. + """ + return self._auth_handler.check_user_exists(user_id) + + def register(self, localpart): + """Registers a new user with given localpart + + Returns: + Deferred: a 2-tuple of (user_id, access_token) + """ + reg = self.hs.get_handlers().registration_handler + return reg.register(localpart=localpart) + + def run_db_interaction(self, desc, func, *args, **kwargs): + """Run a function with a database connection + + Args: + desc (str): description for the transaction, for metrics etc + func (func): function to be run. 
Passed a database cursor object + as well as *args and **kwargs + *args: positional args to be passed to func + **kwargs: named args to be passed to func + + Returns: + Deferred[object]: result of func + """ + return self._store.runInteraction(desc, func, *args, **kwargs) From fcdfc911eef966ad6b1e0cbe9a4af5b1679d66bd Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 2 Nov 2017 14:18:24 +0000 Subject: [PATCH 0398/1637] Add a hook for custom rest endpoints Let the user specify custom modules which can be used for implementing extra endpoints. --- synapse/app/homeserver.py | 12 +++++++ synapse/config/server.py | 7 ++++ synapse/http/additional_resource.py | 55 +++++++++++++++++++++++++++++ 3 files changed, 74 insertions(+) create mode 100644 synapse/http/additional_resource.py diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 3adf72e141..8f2e08506a 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -30,6 +30,8 @@ from synapse.config._base import ConfigError from synapse.config.homeserver import HomeServerConfig from synapse.crypto import context_factory from synapse.federation.transport.server import TransportLayerServer +from synapse.module_api import ModuleApi +from synapse.http.additional_resource import AdditionalResource from synapse.http.server import RootRedirect from synapse.http.site import SynapseSite from synapse.metrics import register_memory_metrics @@ -49,6 +51,7 @@ from synapse.storage.prepare_database import UpgradeDatabaseException, prepare_d from synapse.util.httpresourcetree import create_resource_tree from synapse.util.logcontext import LoggingContext from synapse.util.manhole import manhole +from synapse.util.module_loader import load_module from synapse.util.rlimit import change_resource_limit from synapse.util.versionstring import get_version_string from twisted.application import service @@ -154,6 +157,15 @@ class SynapseHomeServer(HomeServer): if name == "metrics" and self.get_config().enable_metrics: resources[METRICS_PREFIX] = MetricsResource(self) + additional_resources = listener_config.get("additional_resources", {}) + logger.debug("Configuring additional resources: %r", + additional_resources) + module_api = ModuleApi(self, self.get_auth_handler()) + for path, resmodule in additional_resources.items(): + handler_cls, config = load_module(resmodule) + handler = handler_cls(config, module_api) + resources[path] = AdditionalResource(self, handler.handle_request) + if WEB_CLIENT_PREFIX in resources: root_resource = RootRedirect(WEB_CLIENT_PREFIX) else: diff --git a/synapse/config/server.py b/synapse/config/server.py index b66993dab9..4d9193536d 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -247,6 +247,13 @@ class ServerConfig(Config): - names: [federation] # Federation APIs compress: false + # optional list of additional endpoints which can be loaded via + # dynamic modules + # additional_resources: + # "/_matrix/my/custom/endpoint": + # module: my_module.CustomRequestHandler + # config: {} + # Unsecure HTTP listener, # For when matrix traffic passes through loadbalancer that unwraps TLS. 
- port: %(unsecure_port)s diff --git a/synapse/http/additional_resource.py b/synapse/http/additional_resource.py new file mode 100644 index 0000000000..343e932cb1 --- /dev/null +++ b/synapse/http/additional_resource.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2017 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from synapse.http.server import wrap_request_handler +from twisted.web.resource import Resource +from twisted.web.server import NOT_DONE_YET + + +class AdditionalResource(Resource): + """Resource wrapper for additional_resources + + If the user has configured additional_resources, we need to wrap the + handler class with a Resource so that we can map it into the resource tree. + + This class is also where we wrap the request handler with logging, metrics, + and exception handling. + """ + def __init__(self, hs, handler): + """Initialise AdditionalResource + + The ``handler`` should return a deferred which completes when it has + done handling the request. It should write a response with + ``request.write()``, and call ``request.finish()``. + + Args: + hs (synapse.server.HomeServer): homeserver + handler ((twisted.web.server.Request) -> twisted.internet.defer.Deferred): + function to be called to handle the request. + """ + Resource.__init__(self) + self._handler = handler + + # these are required by the request_handler wrapper + self.version_string = hs.version_string + self.clock = hs.get_clock() + + def render(self, request): + self._async_render(request) + return NOT_DONE_YET + + @wrap_request_handler + def _async_render(self, request): + return self._handler(request) From 6b60f7dca01a8959bc87909dbb24680eb755ed1d Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 2 Nov 2017 14:29:37 +0000 Subject: [PATCH 0399/1637] Add more hooks to ModuleApi add `get_user_by_req` and `invalidate_access_token` --- synapse/module_api/__init__.py | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 9ccf6dfcd6..dc680ddf43 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -24,8 +24,26 @@ class ModuleApi(object): self.hs = hs self._store = hs.get_datastore() + self._auth = hs.get_auth() self._auth_handler = auth_handler + def get_user_by_req(self, req, allow_guest=False): + """Check the access_token provided for a request + + Args: + req (twisted.web.server.Request): Incoming HTTP request + allow_guest (bool): True if guest users should be allowed. If this + is False, and the access token is for a guest user, an + AuthError will be thrown + Returns: + twisted.internet.defer.Deferred[synapse.types.Requester]: + the requester for this request + Raises: + synapse.api.errors.AuthError: if no user by that token exists, + or the token is invalid. 
+ """ + return self._auth.get_user_by_req(req, allow_guest) + def get_qualified_user_id(self, username): """Qualify a user id, if necessary @@ -63,6 +81,22 @@ class ModuleApi(object): reg = self.hs.get_handlers().registration_handler return reg.register(localpart=localpart) + def invalidate_access_token(self, access_token): + """Invalidate an access token for a user + + Args: + access_token(str): access token + + Returns: + twisted.internet.defer.Deferred - resolves once the access token + has been removed. + + Raises: + synapse.api.errors.AuthError: the access token is invalid + """ + + return self._auth_handler.delete_access_token(access_token) + def run_db_interaction(self, desc, func, *args, **kwargs): """Run a function with a database connection From 6c3a02072b8c5b4694e91bf597dbf14ac3d81cea Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 2 Nov 2017 16:31:07 +0000 Subject: [PATCH 0400/1637] support inhibit_login in /register Allow things to pass inhibit_login when registering to ... inhibit logins. --- synapse/rest/client/v2_alpha/register.py | 30 ++++++++++++++---------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index a077146c89..eebd071e59 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -557,24 +557,28 @@ class RegisterRestServlet(RestServlet): Args: (str) user_id: full canonical @user:id (object) params: registration parameters, from which we pull - device_id and initial_device_name + device_id, initial_device_name and inhibit_login Returns: defer.Deferred: (object) dictionary for response from /register """ - device_id = yield self._register_device(user_id, params) - - access_token = ( - yield self.auth_handler.get_access_token_for_user_id( - user_id, device_id=device_id, - ) - ) - - defer.returnValue({ + result = { "user_id": user_id, - "access_token": access_token, "home_server": self.hs.hostname, - "device_id": device_id, - }) + } + if not params.get("inhibit_login", False): + device_id = yield self._register_device(user_id, params) + + access_token = ( + yield self.auth_handler.get_access_token_for_user_id( + user_id, device_id=device_id, + ) + ) + + result.update({ + "access_token": access_token, + "device_id": device_id, + }) + defer.returnValue(result) def _register_device(self, user_id, params): """Register a device for a user. From a34c586a89e06b0c68d58ab2f29a2dd5281df893 Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Thu, 2 Nov 2017 16:41:07 +0000 Subject: [PATCH 0401/1637] Make the get_rooms_in_group API more sane Return entries with is_public = True when they're public and is_public = False otherwise. 
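
The shape of the fix is easiest to see side by side; a small sketch (both function names are invented for this comparison) of how a room entry is annotated before and after the change:

    # Before: public rooms carried no "is_public" key at all.
    def annotate_entry_old(entry, is_public):
        if not is_public:
            entry["is_public"] = False
        return entry

    # After: every entry gets an explicit boolean, whatever the DB returned
    # (e.g. SQLite hands back 0/1 rather than real booleans).
    def annotate_entry_new(entry, is_public):
        entry["is_public"] = bool(is_public)
        return entry

    assert "is_public" not in annotate_entry_old({}, True)
    assert annotate_entry_new({}, 1) == {"is_public": True}
    assert annotate_entry_new({}, 0) == {"is_public": False}
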
--- synapse/groups/groups_server.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index e21ac8e49e..addc70ce94 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -507,7 +507,6 @@ class GroupsServerHandler(object): chunk = [] for room_result in room_results: room_id = room_result["room_id"] - is_public = room_result["is_public"] joined_users = yield self.store.get_users_in_room(room_id) entry = yield self.room_list_handler.generate_room_entry( @@ -518,8 +517,7 @@ class GroupsServerHandler(object): if not entry: continue - if not is_public: - entry["is_public"] = False + entry["is_public"] = bool(room_result["is_public"]) chunk.append(entry) From 45fbe4ff67b7a7b51cbe474572947dc8e7b8c0da Mon Sep 17 00:00:00 2001 From: Ilya Zhuravlev Date: Thu, 2 Nov 2017 22:49:43 +0300 Subject: [PATCH 0402/1637] Fix appservices being backlogged and not receiving new events due to a bug in notify_interested_services --- synapse/handlers/appservice.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index 05af54d31b..5ce752a196 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -74,7 +74,7 @@ class ApplicationServicesHandler(object): limit = 100 while True: upper_bound, events = yield self.store.get_new_events_for_appservice( - upper_bound, limit + self.current_max, limit ) if not events: @@ -105,9 +105,6 @@ class ApplicationServicesHandler(object): ) yield self.store.set_appservice_last_pos(upper_bound) - - if len(events) < limit: - break finally: self.is_processing = False From 8a4a0ddea60260014ff09eb0a72b9e30fe43c9e8 Mon Sep 17 00:00:00 2001 From: Ilya Zhuravlev Date: Thu, 2 Nov 2017 23:11:28 +0300 Subject: [PATCH 0403/1637] Fix appservice tests to account for new behavior of notify_interested_services --- tests/handlers/test_appservice.py | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py index 7fe88172c0..a667fb6f0e 100644 --- a/tests/handlers/test_appservice.py +++ b/tests/handlers/test_appservice.py @@ -53,7 +53,10 @@ class AppServiceHandlerTestCase(unittest.TestCase): type="m.room.message", room_id="!foo:bar" ) - self.mock_store.get_new_events_for_appservice.return_value = (0, [event]) + self.mock_store.get_new_events_for_appservice.side_effect = [ + (0, [event]), + (0, []) + ] self.mock_as_api.push = Mock() yield self.handler.notify_interested_services(0) self.mock_scheduler.submit_event_for_as.assert_called_once_with( @@ -75,7 +78,10 @@ class AppServiceHandlerTestCase(unittest.TestCase): ) self.mock_as_api.push = Mock() self.mock_as_api.query_user = Mock() - self.mock_store.get_new_events_for_appservice.return_value = (0, [event]) + self.mock_store.get_new_events_for_appservice.side_effect = [ + (0, [event]), + (0, []) + ] yield self.handler.notify_interested_services(0) self.mock_as_api.query_user.assert_called_once_with( services[0], user_id @@ -98,7 +104,10 @@ class AppServiceHandlerTestCase(unittest.TestCase): ) self.mock_as_api.push = Mock() self.mock_as_api.query_user = Mock() - self.mock_store.get_new_events_for_appservice.return_value = (0, [event]) + self.mock_store.get_new_events_for_appservice.side_effect = [ + (0, [event]), + (0, []) + ] yield self.handler.notify_interested_services(0) self.assertFalse( self.mock_as_api.query_user.called, From 
fa4f337b49b9a417cbb3a555cd959b1be36cc666 Mon Sep 17 00:00:00 2001
From: Francois Granade
Date: Fri, 3 Nov 2017 18:25:04 +0100
Subject: [PATCH 0404/1637] Fix for issue 2635: correctly update rooms avatar/display name when modified by admin

---
 synapse/handlers/profile.py | 20 ++++++++------------
 1 file changed, 8 insertions(+), 12 deletions(-)

diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
index 62b9bd503e..d6646825d8 100644
--- a/synapse/handlers/profile.py
+++ b/synapse/handlers/profile.py
@@ -140,7 +140,7 @@ class ProfileHandler(BaseHandler):
             target_user.localpart, new_displayname
         )
 
-        yield self._update_join_states(requester)
+        yield self._update_join_states(requester, target_user)
 
     @defer.inlineCallbacks
     def get_avatar_url(self, target_user):
@@ -184,7 +184,7 @@ class ProfileHandler(BaseHandler):
             target_user.localpart, new_avatar_url
         )
 
-        yield self._update_join_states(requester)
+        yield self._update_join_states(requester, target_user)
 
     @defer.inlineCallbacks
     def on_profile_query(self, args):
@@ -209,28 +209,24 @@ class ProfileHandler(BaseHandler):
         defer.returnValue(response)
 
     @defer.inlineCallbacks
-    def _update_join_states(self, requester):
-        user = requester.user
-        if not self.hs.is_mine(user):
+    def _update_join_states(self, requester, target_user):
+        if not self.hs.is_mine(target_user):
             return
 
         yield self.ratelimit(requester)
 
         room_ids = yield self.store.get_rooms_for_user(
-            user.to_string(),
+            target_user.to_string(),
         )
 
         for room_id in room_ids:
            handler = self.hs.get_handlers().room_member_handler
            try:
-                # Assume the user isn't a guest because we don't let guests set
-                # profile or avatar data.
-                # XXX why are we recreating `requester` here for each room?
-                # what was wrong with the `requester` we were passed?
-                requester = synapse.types.create_requester(user)
+                # Assume the target_user isn't a guest,
+                # because we don't let guests set profile or avatar data.
                 yield handler.update_membership(
                     requester,
-                    user,
+                    target_user,
                     room_id,
                     "join",  # We treat a profile update like a join.
                     ratelimit=False,  # Try to hide that these events aren't atomic.

From f103b91ffa536d3d36697c159d6c13a7b952ba3a Mon Sep 17 00:00:00 2001
From: Francois Granade
Date: Fri, 3 Nov 2017 18:45:49 +0100
Subject: [PATCH 0405/1637] removed unused import flagged by flake8

---
 synapse/handlers/profile.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
index d6646825d8..5e5b1952dd 100644
--- a/synapse/handlers/profile.py
+++ b/synapse/handlers/profile.py
@@ -17,7 +17,6 @@ import logging
 
 from twisted.internet import defer
 
-import synapse.types
 from synapse.api.errors import SynapseError, AuthError, CodeMessageException
 from synapse.types import UserID, get_domain_from_id
 from ._base import BaseHandler

From 805196fbeb396623b30a6d748863046377223af6 Mon Sep 17 00:00:00 2001
From: Slavi Pantaleev
Date: Sat, 4 Nov 2017 09:47:25 +0200
Subject: [PATCH 0406/1637] Avoid no-op media deletes

If there are no media entries to delete, avoid creating transactions, prepared statements and unnecessary log entries.
Signed-off-by: Slavi Pantaleev --- synapse/storage/media_repository.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/synapse/storage/media_repository.py b/synapse/storage/media_repository.py index 7110a71279..52e5cdad70 100644 --- a/synapse/storage/media_repository.py +++ b/synapse/storage/media_repository.py @@ -254,6 +254,9 @@ class MediaRepositoryStore(SQLBaseStore): return self.runInteraction("get_expired_url_cache", _get_expired_url_cache_txn) def delete_url_cache(self, media_ids): + if len(media_ids) == 0: + return + sql = ( "DELETE FROM local_media_repository_url_cache" " WHERE media_id = ?" @@ -281,6 +284,9 @@ class MediaRepositoryStore(SQLBaseStore): ) def delete_url_cache_media(self, media_ids): + if len(media_ids) == 0: + return + def _delete_url_cache_media_txn(txn): sql = ( "DELETE FROM local_media_repository" From 2ac6deafb7a2f00579092693e0392730a08a6b82 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sat, 4 Nov 2017 19:34:59 +0000 Subject: [PATCH 0407/1637] simplify instructions for regenerating user_dir --- docs/user_directory.md | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) create mode 100644 docs/user_directory.md diff --git a/docs/user_directory.md b/docs/user_directory.md new file mode 100644 index 0000000000..4c8ee44f37 --- /dev/null +++ b/docs/user_directory.md @@ -0,0 +1,17 @@ +User Directory API Implementation +================================= + +The user directory is currently maintained based on the 'visible' users +on this particular server - i.e. ones which your account shares a room with, or +who are present in a publicly viewable room present on the server. + +The directory info is stored in various tables, which can (typically after +DB corruption) get stale or out of sync. If this happens, for now the +quickest solution to fix it is: + +``` +UPDATE user_directory_stream_pos SET stream_id = NULL; +``` + +and restart the synapse, which should then start a background task to +flush the current tables and regenerate the directory. From d1622e080f6711ef1fef52560fc286212f2cbeb4 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sat, 4 Nov 2017 19:35:14 +0000 Subject: [PATCH 0408/1637] s/intial/initial/ --- synapse/handlers/user_directory.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py index 2a49456bfc..b5be5d9623 100644 --- a/synapse/handlers/user_directory.py +++ b/synapse/handlers/user_directory.py @@ -152,7 +152,7 @@ class UserDirectoyHandler(object): for room_id in room_ids: logger.info("Handling room %d/%d", num_processed_rooms, len(room_ids)) - yield self._handle_intial_room(room_id) + yield self._handle_initial_room(room_id) num_processed_rooms += 1 yield sleep(self.INITIAL_SLEEP_MS / 1000.) 
@@ -166,7 +166,7 @@ class UserDirectoyHandler(object): yield self.store.update_user_directory_stream_pos(new_pos) @defer.inlineCallbacks - def _handle_intial_room(self, room_id): + def _handle_initial_room(self, room_id): """Called when we initially fill out user_directory one room at a time """ is_in_room = yield self.store.is_host_joined(room_id, self.server_name) From b6b075fd49c785316f44bac3a5641f746f36b91f Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sat, 4 Nov 2017 19:35:33 +0000 Subject: [PATCH 0409/1637] s/popualte/populate/ --- synapse/storage/schema/delta/43/user_share.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/schema/delta/43/user_share.sql b/synapse/storage/schema/delta/43/user_share.sql index 4501d90cbb..ee7062abe4 100644 --- a/synapse/storage/schema/delta/43/user_share.sql +++ b/synapse/storage/schema/delta/43/user_share.sql @@ -29,5 +29,5 @@ CREATE INDEX users_who_share_rooms_r_idx ON users_who_share_rooms(room_id); CREATE INDEX users_who_share_rooms_o_idx ON users_who_share_rooms(other_user_id); --- Make sure that we popualte the table initially +-- Make sure that we populate the table initially UPDATE user_directory_stream_pos SET stream_id = NULL; From a100700630fd0fd3cd1c1de5e64ea6e30dc6c71f Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sat, 4 Nov 2017 19:35:49 +0000 Subject: [PATCH 0410/1637] fix copyright.... --- synapse/storage/schema/delta/46/group_server.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/schema/delta/46/group_server.sql b/synapse/storage/schema/delta/46/group_server.sql index e754b554f8..097679bc9a 100644 --- a/synapse/storage/schema/delta/46/group_server.sql +++ b/synapse/storage/schema/delta/46/group_server.sql @@ -1,4 +1,4 @@ -/* Copyright 2017 Vector Creations Ltd +/* Copyright 2017 New Vector Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. From d802e8ca6ad92fcb4ca1a8e12fb440b735205e42 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sat, 4 Nov 2017 19:38:13 +0000 Subject: [PATCH 0411/1637] s/users_in_pubic_room/users_in_public_rooms/g --- .../schema/delta/46/user_dir_typos.sql | 22 +++++++++++++++++++ synapse/storage/user_directory.py | 20 ++++++++--------- 2 files changed, 32 insertions(+), 10 deletions(-) create mode 100644 synapse/storage/schema/delta/46/user_dir_typos.sql diff --git a/synapse/storage/schema/delta/46/user_dir_typos.sql b/synapse/storage/schema/delta/46/user_dir_typos.sql new file mode 100644 index 0000000000..47b9738e65 --- /dev/null +++ b/synapse/storage/schema/delta/46/user_dir_typos.sql @@ -0,0 +1,22 @@ +/* Copyright 2017 New Vector Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +-- this is just embarassing :| +ALTER TABLE users_in_pubic_room RENAME TO users_in_public_rooms; + +DROP INDEX users_in_pubic_room_room_idx; +DROP INDEX users_in_pubic_room_user_idx; +CREATE INDEX users_in_pubic_room_room_idx ON users_in_public_rooms(room_id); +CREATE UNIQUE INDEX users_in_pubic_room_user_idx ON users_in_public_rooms(user_id); diff --git a/synapse/storage/user_directory.py b/synapse/storage/user_directory.py index 2a4db3f03c..5dc5b9582a 100644 --- a/synapse/storage/user_directory.py +++ b/synapse/storage/user_directory.py @@ -63,7 +63,7 @@ class UserDirectoryStore(SQLBaseStore): user_ids (list(str)): Users to add """ yield self._simple_insert_many( - table="users_in_pubic_room", + table="users_in_public_rooms", values=[ { "user_id": user_id, @@ -219,7 +219,7 @@ class UserDirectoryStore(SQLBaseStore): @defer.inlineCallbacks def update_user_in_public_user_list(self, user_id, room_id): yield self._simple_update_one( - table="users_in_pubic_room", + table="users_in_public_rooms", keyvalues={"user_id": user_id}, updatevalues={"room_id": room_id}, desc="update_user_in_public_user_list", @@ -240,7 +240,7 @@ class UserDirectoryStore(SQLBaseStore): ) self._simple_delete_txn( txn, - table="users_in_pubic_room", + table="users_in_public_rooms", keyvalues={"user_id": user_id}, ) txn.call_after( @@ -256,7 +256,7 @@ class UserDirectoryStore(SQLBaseStore): @defer.inlineCallbacks def remove_from_user_in_public_room(self, user_id): yield self._simple_delete( - table="users_in_pubic_room", + table="users_in_public_rooms", keyvalues={"user_id": user_id}, desc="remove_from_user_in_public_room", ) @@ -267,7 +267,7 @@ class UserDirectoryStore(SQLBaseStore): in the given room_id """ return self._simple_select_onecol( - table="users_in_pubic_room", + table="users_in_public_rooms", keyvalues={"room_id": room_id}, retcol="user_id", desc="get_users_in_public_due_to_room", @@ -286,7 +286,7 @@ class UserDirectoryStore(SQLBaseStore): ) user_ids_pub = yield self._simple_select_onecol( - table="users_in_pubic_room", + table="users_in_public_rooms", keyvalues={"room_id": room_id}, retcol="user_id", desc="get_users_in_dir_due_to_room", @@ -514,7 +514,7 @@ class UserDirectoryStore(SQLBaseStore): def _delete_all_from_user_dir_txn(txn): txn.execute("DELETE FROM user_directory") txn.execute("DELETE FROM user_directory_search") - txn.execute("DELETE FROM users_in_pubic_room") + txn.execute("DELETE FROM users_in_public_rooms") txn.execute("DELETE FROM users_who_share_rooms") txn.call_after(self.get_user_in_directory.invalidate_all) txn.call_after(self.get_user_in_public_room.invalidate_all) @@ -537,7 +537,7 @@ class UserDirectoryStore(SQLBaseStore): @cached() def get_user_in_public_room(self, user_id): return self._simple_select_one( - table="users_in_pubic_room", + table="users_in_public_rooms", keyvalues={"user_id": user_id}, retcols=("room_id",), allow_none=True, @@ -641,7 +641,7 @@ class UserDirectoryStore(SQLBaseStore): SELECT d.user_id, display_name, avatar_url FROM user_directory_search INNER JOIN user_directory AS d USING (user_id) - LEFT JOIN users_in_pubic_room AS p USING (user_id) + LEFT JOIN users_in_public_rooms AS p USING (user_id) LEFT JOIN ( SELECT other_user_id AS user_id FROM users_who_share_rooms WHERE user_id = ? 
AND share_private @@ -680,7 +680,7 @@ class UserDirectoryStore(SQLBaseStore): SELECT d.user_id, display_name, avatar_url FROM user_directory_search INNER JOIN user_directory AS d USING (user_id) - LEFT JOIN users_in_pubic_room AS p USING (user_id) + LEFT JOIN users_in_public_rooms AS p USING (user_id) LEFT JOIN ( SELECT other_user_id AS user_id FROM users_who_share_rooms WHERE user_id = ? AND share_private From 4ad883398ffbf700ba9a448adb01605f72868ff4 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sat, 4 Nov 2017 19:39:40 +0000 Subject: [PATCH 0412/1637] s/users_in_pubic_room/users_in_public_rooms/g --- synapse/storage/schema/delta/46/user_dir_typos.sql | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/storage/schema/delta/46/user_dir_typos.sql b/synapse/storage/schema/delta/46/user_dir_typos.sql index 47b9738e65..83ac0f2cef 100644 --- a/synapse/storage/schema/delta/46/user_dir_typos.sql +++ b/synapse/storage/schema/delta/46/user_dir_typos.sql @@ -18,5 +18,5 @@ ALTER TABLE users_in_pubic_room RENAME TO users_in_public_rooms; DROP INDEX users_in_pubic_room_room_idx; DROP INDEX users_in_pubic_room_user_idx; -CREATE INDEX users_in_pubic_room_room_idx ON users_in_public_rooms(room_id); -CREATE UNIQUE INDEX users_in_pubic_room_user_idx ON users_in_public_rooms(user_id); +CREATE INDEX users_in_public_rooms_room_idx ON users_in_public_rooms(room_id); +CREATE UNIQUE INDEX users_in_public_rooms_user_idx ON users_in_public_rooms(user_id); From bf993db11cac53cd4e77f2f7df07a5d8987b105e Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 7 Nov 2017 00:48:57 +0000 Subject: [PATCH 0413/1637] Logging and logcontext fixes for Limiter Add some logging to the Limiter in a similar spirit to the Linearizer, to help debug issues. Also fix a logcontext leak. Also refactor slightly to avoid throwing exceptions. --- synapse/util/async.py | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/synapse/util/async.py b/synapse/util/async.py index 1a884e96ee..e786fb38a9 100644 --- a/synapse/util/async.py +++ b/synapse/util/async.py @@ -278,8 +278,13 @@ class Limiter(object): if entry[0] >= self.max_count: new_defer = defer.Deferred() entry[1].append(new_defer) + + logger.info("Waiting to acquire limiter lock for key %r", key) with PreserveLoggingContext(): yield new_defer + logger.info("Acquired limiter lock for key %r", key) + else: + logger.info("Acquired uncontended limiter lock for key %r", key) entry[0] += 1 @@ -288,16 +293,21 @@ class Limiter(object): try: yield finally: + logger.info("Releasing limiter lock for key %r", key) + # We've finished executing so check if there are any things # blocked waiting to execute and start one of them entry[0] -= 1 - try: - entry[1].pop(0).callback(None) - except IndexError: - # If nothing else is executing for this key then remove it - # from the map - if entry[0] == 0: - self.key_to_defer.pop(key, None) + + if entry[1]: + next_def = entry[1].pop(0) + + with PreserveLoggingContext(): + next_def.callback(None) + elif entry[0] == 0: + # We were the last thing for this key: remove it from the + # map. 
+ del self.key_to_defer[key] defer.returnValue(_ctx_manager()) From 631fa4a1b71a563161b9f1ee91fb08ca10691e98 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Tue, 7 Nov 2017 10:41:55 +0000 Subject: [PATCH 0414/1637] create new indexes before dropping old ones to keep safetynet in place --- synapse/storage/schema/delta/46/user_dir_typos.sql | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/synapse/storage/schema/delta/46/user_dir_typos.sql b/synapse/storage/schema/delta/46/user_dir_typos.sql index 83ac0f2cef..d9505f8da1 100644 --- a/synapse/storage/schema/delta/46/user_dir_typos.sql +++ b/synapse/storage/schema/delta/46/user_dir_typos.sql @@ -16,7 +16,9 @@ -- this is just embarassing :| ALTER TABLE users_in_pubic_room RENAME TO users_in_public_rooms; -DROP INDEX users_in_pubic_room_room_idx; -DROP INDEX users_in_pubic_room_user_idx; +-- this is only 300K rows on matrix.org and takes ~3s to generate the index, +-- so is hopefully not going to block anyone else for that long... CREATE INDEX users_in_public_rooms_room_idx ON users_in_public_rooms(room_id); CREATE UNIQUE INDEX users_in_public_rooms_user_idx ON users_in_public_rooms(user_id); +DROP INDEX users_in_pubic_room_room_idx; +DROP INDEX users_in_pubic_room_user_idx; From 5561c09091c084d38ebbe3e449aa85f2955b4dd6 Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Tue, 7 Nov 2017 11:18:45 +0000 Subject: [PATCH 0415/1637] Return whether a user is an admin within a group --- synapse/groups/groups_server.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index addc70ce94..11199dd215 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -426,14 +426,15 @@ class GroupsServerHandler(object): for user_result in user_results: g_user_id = user_result["user_id"] is_public = user_result["is_public"] + is_privileged = user_result["is_admin"] entry = {"user_id": g_user_id} profile = yield self.profile_handler.get_profile_from_cache(g_user_id) entry.update(profile) - if not is_public: - entry["is_public"] = False + entry["is_public"] = bool(is_public) + entry["is_privileged"] = bool(is_privileged) if not self.is_mine_id(g_user_id): attestation = yield self.store.get_remote_attestation(group_id, g_user_id) From 38b265cb515bba9899d41f58e3d67e654977b1c5 Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Tue, 7 Nov 2017 11:24:04 +0000 Subject: [PATCH 0416/1637] Remember to pick is_admin out of the db --- synapse/storage/group_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index f6924e1a32..6b261dcc0f 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -54,7 +54,7 @@ class GroupServerStore(SQLBaseStore): return self._simple_select_list( table="group_users", keyvalues=keyvalues, - retcols=("user_id", "is_public",), + retcols=("user_id", "is_public", "is_admin",), desc="get_users_in_group", ) From 44ad6dd4bf7dad7ebfe554eabf78e56ba7a964c7 Mon Sep 17 00:00:00 2001 From: Krombel Date: Tue, 7 Nov 2017 13:35:35 +0100 Subject: [PATCH 0417/1637] update prometheus-config to new format --- contrib/prometheus/README | 19 +++++---- contrib/prometheus/synapse.rules | 71 +++++++++++++++++++++++++------- 2 files changed, 67 insertions(+), 23 deletions(-) diff --git a/contrib/prometheus/README b/contrib/prometheus/README index eb91db2de2..d2ddb3f6e4 100644 --- a/contrib/prometheus/README +++ 
b/contrib/prometheus/README @@ -5,15 +5,20 @@ To use it, first install prometheus by following the instructions at http://prometheus.io/ -Then add a new job to the main prometheus.conf file: +Then add a new job to the main prometheus.yml file: - job: { - name: "synapse" + - job_name: "synapse" + metrics_path: "/_synapse/metrics" + # when endpoint uses https: + scheme: "https" - target_group: { - target: "http://SERVER.LOCATION.HERE:PORT/_synapse/metrics" - } - } + static_configs: + - targets: ['SERVER.LOCATION:PORT'] + +To use `synapse.rules` add + + rule_files: + - "/PATH/TO/synapse.rules" Metrics are disabled by default when running synapse; they must be enabled with the 'enable-metrics' option, either in the synapse config file or as a diff --git a/contrib/prometheus/synapse.rules b/contrib/prometheus/synapse.rules index b6f84174b0..07e37a885e 100644 --- a/contrib/prometheus/synapse.rules +++ b/contrib/prometheus/synapse.rules @@ -1,21 +1,60 @@ -synapse_federation_transaction_queue_pendingEdus:total = sum(synapse_federation_transaction_queue_pendingEdus or absent(synapse_federation_transaction_queue_pendingEdus)*0) -synapse_federation_transaction_queue_pendingPdus:total = sum(synapse_federation_transaction_queue_pendingPdus or absent(synapse_federation_transaction_queue_pendingPdus)*0) +groups: +- name: synapse + rules: + - record: "synapse_federation_transaction_queue_pendingEdus:total" + expr: "sum(synapse_federation_transaction_queue_pendingEdus or absent(synapse_federation_transaction_queue_pendingEdus)*0)" + - record: "synapse_federation_transaction_queue_pendingPdus:total" + expr: "sum(synapse_federation_transaction_queue_pendingPdus or absent(synapse_federation_transaction_queue_pendingPdus)*0)" + - record: 'synapse_http_server_requests:method' + labels: + servlet: "" + expr: "sum(synapse_http_server_requests) by (method)" + - record: 'synapse_http_server_requests:servlet' + labels: + method: "" + expr: 'sum(synapse_http_server_requests) by (servlet)' -synapse_http_server_requests:method{servlet=""} = sum(synapse_http_server_requests) by (method) -synapse_http_server_requests:servlet{method=""} = sum(synapse_http_server_requests) by (servlet) + - record: 'synapse_http_server_requests:total' + labels: + servlet: "" + expr: 'sum(synapse_http_server_requests:by_method) by (servlet)' -synapse_http_server_requests:total{servlet=""} = sum(synapse_http_server_requests:by_method) by (servlet) + - record: 'synapse_cache:hit_ratio_5m' + expr: 'rate(synapse_util_caches_cache:hits[5m]) / rate(synapse_util_caches_cache:total[5m])' + - record: 'synapse_cache:hit_ratio_30s' + expr: 'rate(synapse_util_caches_cache:hits[30s]) / rate(synapse_util_caches_cache:total[30s])' -synapse_cache:hit_ratio_5m = rate(synapse_util_caches_cache:hits[5m]) / rate(synapse_util_caches_cache:total[5m]) -synapse_cache:hit_ratio_30s = rate(synapse_util_caches_cache:hits[30s]) / rate(synapse_util_caches_cache:total[30s]) + - record: 'synapse_federation_client_sent' + labels: + type: "EDU" + expr: 'synapse_federation_client_sent_edus + 0' + - record: 'synapse_federation_client_sent' + labels: + type: "PDU" + expr: 'synapse_federation_client_sent_pdu_destinations:count + 0' + - record: 'synapse_federation_client_sent' + labels: + type: "Query" + expr: 'sum(synapse_federation_client_sent_queries) by (job)' -synapse_federation_client_sent{type="EDU"} = synapse_federation_client_sent_edus + 0 -synapse_federation_client_sent{type="PDU"} = synapse_federation_client_sent_pdu_destinations:count + 0 
-synapse_federation_client_sent{type="Query"} = sum(synapse_federation_client_sent_queries) by (job) + - record: 'synapse_federation_server_received' + labels: + type: "EDU" + expr: 'synapse_federation_server_received_edus + 0' + - record: 'synapse_federation_server_received' + labels: + type: "PDU" + expr: 'synapse_federation_server_received_pdus + 0' + - record: 'synapse_federation_server_received' + labels: + type: "Query" + expr: 'sum(synapse_federation_server_received_queries) by (job)' -synapse_federation_server_received{type="EDU"} = synapse_federation_server_received_edus + 0 -synapse_federation_server_received{type="PDU"} = synapse_federation_server_received_pdus + 0 -synapse_federation_server_received{type="Query"} = sum(synapse_federation_server_received_queries) by (job) - -synapse_federation_transaction_queue_pending{type="EDU"} = synapse_federation_transaction_queue_pending_edus + 0 -synapse_federation_transaction_queue_pending{type="PDU"} = synapse_federation_transaction_queue_pending_pdus + 0 + - record: 'synapse_federation_transaction_queue_pending' + labels: + type: "EDU" + expr: 'synapse_federation_transaction_queue_pending_edus + 0' + - record: 'synapse_federation_transaction_queue_pending' + labels: + type: "PDU" + expr: 'synapse_federation_transaction_queue_pending_pdus + 0' From 76c9af193cb46337cd1e618b46508d7f0ee912e7 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 7 Nov 2017 13:32:35 +0000 Subject: [PATCH 0418/1637] Revert "Merge branch 'master' of github.com:matrix-org/synapse into develop" This reverts commit f9b255cd62fe724e16b2222f6af623b2d39282ab, reversing changes made to 1bd654dabde776bbb7ee365c115b307cd6a110b8. --- synapse/groups/attestations.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/synapse/groups/attestations.py b/synapse/groups/attestations.py index 784af9cbcf..1fb709e6c3 100644 --- a/synapse/groups/attestations.py +++ b/synapse/groups/attestations.py @@ -53,6 +53,11 @@ logger = logging.getLogger(__name__) # Default validity duration for new attestations we create DEFAULT_ATTESTATION_LENGTH_MS = 3 * 24 * 60 * 60 * 1000 +# We add some jitter to the validity duration of attestations so that if we +# add lots of users at once we don't need to renew them all at once. +# The jitter is a multiplier picked randomly between the first and second number +DEFAULT_ATTESTATION_JITTER = (0.9, 1.3) + # Start trying to update our attestations when they come this close to expiring UPDATE_ATTESTATION_TIME_MS = 1 * 24 * 60 * 60 * 1000 @@ -101,10 +106,14 @@ class GroupAttestationSigning(object): """Create an attestation for the group_id and user_id with default validity length. """ + validity_period = DEFAULT_ATTESTATION_LENGTH_MS + validity_period *= random.uniform(*DEFAULT_ATTESTATION_JITTER) + valid_until_ms = int(self.clock.time_msec() + validity_period) + return sign_json({ "group_id": group_id, "user_id": user_id, - "valid_until_ms": self.clock.time_msec() + DEFAULT_ATTESTATION_LENGTH_MS, + "valid_until_ms": valid_until_ms, }, self.server_name, self.signing_key) From f5cf3638e9c6086e1c33ddad8eda9298cf53a58e Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 7 Nov 2017 16:43:00 +0000 Subject: [PATCH 0419/1637] move _state_group_cache to statestore this is internal to statestore, so let's keep it there. 
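The shape of this refactor (moving an attribute that only one subclass uses out of the shared base constructor) can be sketched with toy classes; these are illustrative stand-ins, not the real SQLBaseStore/StateStore:

```
CACHE_SIZE_FACTOR = 0.5  # stand-in for synapse.util.caches.CACHE_SIZE_FACTOR

class DictionaryCacheSketch(object):
    """Toy stand-in for synapse.util.caches.dictionary_cache.DictionaryCache."""
    def __init__(self, name, max_entries):
        self.name = name
        self.max_entries = max_entries

class BaseStoreSketch(object):
    def __init__(self):
        pass  # only state shared by *all* stores belongs here

class StateStoreSketch(BaseStoreSketch):
    def __init__(self):
        super(StateStoreSketch, self).__init__()
        # the cache is only used by the state store, so build it here
        self._state_group_cache = DictionaryCacheSketch(
            "*stateGroupCache*", int(100000 * CACHE_SIZE_FACTOR),
        )
```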
--- synapse/storage/_base.py | 6 ------ synapse/storage/state.py | 19 ++++++++++++------- 2 files changed, 12 insertions(+), 13 deletions(-) diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 6caf7b3356..a37d1934ec 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -16,8 +16,6 @@ import logging from synapse.api.errors import StoreError from synapse.util.logcontext import LoggingContext, PreserveLoggingContext -from synapse.util.caches import CACHE_SIZE_FACTOR -from synapse.util.caches.dictionary_cache import DictionaryCache from synapse.util.caches.descriptors import Cache from synapse.storage.engines import PostgresEngine import synapse.metrics @@ -180,10 +178,6 @@ class SQLBaseStore(object): self._get_event_cache = Cache("*getEvent*", keylen=3, max_entries=hs.config.event_cache_size) - self._state_group_cache = DictionaryCache( - "*stateGroupCache*", 100000 * CACHE_SIZE_FACTOR - ) - self._event_fetch_lock = threading.Condition() self._event_fetch_list = [] self._event_fetch_ongoing = 0 diff --git a/synapse/storage/state.py b/synapse/storage/state.py index 5673e4aa96..a1da3ad7a5 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -13,16 +13,17 @@ # See the License for the specific language governing permissions and # limitations under the License. -from ._base import SQLBaseStore -from synapse.util.caches.descriptors import cached, cachedList -from synapse.util.caches import intern_string -from synapse.util.stringutils import to_ascii -from synapse.storage.engines import PostgresEngine +from collections import namedtuple +import logging from twisted.internet import defer -from collections import namedtuple -import logging +from synapse.storage.engines import PostgresEngine +from synapse.util.caches import intern_string, CACHE_SIZE_FACTOR +from synapse.util.caches.descriptors import cached, cachedList +from synapse.util.caches.dictionary_cache import DictionaryCache +from synapse.util.stringutils import to_ascii +from ._base import SQLBaseStore logger = logging.getLogger(__name__) @@ -81,6 +82,10 @@ class StateStore(SQLBaseStore): where_clause="type='m.room.member'", ) + self._state_group_cache = DictionaryCache( + "*stateGroupCache*", 100000 * CACHE_SIZE_FACTOR + ) + @cached(max_entries=100000, iterable=True) def get_current_state_ids(self, room_id): """Get the current state event ids for a room based on the From 1ca42881350b26f58172dd2c82fcfd8d45ddd5c0 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 7 Nov 2017 16:43:00 +0000 Subject: [PATCH 0420/1637] factor out _update_context_for_auth_events This is duplicated, so let's factor it out before fixing it --- synapse/handlers/federation.py | 62 +++++++++++++++++++++++----------- 1 file changed, 42 insertions(+), 20 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 8b1e606754..6cceb8998e 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1706,6 +1706,17 @@ class FederationHandler(BaseHandler): @defer.inlineCallbacks @log_function def do_auth(self, origin, event, context, auth_events): + """ + + Args: + origin (str): + event (synapse.events.FrozenEvent): + context (synapse.events.snapshot.EventContext): + auth_events (dict[(str, str)->str]): + + Returns: + defer.Deferred[None] + """ # Check if we have all the auth events. 
current_state = set(e.event_id for e in auth_events.values()) event_auth_events = set(e_id for e_id, _ in event.auth_events) @@ -1817,16 +1828,9 @@ class FederationHandler(BaseHandler): current_state = set(e.event_id for e in auth_events.values()) different_auth = event_auth_events - current_state - context.current_state_ids = dict(context.current_state_ids) - context.current_state_ids.update({ - k: a.event_id for k, a in auth_events.items() - if k != event_key - }) - context.prev_state_ids = dict(context.prev_state_ids) - context.prev_state_ids.update({ - k: a.event_id for k, a in auth_events.items() - }) - context.state_group = self.store.get_next_state_group() + self._update_context_for_auth_events( + context, auth_events, event_key, + ) if different_auth and not event.internal_metadata.is_outlier(): logger.info("Different auth after resolution: %s", different_auth) @@ -1906,16 +1910,9 @@ class FederationHandler(BaseHandler): # 4. Look at rejects and their proofs. # TODO. - context.current_state_ids = dict(context.current_state_ids) - context.current_state_ids.update({ - k: a.event_id for k, a in auth_events.items() - if k != event_key - }) - context.prev_state_ids = dict(context.prev_state_ids) - context.prev_state_ids.update({ - k: a.event_id for k, a in auth_events.items() - }) - context.state_group = self.store.get_next_state_group() + self._update_context_for_auth_events( + context, auth_events, event_key, + ) try: self.auth.check(event, auth_events=auth_events) @@ -1923,6 +1920,31 @@ class FederationHandler(BaseHandler): logger.warn("Failed auth resolution for %r because %s", event, e) raise e + def _update_context_for_auth_events(self, context, auth_events, + event_key): + """Update the state_ids in an event context after auth event resolution + + Args: + context (synapse.events.snapshot.EventContext): event context + to be updated + + auth_events (dict[(str, str)->str]): Events to update in the event + context. + + event_key ((str, str)): (type, state_key) for the current event. + this will not be included in the current_state in the context. + """ + context.current_state_ids = dict(context.current_state_ids) + context.current_state_ids.update({ + k: a.event_id for k, a in auth_events.items() + if k != event_key + }) + context.prev_state_ids = dict(context.prev_state_ids) + context.prev_state_ids.update({ + k: a.event_id for k, a in auth_events.items() + }) + context.state_group = self.store.get_next_state_group() + @defer.inlineCallbacks def construct_auth_difference(self, local_auth, remote_auth): """ Given a local and remote auth chain, find the differences. This From 780dbb378fba8c7fb2eb154c2ce9f640ab52128f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 7 Nov 2017 16:43:00 +0000 Subject: [PATCH 0421/1637] Update deltas when doing auth resolution Fixes a bug where the persisted state groups were different to those actually being used after auth resolution. --- synapse/handlers/federation.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 6cceb8998e..b9e1b24dab 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1934,11 +1934,15 @@ class FederationHandler(BaseHandler): event_key ((str, str)): (type, state_key) for the current event. this will not be included in the current_state in the context. 
""" - context.current_state_ids = dict(context.current_state_ids) - context.current_state_ids.update({ + state_updates = { k: a.event_id for k, a in auth_events.items() if k != event_key - }) + } + context.current_state_ids = dict(context.current_state_ids) + context.current_state_ids.update(state_updates) + if context.delta_ids is not None: + context.delta_ids = dict(context.delta_ids) + context.delta_ids.update(state_updates) context.prev_state_ids = dict(context.prev_state_ids) context.prev_state_ids.update({ k: a.event_id for k, a in auth_events.items() From d46386d57e4756e3bd5ca6ed17337d54e73dbbbf Mon Sep 17 00:00:00 2001 From: Ilya Zhuravlev Date: Tue, 7 Nov 2017 22:23:22 +0300 Subject: [PATCH 0422/1637] Remove useless assignment in notify_interested_services --- synapse/handlers/appservice.py | 1 - 1 file changed, 1 deletion(-) diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index 5ce752a196..543bf28aec 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -70,7 +70,6 @@ class ApplicationServicesHandler(object): with Measure(self.clock, "notify_interested_services"): self.is_processing = True try: - upper_bound = self.current_max limit = 100 while True: upper_bound, events = yield self.store.get_new_events_for_appservice( From e148438e97c0d4bdd0dffbc3c1626dd4553f7d88 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 8 Nov 2017 09:21:41 +0000 Subject: [PATCH 0423/1637] s/items/iteritems/ --- synapse/handlers/federation.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index b9e1b24dab..ac70730885 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1935,7 +1935,7 @@ class FederationHandler(BaseHandler): this will not be included in the current_state in the context. """ state_updates = { - k: a.event_id for k, a in auth_events.items() + k: a.event_id for k, a in auth_events.iteritems() if k != event_key } context.current_state_ids = dict(context.current_state_ids) @@ -1945,7 +1945,7 @@ class FederationHandler(BaseHandler): context.delta_ids.update(state_updates) context.prev_state_ids = dict(context.prev_state_ids) context.prev_state_ids.update({ - k: a.event_id for k, a in auth_events.items() + k: a.event_id for k, a in auth_events.iteritems() }) context.state_group = self.store.get_next_state_group() From 2a98ba0ed31bdd51ea43c0867bee2a5256f2a289 Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 8 Nov 2017 10:35:30 +0000 Subject: [PATCH 0424/1637] Rename redact_content option to include_content The redact_content option never worked because it read the wrong config section. The PR introducing it (https://github.com/matrix-org/synapse/pull/2301) had feedback suggesting the name be changed to not re-use the term 'redact' but this wasn't incorporated. This reanmes the option to give it a less confusing name, and also means that people who've set the redact_content option won't suddenly see a behaviour change when upgrading synapse, but instead can set include_content if they want to. This PR also updates the wording of the config comment to clarify that this has no effect on event_id_only push. 
Includes https://github.com/matrix-org/synapse/pull/2422 --- synapse/config/push.py | 28 +++++++++++++--------------- synapse/push/httppusher.py | 3 ++- 2 files changed, 15 insertions(+), 16 deletions(-) diff --git a/synapse/config/push.py b/synapse/config/push.py index 9c68318b40..01d4a49784 100644 --- a/synapse/config/push.py +++ b/synapse/config/push.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd +# Copyright 2017 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -18,28 +19,25 @@ from ._base import Config class PushConfig(Config): def read_config(self, config): - self.push_redact_content = False + self.push_include_content = True - push_config = config.get("email", {}) - self.push_redact_content = push_config.get("redact_content", False) + push_config = config.get("push", {}) + self.push_include_content = push_config.get("include_content", True) def default_config(self, config_dir_path, server_name, **kwargs): return """ - # Control how push messages are sent to google/apple to notifications. - # Normally every message said in a room with one or more people using - # mobile devices will be posted to a push server hosted by matrix.org - # which is registered with google and apple in order to allow push - # notifications to be sent to these mobile devices. - # - # Setting redact_content to true will make the push messages contain no - # message content which will provide increased privacy. This is a - # temporary solution pending improvements to Android and iPhone apps - # to get content from the app rather than the notification. - # + # Clients requesting push notifications can either have the body of + # the message sent in the notification poke along with other details + # like the sender, or just the event ID and room ID (`event_id_only`). + # If clients choose the former, this option controls whether the + # notification request includes the content of the event (other details + # like the sender are still included). For `event_id_only` push, it + # has no effect. + # For modern android devices the notification content will still appear # because it is loaded by the app. iPhone, however will send a # notification saying only that a message arrived and who it came from. # #push: - # redact_content: false + # include_content: false """ diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index 74c0bc462c..c16f61452c 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd +# Copyright 2017 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -295,7 +296,7 @@ class HttpPusher(object): if event.type == 'm.room.member': d['notification']['membership'] = event.content['membership'] d['notification']['user_is_target'] = event.state_key == self.user_id - if not self.hs.config.push_redact_content and 'content' in event: + if self.hs.config.push_include_content and 'content' in event: d['notification']['content'] = event.content # We no longer send aliases separately, instead, we send the human From 1b870937ae2de0ba510f0e1db40ae0e9a316d83f Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 8 Nov 2017 11:46:24 +0000 Subject: [PATCH 0425/1637] Log if any of the old config flags are set --- synapse/config/push.py | 26 ++++++++++++++++++++++++-- 1 file changed, 24 insertions(+), 2 deletions(-) diff --git a/synapse/config/push.py b/synapse/config/push.py index 01d4a49784..861f5f31a7 100644 --- a/synapse/config/push.py +++ b/synapse/config/push.py @@ -16,14 +16,36 @@ from ._base import Config +import logging + +from twisted.internet import reactor + + +logger = logging.getLogger(__name__) + class PushConfig(Config): def read_config(self, config): - self.push_include_content = True - push_config = config.get("push", {}) self.push_include_content = push_config.get("include_content", True) + if push_config.get("redact_content") is not None: + reactor.callWhenRunning(lambda: logger.warn( + "The push.redact_content content option has never worked. " + "Please set push.include_content if you want this behaviour" + )) + + # There was a a 'redact_content' setting but mistakenly read from the + # 'email' section: check for it and honour it, with a warning. + push_config = config.get("email", {}) + redact_content = push_config.get("redact_content") + if redact_content is not None: + reactor.callWhenRunning(lambda: logger.warn( + "The 'email.redact_content' option is deprecated: " + "please set push.include_content instead" + )) + self.push_include_content = not redact_content + def default_config(self, config_dir_path, server_name, **kwargs): return """ # Clients requesting push notifications can either have the body of From ad408beb663052bc5700015db4716583f40a4536 Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 8 Nov 2017 11:50:08 +0000 Subject: [PATCH 0426/1637] better comments --- synapse/config/push.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/synapse/config/push.py b/synapse/config/push.py index 861f5f31a7..bbfeb05d50 100644 --- a/synapse/config/push.py +++ b/synapse/config/push.py @@ -29,14 +29,17 @@ class PushConfig(Config): push_config = config.get("push", {}) self.push_include_content = push_config.get("include_content", True) + # There was a a 'redact_content' setting but mistakenly read from the + # 'email'section'. Check for the flag in the 'push' section, and log, + # but do not honour it to avoid nasty surprises when people upgrade. if push_config.get("redact_content") is not None: reactor.callWhenRunning(lambda: logger.warn( "The push.redact_content content option has never worked. " "Please set push.include_content if you want this behaviour" )) - # There was a a 'redact_content' setting but mistakenly read from the - # 'email' section: check for it and honour it, with a warning. + # Now check for the one in the 'email' section and honour it, + # with a warning. 
push_config = config.get("email", {}) redact_content = push_config.get("redact_content") if redact_content is not None: From 94ff2cda73cbd1aa14b2dd07f9598733229abc00 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 8 Nov 2017 15:43:34 +0000 Subject: [PATCH 0427/1637] Revert "Modify group room association API to allow modification of is_public" --- synapse/federation/transport/client.py | 9 ++++----- synapse/federation/transport/server.py | 4 ++-- synapse/groups/groups_server.py | 13 +++++-------- synapse/handlers/groups_local.py | 4 ++-- synapse/rest/client/v2_alpha/groups.py | 4 ++-- synapse/storage/group_server.py | 20 +++++++------------- 6 files changed, 22 insertions(+), 32 deletions(-) diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index ed41dfc7ee..d25ae1b282 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -531,9 +531,9 @@ class TransportLayerClient(object): ignore_backoff=True, ) - def update_room_group_association(self, destination, group_id, requester_user_id, - room_id, content): - """Add or update an association between room and group + def add_room_to_group(self, destination, group_id, requester_user_id, room_id, + content): + """Add a room to a group """ path = PREFIX + "/groups/%s/room/%s" % (group_id, room_id,) @@ -545,8 +545,7 @@ class TransportLayerClient(object): ignore_backoff=True, ) - def delete_room_group_association(self, destination, group_id, requester_user_id, - room_id): + def remove_room_from_group(self, destination, group_id, requester_user_id, room_id): """Remove a room from a group """ path = PREFIX + "/groups/%s/room/%s" % (group_id, room_id,) diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index ded6d4edc9..8f3c14c303 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -684,7 +684,7 @@ class FederationGroupsAddRoomsServlet(BaseFederationServlet): if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") - new_content = yield self.handler.update_room_group_association( + new_content = yield self.handler.add_room_to_group( group_id, requester_user_id, room_id, content ) @@ -696,7 +696,7 @@ class FederationGroupsAddRoomsServlet(BaseFederationServlet): if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") - new_content = yield self.handler.delete_room_group_association( + new_content = yield self.handler.remove_room_from_group( group_id, requester_user_id, room_id, ) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 11199dd215..81f21a36f5 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -530,9 +530,8 @@ class GroupsServerHandler(object): }) @defer.inlineCallbacks - def update_room_group_association(self, group_id, requester_user_id, room_id, - content): - """Add or update an association between room and group + def add_room_to_group(self, group_id, requester_user_id, room_id, content): + """Add room to group """ RoomID.from_string(room_id) # Ensure valid room id @@ -542,21 +541,19 @@ class GroupsServerHandler(object): is_public = _parse_visibility_from_contents(content) - yield self.store.update_room_group_association( - group_id, room_id, is_public=is_public - ) + yield self.store.add_room_to_group(group_id, room_id, is_public=is_public) defer.returnValue({}) 
@defer.inlineCallbacks - def delete_room_group_association(self, group_id, requester_user_id, room_id): + def remove_room_from_group(self, group_id, requester_user_id, room_id): """Remove room from group """ yield self.check_group_is_ours( group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id ) - yield self.store.delete_room_group_association(group_id, room_id) + yield self.store.remove_room_from_group(group_id, room_id) defer.returnValue({}) diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index dabc2a3fbb..6699d0888f 100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -70,8 +70,8 @@ class GroupsLocalHandler(object): get_invited_users_in_group = _create_rerouter("get_invited_users_in_group") - update_room_group_association = _create_rerouter("update_room_group_association") - delete_room_group_association = _create_rerouter("delete_room_group_association") + add_room_to_group = _create_rerouter("add_room_to_group") + remove_room_from_group = _create_rerouter("remove_room_from_group") update_group_summary_room = _create_rerouter("update_group_summary_room") delete_group_summary_room = _create_rerouter("delete_group_summary_room") diff --git a/synapse/rest/client/v2_alpha/groups.py b/synapse/rest/client/v2_alpha/groups.py index 792608cd48..c97885cfc7 100644 --- a/synapse/rest/client/v2_alpha/groups.py +++ b/synapse/rest/client/v2_alpha/groups.py @@ -451,7 +451,7 @@ class GroupAdminRoomsServlet(RestServlet): requester_user_id = requester.user.to_string() content = parse_json_object_from_request(request) - result = yield self.groups_handler.update_room_group_association( + result = yield self.groups_handler.add_room_to_group( group_id, requester_user_id, room_id, content, ) @@ -462,7 +462,7 @@ class GroupAdminRoomsServlet(RestServlet): requester = yield self.auth.get_user_by_req(request) requester_user_id = requester.user.to_string() - result = yield self.groups_handler.delete_room_group_association( + result = yield self.groups_handler.remove_room_from_group( group_id, requester_user_id, room_id, ) diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index 6b261dcc0f..ede6bdfaee 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -846,25 +846,19 @@ class GroupServerStore(SQLBaseStore): ) return self.runInteraction("remove_user_from_group", _remove_user_from_group_txn) - def update_room_group_association(self, group_id, room_id, is_public): - return self._simple_upsert( + def add_room_to_group(self, group_id, room_id, is_public): + return self._simple_insert( table="group_rooms", - keyvalues={ + values={ "group_id": group_id, "room_id": room_id, - }, - values={ "is_public": is_public, }, - insertion_values={ - "group_id": group_id, - "room_id": room_id, - }, - desc="update_room_group_association", + desc="add_room_to_group", ) - def delete_room_group_association(self, group_id, room_id): - def _delete_room_group_association_txn(txn): + def remove_room_from_group(self, group_id, room_id): + def _remove_room_from_group_txn(txn): self._simple_delete_txn( txn, table="group_rooms", @@ -883,7 +877,7 @@ class GroupServerStore(SQLBaseStore): }, ) return self.runInteraction( - "delete_room_group_association", _delete_room_group_association_txn, + "remove_room_from_group", _remove_room_from_group_txn, ) def get_publicised_groups_for_user(self, user_id): From e8814410ef682a1e277a1cfe6fda7268fd7a33d6 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 
8 Nov 2017 16:13:27 +0000 Subject: [PATCH 0428/1637] Have an explicit API to update room config --- synapse/federation/transport/client.py | 14 +++++++++++++ synapse/federation/transport/server.py | 23 +++++++++++++++++++++- synapse/groups/groups_server.py | 23 ++++++++++++++++++++++ synapse/handlers/groups_local.py | 1 + synapse/rest/client/v2_alpha/groups.py | 27 ++++++++++++++++++++++++++ synapse/storage/group_server.py | 13 +++++++++++++ 6 files changed, 100 insertions(+), 1 deletion(-) diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index d25ae1b282..1f3ce238f6 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -545,6 +545,20 @@ class TransportLayerClient(object): ignore_backoff=True, ) + def update_room_in_group(self, destination, group_id, requester_user_id, room_id, + config_key, content): + """Update room in group + """ + path = PREFIX + "/groups/%s/room/%s/config/%s" % (group_id, room_id, config_key,) + + return self.client.post_json( + destination=destination, + path=path, + args={"requester_user_id": requester_user_id}, + data=content, + ignore_backoff=True, + ) + def remove_room_from_group(self, destination, group_id, requester_user_id, room_id): """Remove a room from a group """ diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 8f3c14c303..6ef6cce592 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -676,7 +676,7 @@ class FederationGroupsRoomsServlet(BaseFederationServlet): class FederationGroupsAddRoomsServlet(BaseFederationServlet): """Add/remove room from group """ - PATH = "/groups/(?P[^/]*)/room/(?)$" + PATH = "/groups/(?P[^/]*)/room/(?P[^/]*)$" @defer.inlineCallbacks def on_POST(self, origin, content, query, group_id, room_id): @@ -703,6 +703,25 @@ class FederationGroupsAddRoomsServlet(BaseFederationServlet): defer.returnValue((200, new_content)) +class FederationGroupsAddRoomsConfigServlet(BaseFederationServlet): + """Update room config in group + """ + PATH = "/groups/(?P[^/]*)/room/(?P[^/]*)" + "/config/(?P[^/]*)$" + + @defer.inlineCallbacks + def on_POST(self, origin, content, query, group_id, room_id, config_key): + requester_user_id = parse_string_from_args(query, "requester_user_id") + if get_domain_from_id(requester_user_id) != origin: + raise SynapseError(403, "requester_user_id doesn't match origin") + + result = yield self.groups_handler.update_room_in_group( + group_id, requester_user_id, room_id, config_key, content, + ) + + defer.returnValue((200, result)) + + class FederationGroupsUsersServlet(BaseFederationServlet): """Get the users in a group on behalf of a user """ @@ -1142,6 +1161,8 @@ GROUP_SERVER_SERVLET_CLASSES = ( FederationGroupsRolesServlet, FederationGroupsRoleServlet, FederationGroupsSummaryUsersServlet, + FederationGroupsAddRoomsServlet, + FederationGroupsAddRoomsConfigServlet, ) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 81f21a36f5..a8039f4788 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -545,6 +545,29 @@ class GroupsServerHandler(object): defer.returnValue({}) + @defer.inlineCallbacks + def update_room_in_group(self, group_id, requester_user_id, room_id, config_key, + content): + """Update room in group + """ + RoomID.from_string(room_id) # Ensure valid room id + + yield self.check_group_is_ours( + group_id, requester_user_id, and_exists=True, 
and_is_admin=requester_user_id + ) + + if config_key == "visibility": + is_public = _parse_visibility_from_contents(content) + + yield self.store.update_room_in_group_visibility( + group_id, room_id, + is_public=is_public, + ) + else: + raise SynapseError(400, "Uknown config option") + + defer.returnValue({}) + @defer.inlineCallbacks def remove_room_from_group(self, group_id, requester_user_id, room_id): """Remove room from group diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index 6699d0888f..da00aeb0f4 100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -71,6 +71,7 @@ class GroupsLocalHandler(object): get_invited_users_in_group = _create_rerouter("get_invited_users_in_group") add_room_to_group = _create_rerouter("add_room_to_group") + update_room_in_group = _create_rerouter("update_room_in_group") remove_room_from_group = _create_rerouter("remove_room_from_group") update_group_summary_room = _create_rerouter("update_group_summary_room") diff --git a/synapse/rest/client/v2_alpha/groups.py b/synapse/rest/client/v2_alpha/groups.py index c97885cfc7..67f163e812 100644 --- a/synapse/rest/client/v2_alpha/groups.py +++ b/synapse/rest/client/v2_alpha/groups.py @@ -469,6 +469,33 @@ class GroupAdminRoomsServlet(RestServlet): defer.returnValue((200, result)) +class GroupAdminRoomsConfigServlet(RestServlet): + """Update the config of a room in a group + """ + PATTERNS = client_v2_patterns( + "/groups/(?P[^/]*)/admin/rooms/(?P[^/]*)" + "/config/(?P[^/]*)$" + ) + + def __init__(self, hs): + super(GroupAdminRoomsConfigServlet, self).__init__() + self.auth = hs.get_auth() + self.clock = hs.get_clock() + self.groups_handler = hs.get_groups_local_handler() + + @defer.inlineCallbacks + def on_PUT(self, request, group_id, room_id, config_key): + requester = yield self.auth.get_user_by_req(request) + requester_user_id = requester.user.to_string() + + content = parse_json_object_from_request(request) + result = yield self.groups_handler.update_room_in_group( + group_id, requester_user_id, room_id, config_key, content, + ) + + defer.returnValue((200, result)) + + class GroupAdminUsersInviteServlet(RestServlet): """Invite a user to the group """ diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index ede6bdfaee..6cb4ac28be 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -857,6 +857,19 @@ class GroupServerStore(SQLBaseStore): desc="add_room_to_group", ) + def update_room_in_group_visibility(self, group_id, room_id, is_public): + return self._simple_update( + table="group_rooms", + keyvalues={ + "group_id": group_id, + "room_id": room_id, + }, + values={ + "is_public": is_public, + }, + desc="update_room_in_group_visibility", + ) + def remove_room_from_group(self, group_id, room_id): def _remove_room_from_group_txn(txn): self._simple_delete_txn( From 82e4bfb53da20402f1c62264ffb13ec2e3e85e48 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 9 Nov 2017 10:06:42 +0000 Subject: [PATCH 0429/1637] Add brackets --- synapse/federation/transport/server.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 6ef6cce592..2b02b021ec 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -706,8 +706,10 @@ class FederationGroupsAddRoomsServlet(BaseFederationServlet): class FederationGroupsAddRoomsConfigServlet(BaseFederationServlet): 
"""Update room config in group """ - PATH = "/groups/(?P[^/]*)/room/(?P[^/]*)" - "/config/(?P[^/]*)$" + PATH = ( + "/groups/(?P[^/]*)/room/(?P[^/]*)" + "/config/(?P[^/]*)$" + ) @defer.inlineCallbacks def on_POST(self, origin, content, query, group_id, room_id, config_key): From b2a788e902c6fb6d3c516177fbb9f7e201e5cf0e Mon Sep 17 00:00:00 2001 From: David Baker Date: Thu, 9 Nov 2017 10:11:42 +0000 Subject: [PATCH 0430/1637] Make the commented config have the default --- synapse/config/push.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/config/push.py b/synapse/config/push.py index bbfeb05d50..8fc1b98eba 100644 --- a/synapse/config/push.py +++ b/synapse/config/push.py @@ -64,5 +64,5 @@ class PushConfig(Config): # notification saying only that a message arrived and who it came from. # #push: - # include_content: false + # include_content: true """ From 889102315e07026a8ba5c2b5159b10da9fc7a9b9 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 9 Nov 2017 15:15:33 +0000 Subject: [PATCH 0431/1637] Fix 'NoneType' not iterable in /deactivate make sure we actually return a value from user_delete_access_tokens --- synapse/storage/registration.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 9c4f61da76..71748de733 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -241,7 +241,6 @@ class RegistrationStore(background_updates.BackgroundUpdateStore): "user_set_password_hash", user_set_password_hash_txn ) - @defer.inlineCallbacks def user_delete_access_tokens(self, user_id, except_token_id=None, device_id=None): """ @@ -290,7 +289,7 @@ class RegistrationStore(background_updates.BackgroundUpdateStore): return tokens_and_devices - yield self.runInteraction( + return self.runInteraction( "user_delete_access_tokens", f, ) From 13735843c76547d37f22a2bd9079479fc918ab8c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 9 Nov 2017 15:27:18 +0000 Subject: [PATCH 0432/1637] Namespace visibility options for groups --- synapse/groups/groups_server.py | 28 +++++++++++++++++++--------- 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index a8039f4788..0b995aed70 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -556,8 +556,8 @@ class GroupsServerHandler(object): group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id ) - if config_key == "visibility": - is_public = _parse_visibility_from_contents(content) + if config_key == "m.visibility": + is_public = _parse_visibility_dict(content) yield self.store.update_room_in_group_visibility( group_id, room_id, @@ -840,15 +840,25 @@ def _parse_visibility_from_contents(content): public or not """ - visibility = content.get("visibility") + visibility = content.get("m.visibility") if visibility: - vis_type = visibility["type"] - if vis_type not in ("public", "private"): - raise SynapseError( - 400, "Synapse only supports 'public'/'private' visibility" - ) - is_public = vis_type == "public" + return _parse_visibility_dict(visibility) else: is_public = True return is_public + + +def _parse_visibility_dict(visibility): + """Given a dict for the "m.visibility" config return if the entity should + be public or not + """ + vis_type = visibility.get("type") + if not vis_type: + return True + + if vis_type not in ("public", "private"): + raise SynapseError( + 400, "Synapse only 
supports 'public'/'private' visibility" + ) + return vis_type == "public" From 4e2b2508af9cb7ee4bd02c7835f3a98f30c2130f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 9 Nov 2017 15:49:42 +0000 Subject: [PATCH 0433/1637] Register group servlet --- synapse/rest/client/v2_alpha/groups.py | 1 + 1 file changed, 1 insertion(+) diff --git a/synapse/rest/client/v2_alpha/groups.py b/synapse/rest/client/v2_alpha/groups.py index 67f163e812..089ec71c81 100644 --- a/synapse/rest/client/v2_alpha/groups.py +++ b/synapse/rest/client/v2_alpha/groups.py @@ -740,6 +740,7 @@ def register_servlets(hs, http_server): GroupRoomServlet(hs).register(http_server) GroupCreateServlet(hs).register(http_server) GroupAdminRoomsServlet(hs).register(http_server) + GroupAdminRoomsConfigServlet(hs).register(http_server) GroupAdminUsersInviteServlet(hs).register(http_server) GroupAdminUsersKickServlet(hs).register(http_server) GroupSelfLeaveServlet(hs).register(http_server) From 2dce6b15c3c65cad82a53eb2213fd406f4be94f0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 9 Nov 2017 15:56:16 +0000 Subject: [PATCH 0434/1637] Fix typo --- synapse/storage/group_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index 6cb4ac28be..8fde1aab8e 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -864,7 +864,7 @@ class GroupServerStore(SQLBaseStore): "group_id": group_id, "room_id": room_id, }, - values={ + updatevalues={ "is_public": is_public, }, desc="update_room_in_group_visibility", From b70b64690330c25cbd04c1b2cacf8276b566efc8 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 9 Nov 2017 19:36:13 +0000 Subject: [PATCH 0435/1637] Allow upper-case characters in mxids Because we're never going to be able to fix this :'( --- synapse/handlers/register.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index f6e7e58563..bd5ba6e1d8 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -15,6 +15,7 @@ """Contains functions for registering clients.""" import logging +import urllib from twisted.internet import defer @@ -22,7 +23,6 @@ from synapse.api.errors import ( AuthError, Codes, SynapseError, RegistrationError, InvalidCaptchaError ) from synapse.http.client import CaptchaServerHttpClient -from synapse import types from synapse.types import UserID from synapse.util.async import run_on_reactor from ._base import BaseHandler @@ -47,7 +47,7 @@ class RegistrationHandler(BaseHandler): @defer.inlineCallbacks def check_username(self, localpart, guest_access_token=None, assigned_user_id=None): - if types.contains_invalid_mxid_characters(localpart): + if urllib.quote(localpart.encode('utf-8')) != localpart: raise SynapseError( 400, "User ID can only contain characters a-z, 0-9, or '=_-./'", @@ -253,7 +253,7 @@ class RegistrationHandler(BaseHandler): """ Registers email_id as SAML2 Based Auth. """ - if types.contains_invalid_mxid_characters(localpart): + if urllib.quote(localpart) != localpart: raise SynapseError( 400, "User ID can only contain characters a-z, 0-9, or '=_-./'", From 9b803ccc9886d4f77a00d903eefa092b74f13dc3 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 9 Nov 2017 21:57:24 +0000 Subject: [PATCH 0436/1637] Revert "Allow upper-case characters in mxids" This reverts commit b70b64690330c25cbd04c1b2cacf8276b566efc8. 
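For context, the check being restored by this revert rejects anything outside the lower-case localpart alphabet, whereas the `urllib.quote()` comparison it briefly replaced let upper-case through, since `quote()` leaves `A-Z` untouched. A rough sketch of the difference (Python 2, matching the codebase; the helper below is an approximation, not the real `types.contains_invalid_mxid_characters`):

```
import string
import urllib

ALLOWED = set(string.ascii_lowercase + string.digits + "=_-./")

def contains_invalid_mxid_characters_sketch(localpart):
    # approximation of the restored check: only a-z, 0-9 and '=_-./'
    return any(c not in ALLOWED for c in localpart)

localpart = "CrAzYh4cKeR"
print(urllib.quote(localpart) != localpart)                # False -> accepted
print(contains_invalid_mxid_characters_sketch(localpart))  # True  -> rejected
```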
---
 synapse/handlers/register.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index bd5ba6e1d8..f6e7e58563 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -15,7 +15,6 @@
 """Contains functions for registering clients."""
 
 import logging
-import urllib
 
 from twisted.internet import defer
 
@@ -23,6 +22,7 @@ from synapse.api.errors import (
     AuthError, Codes, SynapseError, RegistrationError, InvalidCaptchaError
 )
 from synapse.http.client import CaptchaServerHttpClient
+from synapse import types
 from synapse.types import UserID
 from synapse.util.async import run_on_reactor
 from ._base import BaseHandler
@@ -47,7 +47,7 @@ class RegistrationHandler(BaseHandler):
     @defer.inlineCallbacks
     def check_username(self, localpart, guest_access_token=None,
                        assigned_user_id=None):
-        if urllib.quote(localpart.encode('utf-8')) != localpart:
+        if types.contains_invalid_mxid_characters(localpart):
             raise SynapseError(
                 400,
                 "User ID can only contain characters a-z, 0-9, or '=_-./'",
@@ -253,7 +253,7 @@
         """
         Registers email_id as SAML2 Based Auth.
         """
-        if urllib.quote(localpart) != localpart:
+        if types.contains_invalid_mxid_characters(localpart):
             raise SynapseError(
                 400,
                 "User ID can only contain characters a-z, 0-9, or '=_-./'",

From 9b599bc18d844208b94219e7aa23c6157bae3b00 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Thu, 9 Nov 2017 22:20:01 +0000
Subject: [PATCH 0437/1637] Downcase userid on registration

Force username to lowercase before attempting to register

https://github.com/matrix-org/synapse/issues/2660
---
 synapse/rest/client/v2_alpha/register.py | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py
index eebd071e59..884edde119 100644
--- a/synapse/rest/client/v2_alpha/register.py
+++ b/synapse/rest/client/v2_alpha/register.py
@@ -224,6 +224,9 @@ class RegisterRestServlet(RestServlet):
         # 'user' key not 'username'). Since this is a new addition, we'll
         # fallback to 'username' if they gave one.
         desired_username = body.get("user", desired_username)
+
+        # XXX we should check that desired_username is valid
+
         access_token = get_access_token_from_request(request)
 
         if isinstance(desired_username, basestring):
@@ -273,7 +276,7 @@
 
         if desired_username is not None:
             yield self.registration_handler.check_username(
-                desired_username,
+                desired_username.lower(),
                 guest_access_token=guest_access_token,
                 assigned_user_id=registered_user_id,
             )
@@ -336,6 +339,9 @@
         new_password = params.get("password", None)
         guest_access_token = params.get("guest_access_token", None)
 
+        if desired_username is not None:
+            desired_username = desired_username.lower()
+
         (registered_user_id, _) = yield self.registration_handler.register(
             localpart=desired_username,
             password=new_password,

From f90649eb2b0988c771fa329ba7a0a5ba81fe2396 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Fri, 10 Nov 2017 09:15:39 +0000
Subject: [PATCH 0438/1637] Fix 500 on invalid utf-8 in request

If somebody sends us a request where the body is invalid utf-8, we
should return a 400 rather than a 500. (json.loads throws a UnicodeError
in this situation)

We might as well catch all Exceptions here: it seems very unlikely that
we would get a request that *isn't* caused by invalid json.
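(Aside: a rough sketch of the behaviour this patch aims for, using the stdlib json module rather than the simplejson that synapse uses; the function name is made up.)

    import json

    def parse_json_bytes(content_bytes):
        # json.loads raises UnicodeDecodeError on undecodable utf-8 and
        # ValueError/JSONDecodeError on malformed JSON; catching Exception
        # maps both onto the same client-error path instead of a 500
        try:
            return json.loads(content_bytes)
        except Exception as e:
            raise ValueError("Content not JSON: %s" % (e,))

    print(parse_json_bytes(b'{"a": 1}'))   # {'a': 1}
    # parse_json_bytes(b'\xff') raises ValueError -> reported as HTTP 400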
--- synapse/http/servlet.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py index 8118ee7cc2..71420e54db 100644 --- a/synapse/http/servlet.py +++ b/synapse/http/servlet.py @@ -167,7 +167,8 @@ def parse_json_value_from_request(request): try: content = simplejson.loads(content_bytes) - except simplejson.JSONDecodeError: + except Exception as e: + logger.warn("Unable to parse JSON: %s", e) raise SynapseError(400, "Content not JSON.", errcode=Codes.NOT_JSON) return content From e0ebd1e4bd770231b1e7c31c870ab22b992213ba Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 10 Nov 2017 12:39:05 +0000 Subject: [PATCH 0439/1637] Downcase userids for shared-secret registration --- synapse/rest/client/v1/register.py | 2 +- synapse/rest/client/v2_alpha/register.py | 22 ++++++++++++++++++++-- 2 files changed, 21 insertions(+), 3 deletions(-) diff --git a/synapse/rest/client/v1/register.py b/synapse/rest/client/v1/register.py index ecf7e311a9..32ed1d3ab2 100644 --- a/synapse/rest/client/v1/register.py +++ b/synapse/rest/client/v1/register.py @@ -359,7 +359,7 @@ class RegisterRestServlet(ClientV1RestServlet): if compare_digest(want_mac, got_mac): handler = self.handlers.registration_handler user_id, token = yield handler.register( - localpart=user, + localpart=user.lower(), password=password, admin=bool(admin), ) diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 884edde119..b3d918080d 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -236,6 +236,15 @@ class RegisterRestServlet(RestServlet): defer.returnValue((200, result)) # we throw for non 200 responses return + # for either shared secret or regular registration, downcase the + # provided username before attempting to register it. This should mean + # that people who try to register with upper-case in their usernames + # don't get a nasty surprise. (Note that we treat username + # case-insenstively in login, so they are free to carry on imagining + # that their username is CrAzYh4cKeR if that keeps them happy) + if desired_username is not None: + desired_username = desired_username.lower() + # == Shared Secret Registration == (e.g. create new user scripts) if 'mac' in body: # FIXME: Should we really be determining if this is shared secret @@ -276,7 +285,7 @@ class RegisterRestServlet(RestServlet): if desired_username is not None: yield self.registration_handler.check_username( - desired_username.lower(), + desired_username, guest_access_token=guest_access_token, assigned_user_id=registered_user_id, ) @@ -423,13 +432,22 @@ class RegisterRestServlet(RestServlet): def _do_shared_secret_registration(self, username, password, body): if not self.hs.config.registration_shared_secret: raise SynapseError(400, "Shared secret registration is not enabled") + if not username: + raise SynapseError( + 400, "username must be specified", errcode=Codes.BAD_JSON, + ) - user = username.encode("utf-8") + # use the username from the original request rather than the + # downcased one in `username` for the mac calculation + user = body["username"].encode("utf-8") # str() because otherwise hmac complains that 'unicode' does not # have the buffer interface got_mac = str(body["mac"]) + # FIXME this is different to the /v1/register endpoint, which + # includes the password and admin flag in the hashed text. Why are + # these different? 
want_mac = hmac.new( key=self.hs.config.registration_shared_secret, msg=user, From e508145c9b4b57d3e92df65bc768107e043e6399 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 10 Nov 2017 12:39:45 +0000 Subject: [PATCH 0440/1637] Add some more comments appservice user registration Explain why we don't validate userids registered via app services --- synapse/rest/client/v2_alpha/register.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index b3d918080d..9e2f7308ce 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -225,7 +225,10 @@ class RegisterRestServlet(RestServlet): # fallback to 'username' if they gave one. desired_username = body.get("user", desired_username) - # XXX we should check that desired_username is valid + # XXX we should check that desired_username is valid. Currently + # we give appservices carte blanche for any insanity in mxids, + # because the IRC bridges rely on being able to register stupid + # IDs. access_token = get_access_token_from_request(request) From 46790f50cfcc1049974b468d4b08402935e8ac84 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 10 Nov 2017 16:34:33 +0000 Subject: [PATCH 0441/1637] Cache failures in url_preview handler Reshuffle the caching logic in the url_preview handler so that failures are cached (and to generally simplify things and fix the logcontext leaks). --- synapse/rest/media/v1/preview_url_resource.py | 86 ++++++++++--------- 1 file changed, 45 insertions(+), 41 deletions(-) diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index 7907a9d17a..38e1afd34b 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -20,6 +20,7 @@ from twisted.web.resource import Resource from synapse.api.errors import ( SynapseError, Codes, ) +from synapse.util.logcontext import preserve_fn, make_deferred_yieldable from synapse.util.stringutils import random_string from synapse.util.caches.expiringcache import ExpiringCache from synapse.http.client import SpiderHttpClient @@ -63,16 +64,15 @@ class PreviewUrlResource(Resource): self.url_preview_url_blacklist = hs.config.url_preview_url_blacklist - # simple memory cache mapping urls to OG metadata - self.cache = ExpiringCache( + # memory cache mapping urls to an ObservableDeferred returning + # JSON-encoded OG metadata + self._cache = ExpiringCache( cache_name="url_previews", clock=self.clock, # don't spider URLs more often than once an hour expiry_ms=60 * 60 * 1000, ) - self.cache.start() - - self.downloads = {} + self._cache.start() self._cleaner_loop = self.clock.looping_call( self._expire_url_cache_data, 10 * 1000 @@ -94,6 +94,7 @@ class PreviewUrlResource(Resource): else: ts = self.clock.time_msec() + # XXX: we could move this into _do_preview if we wanted. url_tuple = urlparse.urlsplit(url) for entry in self.url_preview_url_blacklist: match = True @@ -126,14 +127,40 @@ class PreviewUrlResource(Resource): Codes.UNKNOWN ) - # first check the memory cache - good to handle all the clients on this - # HS thundering away to preview the same URL at the same time. 
- og = self.cache.get(url) - if og: - respond_with_json_bytes(request, 200, json.dumps(og), send_cors=True) - return + # the in-memory cache: + # * ensures that only one request is active at a time + # * takes load off the DB for the thundering herds + # * also caches any failures (unlike the DB) so we don't keep + # requesting the same endpoint - # then check the URL cache in the DB (which will also provide us with + observable = self._cache.get(url) + + if not observable: + download = preserve_fn(self._do_preview)( + url, requester.user, ts, + ) + observable = ObservableDeferred( + download, + consumeErrors=True + ) + self._cache[url] = observable + + og = yield make_deferred_yieldable(observable.observe()) + respond_with_json_bytes(request, 200, og, send_cors=True) + + @defer.inlineCallbacks + def _do_preview(self, url, user, ts): + """Check the db, and download the URL and build a preview + + Args: + url (str): + user (str): + ts (int): + + Returns: + Deferred[str]: json-encoded og data + """ + # check the URL cache in the DB (which will also provide us with # historical previews, if we have any) cache_result = yield self.store.get_url_cache(url, ts) if ( @@ -141,32 +168,10 @@ class PreviewUrlResource(Resource): cache_result["expires_ts"] > ts and cache_result["response_code"] / 100 == 2 ): - respond_with_json_bytes( - request, 200, cache_result["og"].encode('utf-8'), - send_cors=True - ) + defer.returnValue(cache_result["og"]) return - # Ensure only one download for a given URL is active at a time - download = self.downloads.get(url) - if download is None: - download = self._download_url(url, requester.user) - download = ObservableDeferred( - download, - consumeErrors=True - ) - self.downloads[url] = download - - @download.addBoth - def callback(media_info): - del self.downloads[url] - return media_info - media_info = yield download.observe() - - # FIXME: we should probably update our cache now anyway, so that - # even if the OG calculation raises, we don't keep hammering on the - # remote server. For now, leave it uncached to aid debugging OG - # calculation problems + media_info = yield self._download_url(url, user) logger.debug("got media_info of '%s'" % media_info) @@ -212,7 +217,7 @@ class PreviewUrlResource(Resource): # just rely on the caching on the master request to speed things up. 
if 'og:image' in og and og['og:image']: image_info = yield self._download_url( - _rebase_url(og['og:image'], media_info['uri']), requester.user + _rebase_url(og['og:image'], media_info['uri']), user ) if _is_media(image_info['media_type']): @@ -239,8 +244,7 @@ class PreviewUrlResource(Resource): logger.debug("Calculated OG for %s as %s" % (url, og)) - # store OG in ephemeral in-memory cache - self.cache[url] = og + jsonog = json.dumps(og) # store OG in history-aware DB cache yield self.store.store_url_cache( @@ -248,12 +252,12 @@ class PreviewUrlResource(Resource): media_info["response_code"], media_info["etag"], media_info["expires"] + media_info["created_ts"], - json.dumps(og), + jsonog, media_info["filesystem_id"], media_info["created_ts"], ) - respond_with_json_bytes(request, 200, json.dumps(og), send_cors=True) + defer.returnValue(jsonog) @defer.inlineCallbacks def _download_url(self, url, user): From 5d15abb120a483395804d7e500dfa0bc42e49d51 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 10 Nov 2017 16:58:04 +0000 Subject: [PATCH 0442/1637] Bit more logging --- synapse/rest/media/v1/preview_url_resource.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index 38e1afd34b..723f7043f4 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -144,6 +144,8 @@ class PreviewUrlResource(Resource): consumeErrors=True ) self._cache[url] = observable + else: + logger.info("Returning cached response") og = yield make_deferred_yieldable(observable.observe()) respond_with_json_bytes(request, 200, og, send_cors=True) From 2d314b771f032441595f931210fde67d25b90075 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Sun, 12 Nov 2017 23:30:23 -0700 Subject: [PATCH 0443/1637] Add a route for determining who you are Useful for applications which may have an access token, but no idea as to who owns it. 
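(Aside: a hedged usage sketch of the new endpoint; the homeserver name and access token are placeholders, the third-party requests package is assumed, and the r0 prefix is assumed to match how the other v2_alpha servlets are mounted.)

    import requests

    resp = requests.get(
        "https://homeserver.example/_matrix/client/r0/account/whoami",
        headers={"Authorization": "Bearer MDAxSomeOpaqueToken"},
    )
    print(resp.status_code, resp.json())
    # expected shape, per the servlet below: 200 {'user_id': '@travis:example.org'}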
Signed-off-by: Travis Ralston --- synapse/rest/client/v2_alpha/account.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index 3062e04c59..0efbcb10d7 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -382,6 +382,22 @@ class ThreepidDeleteRestServlet(RestServlet): defer.returnValue((200, {})) +class WhoamiRestServlet(RestServlet): + PATTERNS = client_v2_patterns("/account/whoami$") + + def __init__(self, hs): + super(WhoamiRestServlet, self).__init__() + self.auth = hs.get_auth() + + @defer.inlineCallbacks + def on_GET(self, request): + yield run_on_reactor() + + requester = yield self.auth.get_user_by_req(request) + + defer.returnValue((200, {'user_id': requester.user.to_string()})) + + def register_servlets(hs, http_server): EmailPasswordRequestTokenRestServlet(hs).register(http_server) MsisdnPasswordRequestTokenRestServlet(hs).register(http_server) @@ -391,3 +407,4 @@ def register_servlets(hs, http_server): MsisdnThreepidRequestTokenRestServlet(hs).register(http_server) ThreepidRestServlet(hs).register(http_server) ThreepidDeleteRestServlet(hs).register(http_server) + WhoamiRestServlet(hs).register(http_server) From bfbf1e1f1a78d730b7296101c85c3080e0ea2b01 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 13 Nov 2017 09:52:11 +0000 Subject: [PATCH 0444/1637] Up cache size of get_global_account_data_by_type_for_user --- synapse/storage/account_data.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/account_data.py b/synapse/storage/account_data.py index ff14e54c11..c8a1eb016b 100644 --- a/synapse/storage/account_data.py +++ b/synapse/storage/account_data.py @@ -63,7 +63,7 @@ class AccountDataStore(SQLBaseStore): "get_account_data_for_user", get_account_data_for_user_txn ) - @cachedInlineCallbacks(num_args=2) + @cachedInlineCallbacks(num_args=2, max_entries=5000) def get_global_account_data_by_type_for_user(self, data_type, user_id): """ Returns: From ab335edb023d66cd0be439e045b10ca104b73cb5 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 13 Nov 2017 10:05:33 +0000 Subject: [PATCH 0445/1637] Revert "move _state_group_cache to statestore" This reverts commit f5cf3638e9c6086e1c33ddad8eda9298cf53a58e. 
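(Aside: for readers unfamiliar with synapse's cache decorators, a toy Python 3 analogue of what raising max_entries on the get_global_account_data_by_type_for_user cache above buys: more distinct (data_type, user_id) pairs stay memoised before LRU eviction. The DB-lookup body here is hypothetical.)

    import functools

    def fetch_from_db(data_type, user_id):
        return {}   # stand-in for the real database query

    # rough analogue of @cachedInlineCallbacks(num_args=2, max_entries=5000):
    # an LRU cache keyed on the two positional arguments
    @functools.lru_cache(maxsize=5000)
    def get_global_account_data_by_type_for_user(data_type, user_id):
        return fetch_from_db(data_type, user_id)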
---
 synapse/storage/_base.py | 6 ++++++
 synapse/storage/state.py | 19 +++++++-------
 2 files changed, 13 insertions(+), 12 deletions(-)

diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index a37d1934ec..6caf7b3356 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -16,6 +16,8 @@ import logging

 from synapse.api.errors import StoreError
 from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
+from synapse.util.caches import CACHE_SIZE_FACTOR
+from synapse.util.caches.dictionary_cache import DictionaryCache
 from synapse.util.caches.descriptors import Cache
 from synapse.storage.engines import PostgresEngine
 import synapse.metrics
@@ -178,6 +180,10 @@ class SQLBaseStore(object):
         self._get_event_cache = Cache("*getEvent*", keylen=3,
                                       max_entries=hs.config.event_cache_size)

+        self._state_group_cache = DictionaryCache(
+            "*stateGroupCache*", 100000 * CACHE_SIZE_FACTOR
+        )
+
         self._event_fetch_lock = threading.Condition()
         self._event_fetch_list = []
         self._event_fetch_ongoing = 0
diff --git a/synapse/storage/state.py b/synapse/storage/state.py
index a1da3ad7a5..5673e4aa96 100644
--- a/synapse/storage/state.py
+++ b/synapse/storage/state.py
@@ -13,17 +13,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from collections import namedtuple
-import logging
+from ._base import SQLBaseStore
+from synapse.util.caches.descriptors import cached, cachedList
+from synapse.util.caches import intern_string
+from synapse.util.stringutils import to_ascii
+from synapse.storage.engines import PostgresEngine

 from twisted.internet import defer
+from collections import namedtuple

-from synapse.storage.engines import PostgresEngine
-from synapse.util.caches import intern_string, CACHE_SIZE_FACTOR
-from synapse.util.caches.descriptors import cached, cachedList
-from synapse.util.caches.dictionary_cache import DictionaryCache
-from synapse.util.stringutils import to_ascii
-from ._base import SQLBaseStore
+import logging

 logger = logging.getLogger(__name__)

@@ -82,10 +81,6 @@ class StateStore(SQLBaseStore):
             where_clause="type='m.room.member'",
         )

-        self._state_group_cache = DictionaryCache(
-            "*stateGroupCache*", 100000 * CACHE_SIZE_FACTOR
-        )
-
     @cached(max_entries=100000, iterable=True)
     def get_current_state_ids(self, room_id):
         """Get the current state event ids for a room based on the

From 6cfee09be9b5f58b83ef30bb35fa70453c7c2329 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Thu, 9 Nov 2017 18:51:27 +0000
Subject: [PATCH 0446/1637] Make __init__ consistent across Store hierarchy

Add db_conn parameters to the `__init__` methods of the *Store classes, so
that they are all consistent, which makes the multiple inheritance work
correctly (and so that we can later extract mixins which can be used in the
slavedstores)
---
 synapse/replication/slave/storage/_base.py | 2 +-
 synapse/storage/__init__.py | 2 +-
 synapse/storage/_base.py | 2 +-
 synapse/storage/appservice.py | 8 ++++----
 synapse/storage/background_updates.py | 4 ++--
 synapse/storage/client_ips.py | 4 ++--
 synapse/storage/deviceinbox.py | 4 ++--
 synapse/storage/devices.py | 4 ++--
 synapse/storage/event_federation.py | 4 ++--
 synapse/storage/event_push_actions.py | 4 ++--
 synapse/storage/events.py | 4 ++--
 synapse/storage/receipts.py | 4 ++--
 synapse/storage/registration.py | 4 ++--
 synapse/storage/roommember.py | 4 ++--
 synapse/storage/search.py | 4 ++--
 synapse/storage/state.py | 4 ++--
 synapse/storage/transactions.py | 4 ++--
 17 files
changed, 33 insertions(+), 33 deletions(-) diff --git a/synapse/replication/slave/storage/_base.py b/synapse/replication/slave/storage/_base.py index b962641166..61f5590c53 100644 --- a/synapse/replication/slave/storage/_base.py +++ b/synapse/replication/slave/storage/_base.py @@ -25,7 +25,7 @@ logger = logging.getLogger(__name__) class BaseSlavedStore(SQLBaseStore): def __init__(self, db_conn, hs): - super(BaseSlavedStore, self).__init__(hs) + super(BaseSlavedStore, self).__init__(db_conn, hs) if isinstance(self.database_engine, PostgresEngine): self._cache_id_gen = SlavedIdTracker( db_conn, "cache_invalidation_stream", "stream_id", diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 594566eb38..d01d46338a 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -268,7 +268,7 @@ class DataStore(RoomMemberStore, RoomStore, self._stream_order_on_start = self.get_room_max_stream_ordering() self._min_stream_order_on_start = self.get_room_min_stream_ordering() - super(DataStore, self).__init__(hs) + super(DataStore, self).__init__(db_conn, hs) def take_presence_startup_info(self): active_on_startup = self._presence_on_startup diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 6caf7b3356..e94917d9cd 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -162,7 +162,7 @@ class PerformanceCounters(object): class SQLBaseStore(object): _TXN_ID = 0 - def __init__(self, hs): + def __init__(self, db_conn, hs): self.hs = hs self._clock = hs.get_clock() self._db_pool = hs.get_db_pool() diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py index c63935cb07..d8c84b7141 100644 --- a/synapse/storage/appservice.py +++ b/synapse/storage/appservice.py @@ -48,8 +48,8 @@ def _make_exclusive_regex(services_cache): class ApplicationServiceStore(SQLBaseStore): - def __init__(self, hs): - super(ApplicationServiceStore, self).__init__(hs) + def __init__(self, db_conn, hs): + super(ApplicationServiceStore, self).__init__(db_conn, hs) self.hostname = hs.hostname self.services_cache = load_appservices( hs.hostname, @@ -173,8 +173,8 @@ class ApplicationServiceStore(SQLBaseStore): class ApplicationServiceTransactionStore(SQLBaseStore): - def __init__(self, hs): - super(ApplicationServiceTransactionStore, self).__init__(hs) + def __init__(self, db_conn, hs): + super(ApplicationServiceTransactionStore, self).__init__(db_conn, hs) @defer.inlineCallbacks def get_appservices_by_state(self, state): diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index a6e6f52a6a..6f235ac051 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -80,8 +80,8 @@ class BackgroundUpdateStore(SQLBaseStore): BACKGROUND_UPDATE_INTERVAL_MS = 1000 BACKGROUND_UPDATE_DURATION_MS = 100 - def __init__(self, hs): - super(BackgroundUpdateStore, self).__init__(hs) + def __init__(self, db_conn, hs): + super(BackgroundUpdateStore, self).__init__(db_conn, hs) self._background_update_performance = {} self._background_update_queue = [] self._background_update_handlers = {} diff --git a/synapse/storage/client_ips.py b/synapse/storage/client_ips.py index 3c95e90eca..a03d1d6104 100644 --- a/synapse/storage/client_ips.py +++ b/synapse/storage/client_ips.py @@ -32,14 +32,14 @@ LAST_SEEN_GRANULARITY = 120 * 1000 class ClientIpStore(background_updates.BackgroundUpdateStore): - def __init__(self, hs): + def __init__(self, db_conn, hs): self.client_ip_last_seen = Cache( 
name="client_ip_last_seen", keylen=4, max_entries=50000 * CACHE_SIZE_FACTOR, ) - super(ClientIpStore, self).__init__(hs) + super(ClientIpStore, self).__init__(db_conn, hs) self.register_background_index_update( "user_ips_device_index", diff --git a/synapse/storage/deviceinbox.py b/synapse/storage/deviceinbox.py index 0b62b493d5..548e795daf 100644 --- a/synapse/storage/deviceinbox.py +++ b/synapse/storage/deviceinbox.py @@ -29,8 +29,8 @@ logger = logging.getLogger(__name__) class DeviceInboxStore(BackgroundUpdateStore): DEVICE_INBOX_STREAM_ID = "device_inbox_stream_drop" - def __init__(self, hs): - super(DeviceInboxStore, self).__init__(hs) + def __init__(self, db_conn, hs): + super(DeviceInboxStore, self).__init__(db_conn, hs) self.register_background_index_update( "device_inbox_stream_index", diff --git a/synapse/storage/devices.py b/synapse/storage/devices.py index bb27fd1f70..bd2effdf34 100644 --- a/synapse/storage/devices.py +++ b/synapse/storage/devices.py @@ -26,8 +26,8 @@ logger = logging.getLogger(__name__) class DeviceStore(SQLBaseStore): - def __init__(self, hs): - super(DeviceStore, self).__init__(hs) + def __init__(self, db_conn, hs): + super(DeviceStore, self).__init__(db_conn, hs) # Map of (user_id, device_id) -> bool. If there is an entry that implies # the device exists. diff --git a/synapse/storage/event_federation.py b/synapse/storage/event_federation.py index e8133de2fa..55a05c59d5 100644 --- a/synapse/storage/event_federation.py +++ b/synapse/storage/event_federation.py @@ -39,8 +39,8 @@ class EventFederationStore(SQLBaseStore): EVENT_AUTH_STATE_ONLY = "event_auth_state_only" - def __init__(self, hs): - super(EventFederationStore, self).__init__(hs) + def __init__(self, db_conn, hs): + super(EventFederationStore, self).__init__(db_conn, hs) self.register_background_update_handler( self.EVENT_AUTH_STATE_ONLY, diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py index d6d8723b4a..8efe2fd4bb 100644 --- a/synapse/storage/event_push_actions.py +++ b/synapse/storage/event_push_actions.py @@ -65,8 +65,8 @@ def _deserialize_action(actions, is_highlight): class EventPushActionsStore(SQLBaseStore): EPA_HIGHLIGHT_INDEX = "epa_highlight_index" - def __init__(self, hs): - super(EventPushActionsStore, self).__init__(hs) + def __init__(self, db_conn, hs): + super(EventPushActionsStore, self).__init__(db_conn, hs) self.register_background_index_update( self.EPA_HIGHLIGHT_INDEX, diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 4298d8baf1..d08f7571d7 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -197,8 +197,8 @@ class EventsStore(SQLBaseStore): EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts" EVENT_FIELDS_SENDER_URL_UPDATE_NAME = "event_fields_sender_url" - def __init__(self, hs): - super(EventsStore, self).__init__(hs) + def __init__(self, db_conn, hs): + super(EventsStore, self).__init__(db_conn, hs) self._clock = hs.get_clock() self.register_background_update_handler( self.EVENT_ORIGIN_SERVER_TS_NAME, self._background_reindex_origin_server_ts diff --git a/synapse/storage/receipts.py b/synapse/storage/receipts.py index f42b8014c7..12b3cc7f5f 100644 --- a/synapse/storage/receipts.py +++ b/synapse/storage/receipts.py @@ -27,8 +27,8 @@ logger = logging.getLogger(__name__) class ReceiptsStore(SQLBaseStore): - def __init__(self, hs): - super(ReceiptsStore, self).__init__(hs) + def __init__(self, db_conn, hs): + super(ReceiptsStore, self).__init__(db_conn, hs) self._receipts_stream_cache = 
StreamChangeCache( "ReceiptsRoomChangeCache", self._receipts_id_gen.get_current_token() diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 71748de733..8b9544c209 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -24,8 +24,8 @@ from synapse.util.caches.descriptors import cached, cachedInlineCallbacks class RegistrationStore(background_updates.BackgroundUpdateStore): - def __init__(self, hs): - super(RegistrationStore, self).__init__(hs) + def __init__(self, db_conn, hs): + super(RegistrationStore, self).__init__(db_conn, hs) self.clock = hs.get_clock() diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index 3fa8019eb7..3e77fd3901 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -49,8 +49,8 @@ _MEMBERSHIP_PROFILE_UPDATE_NAME = "room_membership_profile_update" class RoomMemberStore(SQLBaseStore): - def __init__(self, hs): - super(RoomMemberStore, self).__init__(hs) + def __init__(self, db_conn, hs): + super(RoomMemberStore, self).__init__(db_conn, hs) self.register_background_update_handler( _MEMBERSHIP_PROFILE_UPDATE_NAME, self._background_add_membership_profile ) diff --git a/synapse/storage/search.py b/synapse/storage/search.py index 05d4ef586e..479b04c636 100644 --- a/synapse/storage/search.py +++ b/synapse/storage/search.py @@ -33,8 +33,8 @@ class SearchStore(BackgroundUpdateStore): EVENT_SEARCH_ORDER_UPDATE_NAME = "event_search_order" EVENT_SEARCH_USE_GIST_POSTGRES_NAME = "event_search_postgres_gist" - def __init__(self, hs): - super(SearchStore, self).__init__(hs) + def __init__(self, db_conn, hs): + super(SearchStore, self).__init__(db_conn, hs) self.register_background_update_handler( self.EVENT_SEARCH_UPDATE_NAME, self._background_reindex_search ) diff --git a/synapse/storage/state.py b/synapse/storage/state.py index 5673e4aa96..dd01b68762 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -63,8 +63,8 @@ class StateStore(SQLBaseStore): STATE_GROUP_INDEX_UPDATE_NAME = "state_group_state_type_index" CURRENT_STATE_INDEX_UPDATE_NAME = "current_state_members_idx" - def __init__(self, hs): - super(StateStore, self).__init__(hs) + def __init__(self, db_conn, hs): + super(StateStore, self).__init__(db_conn, hs) self.register_background_update_handler( self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME, self._background_deduplicate_state, diff --git a/synapse/storage/transactions.py b/synapse/storage/transactions.py index 809fdd311f..8f61f7ffae 100644 --- a/synapse/storage/transactions.py +++ b/synapse/storage/transactions.py @@ -46,8 +46,8 @@ class TransactionStore(SQLBaseStore): """A collection of queries for handling PDUs. 
""" - def __init__(self, hs): - super(TransactionStore, self).__init__(hs) + def __init__(self, db_conn, hs): + super(TransactionStore, self).__init__(db_conn, hs) self._clock.looping_call(self._cleanup_transactions, 30 * 60 * 1000) From 63ef607f1f6a9f998796cd3b6bcbcdb95fd08557 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 9 Nov 2017 20:53:11 +0000 Subject: [PATCH 0447/1637] Fix tests for Store.__init__ update Fix the test to pass the right number of args to the Store constructors --- tests/storage/test_appservice.py | 14 +++++++------- tests/storage/test_base.py | 2 +- tests/storage/test_directory.py | 2 +- tests/storage/test_presence.py | 2 +- tests/storage/test_profile.py | 2 +- 5 files changed, 11 insertions(+), 11 deletions(-) diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py index 79f569e787..13d81f972b 100644 --- a/tests/storage/test_appservice.py +++ b/tests/storage/test_appservice.py @@ -58,7 +58,7 @@ class ApplicationServiceStoreTestCase(unittest.TestCase): self._add_appservice("token2", "as2", "some_url", "some_hs_token", "bob") self._add_appservice("token3", "as3", "some_url", "some_hs_token", "bob") # must be done after inserts - self.store = ApplicationServiceStore(hs) + self.store = ApplicationServiceStore(None, hs) def tearDown(self): # TODO: suboptimal that we need to create files for tests! @@ -150,7 +150,7 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase): self.as_yaml_files = [] - self.store = TestTransactionStore(hs) + self.store = TestTransactionStore(None, hs) def _add_service(self, url, as_token, id): as_yaml = dict(url=url, as_token=as_token, hs_token="something", @@ -420,8 +420,8 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase): class TestTransactionStore(ApplicationServiceTransactionStore, ApplicationServiceStore): - def __init__(self, hs): - super(TestTransactionStore, self).__init__(hs) + def __init__(self, db_conn, hs): + super(TestTransactionStore, self).__init__(db_conn, hs) class ApplicationServiceStoreConfigTestCase(unittest.TestCase): @@ -458,7 +458,7 @@ class ApplicationServiceStoreConfigTestCase(unittest.TestCase): replication_layer=Mock(), ) - ApplicationServiceStore(hs) + ApplicationServiceStore(None, hs) @defer.inlineCallbacks def test_duplicate_ids(self): @@ -477,7 +477,7 @@ class ApplicationServiceStoreConfigTestCase(unittest.TestCase): ) with self.assertRaises(ConfigError) as cm: - ApplicationServiceStore(hs) + ApplicationServiceStore(None, hs) e = cm.exception self.assertIn(f1, e.message) @@ -501,7 +501,7 @@ class ApplicationServiceStoreConfigTestCase(unittest.TestCase): ) with self.assertRaises(ConfigError) as cm: - ApplicationServiceStore(hs) + ApplicationServiceStore(None, hs) e = cm.exception self.assertIn(f1, e.message) diff --git a/tests/storage/test_base.py b/tests/storage/test_base.py index 91e971190c..0ac910e76f 100644 --- a/tests/storage/test_base.py +++ b/tests/storage/test_base.py @@ -56,7 +56,7 @@ class SQLBaseStoreTestCase(unittest.TestCase): database_engine=create_engine(config.database_config), ) - self.datastore = SQLBaseStore(hs) + self.datastore = SQLBaseStore(None, hs) @defer.inlineCallbacks def test_insert_1col(self): diff --git a/tests/storage/test_directory.py b/tests/storage/test_directory.py index b087892e0b..95709cd50a 100644 --- a/tests/storage/test_directory.py +++ b/tests/storage/test_directory.py @@ -29,7 +29,7 @@ class DirectoryStoreTestCase(unittest.TestCase): def setUp(self): hs = yield setup_test_homeserver() - self.store = 
DirectoryStore(hs) + self.store = DirectoryStore(None, hs) self.room = RoomID.from_string("!abcde:test") self.alias = RoomAlias.from_string("#my-room:test") diff --git a/tests/storage/test_presence.py b/tests/storage/test_presence.py index 63203cea35..f5fcb611d4 100644 --- a/tests/storage/test_presence.py +++ b/tests/storage/test_presence.py @@ -29,7 +29,7 @@ class PresenceStoreTestCase(unittest.TestCase): def setUp(self): hs = yield setup_test_homeserver(clock=MockClock()) - self.store = PresenceStore(hs) + self.store = PresenceStore(None, hs) self.u_apple = UserID.from_string("@apple:test") self.u_banana = UserID.from_string("@banana:test") diff --git a/tests/storage/test_profile.py b/tests/storage/test_profile.py index 24118bbc86..423710c9c1 100644 --- a/tests/storage/test_profile.py +++ b/tests/storage/test_profile.py @@ -29,7 +29,7 @@ class ProfileStoreTestCase(unittest.TestCase): def setUp(self): hs = yield setup_test_homeserver() - self.store = ProfileStore(hs) + self.store = ProfileStore(None, hs) self.u_frank = UserID.from_string("@frank:test") From 45ab288e072341447cc6375f37df8bace3d1c525 Mon Sep 17 00:00:00 2001 From: David Baker Date: Mon, 13 Nov 2017 18:32:08 +0000 Subject: [PATCH 0448/1637] Print instead of logging because we had to wait until the logger was set up --- synapse/config/push.py | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/synapse/config/push.py b/synapse/config/push.py index 8fc1b98eba..b7e0d46afa 100644 --- a/synapse/config/push.py +++ b/synapse/config/push.py @@ -16,13 +16,6 @@ from ._base import Config -import logging - -from twisted.internet import reactor - - -logger = logging.getLogger(__name__) - class PushConfig(Config): def read_config(self, config): @@ -33,20 +26,20 @@ class PushConfig(Config): # 'email'section'. Check for the flag in the 'push' section, and log, # but do not honour it to avoid nasty surprises when people upgrade. if push_config.get("redact_content") is not None: - reactor.callWhenRunning(lambda: logger.warn( + print( "The push.redact_content content option has never worked. " "Please set push.include_content if you want this behaviour" - )) + ) # Now check for the one in the 'email' section and honour it, # with a warning. 
        push_config = config.get("email", {})
         redact_content = push_config.get("redact_content")
         if redact_content is not None:
-            reactor.callWhenRunning(lambda: logger.warn(
+            print(
                 "The 'email.redact_content' option is deprecated: "
                 "please set push.include_content instead"
-            ))
+            )
             self.push_include_content = not redact_content

     def default_config(self, config_dir_path, server_name, **kwargs):

From 812c1919392c8ae8aa93969fb0679bd03d73da05 Mon Sep 17 00:00:00 2001
From: Travis Ralston
Date: Mon, 13 Nov 2017 12:44:21 -0700
Subject: [PATCH 0449/1637] Remove redundant call

Signed-off-by: Travis Ralston
---
 synapse/rest/client/v2_alpha/account.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py
index 0efbcb10d7..726e0a2826 100644
--- a/synapse/rest/client/v2_alpha/account.py
+++ b/synapse/rest/client/v2_alpha/account.py
@@ -391,8 +391,6 @@ class WhoamiRestServlet(RestServlet):

     @defer.inlineCallbacks
     def on_GET(self, request):
-        yield run_on_reactor()
-
         requester = yield self.auth.get_user_by_req(request)

         defer.returnValue((200, {'user_id': requester.user.to_string()}))

From 1fc66c7460b7e6c503dbeb6577fb0ba3cf7dfd83 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Tue, 14 Nov 2017 09:23:56 +0000
Subject: [PATCH 0450/1637] Add a load of logging to the room_list handler

So we can see what it gets up to.
---
 synapse/handlers/room_list.py | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py
index 2cf34e51cb..928ee38aea 100644
--- a/synapse/handlers/room_list.py
+++ b/synapse/handlers/room_list.py
@@ -154,6 +154,8 @@ class RoomListHandler(BaseHandler):
             # We want larger rooms to be first, hence negating num_joined_users
             rooms_to_order_value[room_id] = (-num_joined_users, room_id)

+        logger.info("Getting ordering for %i rooms since %s",
+                    len(room_ids), stream_token)
         yield concurrently_execute(get_order_for_room, room_ids, 10)

         sorted_entries = sorted(rooms_to_order_value.items(), key=lambda e: e[1])
@@ -181,17 +183,25 @@
                 rooms_to_scan = rooms_to_scan[:since_token.current_limit]
                 rooms_to_scan.reverse()

+        logger.info("After sorting and filtering, %i rooms remain",
+                    len(rooms_to_scan))
+
         # Actually generate the entries. _append_room_entry_to_chunk will append to
         # chunk but will stop if len(chunk) > limit
         chunk = []
         if limit and not search_filter:
             step = limit + 1
             for i in xrange(0, len(rooms_to_scan), step):
+                logger.info("Processing %i rooms for result", step)
                 # We iterate here because the vast majority of cases we'll stop
                 # at first iteration, but occaisonally _append_room_entry_to_chunk
                 # won't append to the chunk and so we need to loop again.
                 # We don't want to scan over the entire range either as that
                 # would potentially waste a lot of work.
+                #
+                # XXX why would that happen? _append_room_entry_to_chunk will
+                # only exclude rooms which don't match search_filter, but we
+                # know search_filter is None here.
yield concurrently_execute( lambda r: self._append_room_entry_to_chunk( r, rooms_to_num_joined[r], @@ -199,9 +209,11 @@ class RoomListHandler(BaseHandler): ), rooms_to_scan[i:i + step], 10 ) + logger.info("Now %i rooms in result", len(chunk)) if len(chunk) >= limit + 1: break else: + logger.info("Processing %i rooms for result", len(rooms_to_scan)) yield concurrently_execute( lambda r: self._append_room_entry_to_chunk( r, rooms_to_num_joined[r], @@ -209,6 +221,7 @@ class RoomListHandler(BaseHandler): ), rooms_to_scan, 5 ) + logger.info("Now %i rooms in result", len(chunk)) chunk.sort(key=lambda e: (-e["num_joined_members"], e["room_id"])) From 44a1bfd6a6a1cda272677c9ea8704957bc940509 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 14 Nov 2017 09:39:54 +0000 Subject: [PATCH 0451/1637] Reshuffle room list request code I'm not entirely sure if this will actually help anything, but it simplifies the code and might give further clues about why room list search requests are blowing out the get_current_state_ids caches. --- synapse/handlers/room_list.py | 51 +++++++++++++++++------------------ 1 file changed, 24 insertions(+), 27 deletions(-) diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py index 928ee38aea..bb40075387 100644 --- a/synapse/handlers/room_list.py +++ b/synapse/handlers/room_list.py @@ -186,42 +186,39 @@ class RoomListHandler(BaseHandler): logger.info("After sorting and filtering, %i rooms remain", len(rooms_to_scan)) - # Actually generate the entries. _append_room_entry_to_chunk will append to - # chunk but will stop if len(chunk) > limit - chunk = [] - if limit and not search_filter: + # _append_room_entry_to_chunk will append to chunk but will stop if + # len(chunk) > limit + # + # Normally we will generate enough results on the first iteration here, + # but if there is a search filter, _append_room_entry_to_chunk may + # filter some results out, in which case we loop again. + # + # We don't want to scan over the entire range either as that + # would potentially waste a lot of work. + # + # XXX if there is no limit, we may end up DoSing the server with + # calls to get_current_state_ids for every single room on the + # server. Surely we should cap this somehow? + # + if limit: step = limit + 1 - for i in xrange(0, len(rooms_to_scan), step): - logger.info("Processing %i rooms for result", step) - # We iterate here because the vast majority of cases we'll stop - # at first iteration, but occaisonally _append_room_entry_to_chunk - # won't append to the chunk and so we need to loop again. - # We don't want to scan over the entire range either as that - # would potentially waste a lot of work. - # - # XXX why would that happen? _append_room_entry_to_chunk will - # only exclude rooms which don't match search_filter, but we - # know search_filter is None here. 
-            yield concurrently_execute(
-                lambda r: self._append_room_entry_to_chunk(
-                    r, rooms_to_num_joined[r],
-                    chunk, limit, search_filter
-                ),
-                rooms_to_scan[i:i + step], 10
-            )
-            logger.info("Now %i rooms in result", len(chunk))
-            if len(chunk) >= limit + 1:
-                break
         else:
-            logger.info("Processing %i rooms for result", len(rooms_to_scan))
+            step = len(rooms_to_scan)
+
+        chunk = []
+        for i in xrange(0, len(rooms_to_scan), step):
+            batch = rooms_to_scan[i:i + step]
+            logger.info("Processing %i rooms for result", len(batch))
             yield concurrently_execute(
                 lambda r: self._append_room_entry_to_chunk(
                     r, rooms_to_num_joined[r],
                     chunk, limit, search_filter
                 ),
-                rooms_to_scan, 5
+                batch, 5,
             )
             logger.info("Now %i rooms in result", len(chunk))
+            if len(chunk) >= limit + 1:
+                break

         chunk.sort(key=lambda e: (-e["num_joined_members"], e["room_id"]))

From 7e6fa29cb5ba1abd8b4f3873b0ef171c7c8aba26 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Tue, 14 Nov 2017 11:22:42 +0000
Subject: [PATCH 0452/1637] Remove preserve_context_over_{fn, deferred}

Both of these functions are known to leak logcontexts. Replace the
remaining calls to them and kill them off.
---
 docs/log_contexts.rst | 4 ----
 synapse/federation/federation_client.py | 4 ++--
 synapse/handlers/appservice.py | 4 ++--
 synapse/handlers/initial_sync.py | 4 ++--
 synapse/push/pusherpool.py | 6 ++---
 synapse/storage/stream.py | 4 ++--
 synapse/util/async.py | 6 ++---
 synapse/util/distributor.py | 24 +++++++------------
 synapse/util/logcontext.py | 31 -------------------------
 synapse/visibility.py | 4 ++--
 10 files changed, 24 insertions(+), 67 deletions(-)

diff --git a/docs/log_contexts.rst b/docs/log_contexts.rst
index eb1784e700..b19b7fa1ea 100644
--- a/docs/log_contexts.rst
+++ b/docs/log_contexts.rst
@@ -298,10 +298,6 @@ It can be used like this:
         # this will now be logged against the request context
         logger.debug("Request handling complete")

-XXX: I think ``preserve_context_over_fn`` is supposed to do the first option,
-but the fact that it does ``preserve_context_over_deferred`` on its results
-means that its use is fraught with difficulty.
- Passing synapse deferreds into third-party functions ---------------------------------------------------- diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 7c5e5d957f..b8f02f5391 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -25,7 +25,7 @@ from synapse.api.errors import ( from synapse.util import unwrapFirstError, logcontext from synapse.util.caches.expiringcache import ExpiringCache from synapse.util.logutils import log_function -from synapse.util.logcontext import preserve_fn, preserve_context_over_deferred +from synapse.util.logcontext import make_deferred_yieldable, preserve_fn from synapse.events import FrozenEvent, builder import synapse.metrics @@ -420,7 +420,7 @@ class FederationClient(FederationBase): for e_id in batch ] - res = yield preserve_context_over_deferred( + res = yield make_deferred_yieldable( defer.DeferredList(deferreds, consumeErrors=True) ) for success, result in res: diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index 543bf28aec..feca3e4c10 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -17,7 +17,7 @@ from twisted.internet import defer from synapse.api.constants import EventTypes from synapse.util.metrics import Measure -from synapse.util.logcontext import preserve_fn, preserve_context_over_deferred +from synapse.util.logcontext import make_deferred_yieldable, preserve_fn import logging @@ -159,7 +159,7 @@ class ApplicationServicesHandler(object): def query_3pe(self, kind, protocol, fields): services = yield self._get_services_for_3pn(protocol) - results = yield preserve_context_over_deferred(defer.DeferredList([ + results = yield make_deferred_yieldable(defer.DeferredList([ preserve_fn(self.appservice_api.query_3pe)(service, kind, protocol, fields) for service in services ], consumeErrors=True)) diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index 9718d4abc5..c5267b4b84 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -27,7 +27,7 @@ from synapse.types import ( from synapse.util import unwrapFirstError from synapse.util.async import concurrently_execute from synapse.util.caches.snapshot_cache import SnapshotCache -from synapse.util.logcontext import preserve_fn, preserve_context_over_deferred +from synapse.util.logcontext import make_deferred_yieldable, preserve_fn from synapse.visibility import filter_events_for_client from ._base import BaseHandler @@ -163,7 +163,7 @@ class InitialSyncHandler(BaseHandler): lambda states: states[event.event_id] ) - (messages, token), current_state = yield preserve_context_over_deferred( + (messages, token), current_state = yield make_deferred_yieldable( defer.gatherResults( [ preserve_fn(self.store.get_recent_events_for_room)( diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index 7c069b662e..34cb108dcb 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -17,7 +17,7 @@ from twisted.internet import defer from .pusher import PusherFactory -from synapse.util.logcontext import preserve_fn, preserve_context_over_deferred +from synapse.util.logcontext import make_deferred_yieldable, preserve_fn from synapse.util.async import run_on_reactor import logging @@ -136,7 +136,7 @@ class PusherPool: ) ) - yield preserve_context_over_deferred(defer.gatherResults(deferreds)) + yield make_deferred_yieldable(defer.gatherResults(deferreds)) except Exception: 
logger.exception("Exception in pusher on_new_notifications") @@ -161,7 +161,7 @@ class PusherPool: preserve_fn(p.on_new_receipts)(min_stream_id, max_stream_id) ) - yield preserve_context_over_deferred(defer.gatherResults(deferreds)) + yield make_deferred_yieldable(defer.gatherResults(deferreds)) except Exception: logger.exception("Exception in pusher on_new_receipts") diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index dddd5fc0e7..52bdce5be2 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -39,7 +39,7 @@ from ._base import SQLBaseStore from synapse.util.caches.descriptors import cached from synapse.api.constants import EventTypes from synapse.types import RoomStreamToken -from synapse.util.logcontext import preserve_fn, preserve_context_over_deferred +from synapse.util.logcontext import make_deferred_yieldable, preserve_fn from synapse.storage.engines import PostgresEngine, Sqlite3Engine import logging @@ -234,7 +234,7 @@ class StreamStore(SQLBaseStore): results = {} room_ids = list(room_ids) for rm_ids in (room_ids[i:i + 20] for i in xrange(0, len(room_ids), 20)): - res = yield preserve_context_over_deferred(defer.gatherResults([ + res = yield make_deferred_yieldable(defer.gatherResults([ preserve_fn(self.get_room_events_stream_for_room)( room_id, from_key, to_key, limit, order=order, ) diff --git a/synapse/util/async.py b/synapse/util/async.py index e786fb38a9..0729bb2863 100644 --- a/synapse/util/async.py +++ b/synapse/util/async.py @@ -17,7 +17,7 @@ from twisted.internet import defer, reactor from .logcontext import ( - PreserveLoggingContext, preserve_fn, preserve_context_over_deferred, + PreserveLoggingContext, make_deferred_yieldable, preserve_fn ) from synapse.util import logcontext, unwrapFirstError @@ -351,7 +351,7 @@ class ReadWriteLock(object): # We wait for the latest writer to finish writing. We can safely ignore # any existing readers... as they're readers. - yield curr_writer + yield make_deferred_yieldable(curr_writer) @contextmanager def _ctx_manager(): @@ -380,7 +380,7 @@ class ReadWriteLock(object): curr_readers.clear() self.key_to_current_writer[key] = new_defer - yield preserve_context_over_deferred(defer.gatherResults(to_wait_on)) + yield make_deferred_yieldable(defer.gatherResults(to_wait_on)) @contextmanager def _ctx_manager(): diff --git a/synapse/util/distributor.py b/synapse/util/distributor.py index e68f94ce77..734331caaa 100644 --- a/synapse/util/distributor.py +++ b/synapse/util/distributor.py @@ -13,32 +13,24 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from twisted.internet import defer - -from synapse.util.logcontext import ( - PreserveLoggingContext, preserve_context_over_fn -) - -from synapse.util import unwrapFirstError - import logging +from twisted.internet import defer + +from synapse.util import unwrapFirstError +from synapse.util.logcontext import PreserveLoggingContext logger = logging.getLogger(__name__) def user_left_room(distributor, user, room_id): - return preserve_context_over_fn( - distributor.fire, - "user_left_room", user=user, room_id=room_id - ) + with PreserveLoggingContext(): + distributor.fire("user_left_room", user=user, room_id=room_id) def user_joined_room(distributor, user, room_id): - return preserve_context_over_fn( - distributor.fire, - "user_joined_room", user=user, room_id=room_id - ) + with PreserveLoggingContext(): + distributor.fire("user_joined_room", user=user, room_id=room_id) class Distributor(object): diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py index 9683cc7265..92b9413a35 100644 --- a/synapse/util/logcontext.py +++ b/synapse/util/logcontext.py @@ -291,37 +291,6 @@ class _PreservingContextDeferred(defer.Deferred): return g -def preserve_context_over_fn(fn, *args, **kwargs): - """Takes a function and invokes it with the given arguments, but removes - and restores the current logging context while doing so. - - If the result is a deferred, call preserve_context_over_deferred before - returning it. - """ - with PreserveLoggingContext(): - res = fn(*args, **kwargs) - - if isinstance(res, defer.Deferred): - return preserve_context_over_deferred(res) - else: - return res - - -def preserve_context_over_deferred(deferred, context=None): - """Given a deferred wrap it such that any callbacks added later to it will - be invoked with the current context. - - Deprecated: this almost certainly doesn't do want you want, ie make - the deferred follow the synapse logcontext rules: try - ``make_deferred_yieldable`` instead. - """ - if context is None: - context = LoggingContext.current_context() - d = _PreservingContextDeferred(context) - deferred.chainDeferred(d) - return d - - def preserve_fn(f): """Wraps a function, to ensure that the current context is restored after return from the function, and that the sentinel context is set once the diff --git a/synapse/visibility.py b/synapse/visibility.py index d7dbdc77ff..aaca2c584c 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -17,7 +17,7 @@ from twisted.internet import defer from synapse.api.constants import Membership, EventTypes -from synapse.util.logcontext import preserve_fn, preserve_context_over_deferred +from synapse.util.logcontext import make_deferred_yieldable, preserve_fn import logging @@ -58,7 +58,7 @@ def filter_events_for_clients(store, user_tuples, events, event_id_to_state, always_include_ids (set(event_id)): set of event ids to specifically include (unless sender is ignored) """ - forgotten = yield preserve_context_over_deferred(defer.gatherResults([ + forgotten = yield make_deferred_yieldable(defer.gatherResults([ defer.maybeDeferred( preserve_fn(store.who_forgot_in_room), room_id, From 4dd1bfa8c18a3dc9df934a61771ae1e85b313b7e Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 13 Nov 2017 10:30:38 +0000 Subject: [PATCH 0453/1637] Revert "Revert "move _state_group_cache to statestore"" We're going to fix this properly on this branch, so that the _state_group_cache can end up in StateGroupReadStore. This reverts commit ab335edb023d66cd0be439e045b10ca104b73cb5. 
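(Aside: a toy, dict-backed sketch, with all method names hypothetical, of the layout the next patch moves towards: read-only accessors live on a base class that both the master store and the slaved stores inherit, so the workers no longer need to gut-wrench methods off DataStore.)

    class StateGroupReadStore(object):
        # read-only state-group accessors, shared with the workers
        def __init__(self, db_conn, hs):
            self._groups = {}   # dict standing in for the real tables

        def get_state_group(self, group_id):
            # query-only path: safe to run on a replica
            return self._groups.get(group_id)

    class StateStore(StateGroupReadStore):
        # the master store layers the write paths on top
        def store_state_group(self, group_id, state):
            self._groups[group_id] = state

    store = StateStore(db_conn=None, hs=None)
    store.store_state_group(1, {("m.room.name", ""): "$an_event_id"})
    print(store.get_state_group(1))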
--- synapse/storage/_base.py | 6 ------ synapse/storage/state.py | 19 ++++++++++++------- 2 files changed, 12 insertions(+), 13 deletions(-) diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index e94917d9cd..7ebd4f189d 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -16,8 +16,6 @@ import logging from synapse.api.errors import StoreError from synapse.util.logcontext import LoggingContext, PreserveLoggingContext -from synapse.util.caches import CACHE_SIZE_FACTOR -from synapse.util.caches.dictionary_cache import DictionaryCache from synapse.util.caches.descriptors import Cache from synapse.storage.engines import PostgresEngine import synapse.metrics @@ -180,10 +178,6 @@ class SQLBaseStore(object): self._get_event_cache = Cache("*getEvent*", keylen=3, max_entries=hs.config.event_cache_size) - self._state_group_cache = DictionaryCache( - "*stateGroupCache*", 100000 * CACHE_SIZE_FACTOR - ) - self._event_fetch_lock = threading.Condition() self._event_fetch_list = [] self._event_fetch_ongoing = 0 diff --git a/synapse/storage/state.py b/synapse/storage/state.py index dd01b68762..ee3496123e 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -13,16 +13,17 @@ # See the License for the specific language governing permissions and # limitations under the License. -from ._base import SQLBaseStore -from synapse.util.caches.descriptors import cached, cachedList -from synapse.util.caches import intern_string -from synapse.util.stringutils import to_ascii -from synapse.storage.engines import PostgresEngine +from collections import namedtuple +import logging from twisted.internet import defer -from collections import namedtuple -import logging +from synapse.storage.engines import PostgresEngine +from synapse.util.caches import intern_string, CACHE_SIZE_FACTOR +from synapse.util.caches.descriptors import cached, cachedList +from synapse.util.caches.dictionary_cache import DictionaryCache +from synapse.util.stringutils import to_ascii +from ._base import SQLBaseStore logger = logging.getLogger(__name__) @@ -81,6 +82,10 @@ class StateStore(SQLBaseStore): where_clause="type='m.room.member'", ) + self._state_group_cache = DictionaryCache( + "*stateGroupCache*", 100000 * CACHE_SIZE_FACTOR + ) + @cached(max_entries=100000, iterable=True) def get_current_state_ids(self, room_id): """Get the current state event ids for a room based on the From 35a4b632405be2ca91039f63a8c9c550f0f44ea3 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 9 Nov 2017 19:00:20 +0000 Subject: [PATCH 0454/1637] Pull out bits of StateStore to a mixin ... so that we don't need to secretly gut-wrench it for use in the slaved stores. I haven't done the other stores yet, but we should. I'm tired of the workers breaking every time we tweak the stores because I forgot to gut-wrench the right method. fixes https://github.com/matrix-org/synapse/issues/2655. --- synapse/replication/slave/storage/events.py | 39 +- synapse/storage/state.py | 424 ++++++++++---------- 2 files changed, 226 insertions(+), 237 deletions(-) diff --git a/synapse/replication/slave/storage/events.py b/synapse/replication/slave/storage/events.py index 94ebbffc1b..29d7296b43 100644 --- a/synapse/replication/slave/storage/events.py +++ b/synapse/replication/slave/storage/events.py @@ -12,20 +12,18 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from ._base import BaseSlavedStore -from ._slaved_id_tracker import SlavedIdTracker +import logging from synapse.api.constants import EventTypes from synapse.storage import DataStore -from synapse.storage.roommember import RoomMemberStore from synapse.storage.event_federation import EventFederationStore from synapse.storage.event_push_actions import EventPushActionsStore -from synapse.storage.state import StateStore +from synapse.storage.roommember import RoomMemberStore +from synapse.storage.state import StateGroupReadStore from synapse.storage.stream import StreamStore from synapse.util.caches.stream_change_cache import StreamChangeCache - -import logging - +from ._base import BaseSlavedStore +from ._slaved_id_tracker import SlavedIdTracker logger = logging.getLogger(__name__) @@ -39,7 +37,7 @@ logger = logging.getLogger(__name__) # the method descriptor on the DataStore and chuck them into our class. -class SlavedEventStore(BaseSlavedStore): +class SlavedEventStore(StateGroupReadStore, BaseSlavedStore): def __init__(self, db_conn, hs): super(SlavedEventStore, self).__init__(db_conn, hs) @@ -90,25 +88,9 @@ class SlavedEventStore(BaseSlavedStore): _get_unread_counts_by_pos_txn = ( DataStore._get_unread_counts_by_pos_txn.__func__ ) - _get_state_group_for_events = ( - StateStore.__dict__["_get_state_group_for_events"] - ) - _get_state_group_for_event = ( - StateStore.__dict__["_get_state_group_for_event"] - ) - _get_state_groups_from_groups = ( - StateStore.__dict__["_get_state_groups_from_groups"] - ) - _get_state_groups_from_groups_txn = ( - DataStore._get_state_groups_from_groups_txn.__func__ - ) get_recent_event_ids_for_room = ( StreamStore.__dict__["get_recent_event_ids_for_room"] ) - get_current_state_ids = ( - StateStore.__dict__["get_current_state_ids"] - ) - get_state_group_delta = StateStore.__dict__["get_state_group_delta"] _get_joined_hosts_cache = RoomMemberStore.__dict__["_get_joined_hosts_cache"] has_room_changed_since = DataStore.has_room_changed_since.__func__ @@ -134,12 +116,6 @@ class SlavedEventStore(BaseSlavedStore): DataStore.get_room_events_stream_for_room.__func__ ) get_events_around = DataStore.get_events_around.__func__ - get_state_for_event = DataStore.get_state_for_event.__func__ - get_state_for_events = DataStore.get_state_for_events.__func__ - get_state_groups = DataStore.get_state_groups.__func__ - get_state_groups_ids = DataStore.get_state_groups_ids.__func__ - get_state_ids_for_event = DataStore.get_state_ids_for_event.__func__ - get_state_ids_for_events = DataStore.get_state_ids_for_events.__func__ get_joined_users_from_state = DataStore.get_joined_users_from_state.__func__ get_joined_users_from_context = DataStore.get_joined_users_from_context.__func__ _get_joined_users_from_context = ( @@ -169,10 +145,7 @@ class SlavedEventStore(BaseSlavedStore): _get_rooms_for_user_where_membership_is_txn = ( DataStore._get_rooms_for_user_where_membership_is_txn.__func__ ) - _get_state_for_groups = DataStore._get_state_for_groups.__func__ - _get_all_state_from_cache = DataStore._get_all_state_from_cache.__func__ _get_events_around_txn = DataStore._get_events_around_txn.__func__ - _get_some_state_from_cache = DataStore._get_some_state_from_cache.__func__ get_backfill_events = DataStore.get_backfill_events.__func__ _get_backfill_events = DataStore._get_backfill_events.__func__ diff --git a/synapse/storage/state.py b/synapse/storage/state.py index ee3496123e..360e3e4355 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -18,6 +18,7 @@ import logging 
from twisted.internet import defer +from synapse.storage.background_updates import BackgroundUpdateStore from synapse.storage.engines import PostgresEngine from synapse.util.caches import intern_string, CACHE_SIZE_FACTOR from synapse.util.caches.descriptors import cached, cachedList @@ -41,23 +42,11 @@ class _GetStateGroupDelta(namedtuple("_GetStateGroupDelta", ("prev_group", "delt return len(self.delta_ids) if self.delta_ids else 0 -class StateStore(SQLBaseStore): - """ Keeps track of the state at a given event. +class StateGroupReadStore(SQLBaseStore): + """The read-only parts of StateGroupStore - This is done by the concept of `state groups`. Every event is a assigned - a state group (identified by an arbitrary string), which references a - collection of state events. The current state of an event is then the - collection of state events referenced by the event's state group. - - Hence, every change in the current state causes a new state group to be - generated. However, if no change happens (e.g., if we get a message event - with only one parent it inherits the state group from its parent.) - - There are three tables: - * `state_groups`: Stores group name, first event with in the group and - room id. - * `event_to_state_groups`: Maps events to state groups. - * `state_groups_state`: Maps state group to state events. + None of these functions write to the state tables, so are suitable for + including in the SlavedStores. """ STATE_GROUP_DEDUPLICATION_UPDATE_NAME = "state_group_state_deduplication" @@ -65,22 +54,7 @@ class StateStore(SQLBaseStore): CURRENT_STATE_INDEX_UPDATE_NAME = "current_state_members_idx" def __init__(self, db_conn, hs): - super(StateStore, self).__init__(db_conn, hs) - self.register_background_update_handler( - self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME, - self._background_deduplicate_state, - ) - self.register_background_update_handler( - self.STATE_GROUP_INDEX_UPDATE_NAME, - self._background_index_state, - ) - self.register_background_index_update( - self.CURRENT_STATE_INDEX_UPDATE_NAME, - index_name="current_state_events_member_index", - table="current_state_events", - columns=["state_key"], - where_clause="type='m.room.member'", - ) + super(StateGroupReadStore, self).__init__(db_conn, hs) self._state_group_cache = DictionaryCache( "*stateGroupCache*", 100000 * CACHE_SIZE_FACTOR @@ -195,178 +169,6 @@ class StateStore(SQLBaseStore): for group, event_id_map in group_to_ids.iteritems() }) - def _have_persisted_state_group_txn(self, txn, state_group): - txn.execute( - "SELECT count(*) FROM state_groups WHERE id = ?", - (state_group,) - ) - row = txn.fetchone() - return row and row[0] - - def _store_mult_state_groups_txn(self, txn, events_and_contexts): - state_groups = {} - for event, context in events_and_contexts: - if event.internal_metadata.is_outlier(): - continue - - if context.current_state_ids is None: - # AFAIK, this can never happen - logger.error( - "Non-outlier event %s had current_state_ids==None", - event.event_id) - continue - - # if the event was rejected, just give it the same state as its - # predecessor. 
- if context.rejected: - state_groups[event.event_id] = context.prev_group - continue - - state_groups[event.event_id] = context.state_group - - if self._have_persisted_state_group_txn(txn, context.state_group): - continue - - self._simple_insert_txn( - txn, - table="state_groups", - values={ - "id": context.state_group, - "room_id": event.room_id, - "event_id": event.event_id, - }, - ) - - # We persist as a delta if we can, while also ensuring the chain - # of deltas isn't tooo long, as otherwise read performance degrades. - if context.prev_group: - is_in_db = self._simple_select_one_onecol_txn( - txn, - table="state_groups", - keyvalues={"id": context.prev_group}, - retcol="id", - allow_none=True, - ) - if not is_in_db: - raise Exception( - "Trying to persist state with unpersisted prev_group: %r" - % (context.prev_group,) - ) - - potential_hops = self._count_state_group_hops_txn( - txn, context.prev_group - ) - if context.prev_group and potential_hops < MAX_STATE_DELTA_HOPS: - self._simple_insert_txn( - txn, - table="state_group_edges", - values={ - "state_group": context.state_group, - "prev_state_group": context.prev_group, - }, - ) - - self._simple_insert_many_txn( - txn, - table="state_groups_state", - values=[ - { - "state_group": context.state_group, - "room_id": event.room_id, - "type": key[0], - "state_key": key[1], - "event_id": state_id, - } - for key, state_id in context.delta_ids.iteritems() - ], - ) - else: - self._simple_insert_many_txn( - txn, - table="state_groups_state", - values=[ - { - "state_group": context.state_group, - "room_id": event.room_id, - "type": key[0], - "state_key": key[1], - "event_id": state_id, - } - for key, state_id in context.current_state_ids.iteritems() - ], - ) - - # Prefill the state group cache with this group. - # It's fine to use the sequence like this as the state group map - # is immutable. (If the map wasn't immutable then this prefill could - # race with another update) - txn.call_after( - self._state_group_cache.update, - self._state_group_cache.sequence, - key=context.state_group, - value=dict(context.current_state_ids), - full=True, - ) - - self._simple_insert_many_txn( - txn, - table="event_to_state_groups", - values=[ - { - "state_group": state_group_id, - "event_id": event_id, - } - for event_id, state_group_id in state_groups.iteritems() - ], - ) - - for event_id, state_group_id in state_groups.iteritems(): - txn.call_after( - self._get_state_group_for_event.prefill, - (event_id,), state_group_id - ) - - def _count_state_group_hops_txn(self, txn, state_group): - """Given a state group, count how many hops there are in the tree. - - This is used to ensure the delta chains don't get too long. - """ - if isinstance(self.database_engine, PostgresEngine): - sql = (""" - WITH RECURSIVE state(state_group) AS ( - VALUES(?::bigint) - UNION ALL - SELECT prev_state_group FROM state_group_edges e, state s - WHERE s.state_group = e.state_group - ) - SELECT count(*) FROM state; - """) - - txn.execute(sql, (state_group,)) - row = txn.fetchone() - if row and row[0]: - return row[0] - else: - return 0 - else: - # We don't use WITH RECURSIVE on sqlite3 as there are distributions - # that ship with an sqlite3 version that doesn't support it (e.g. 
wheezy)
-            next_group = state_group
-            count = 0
-
-            while next_group:
-                next_group = self._simple_select_one_onecol_txn(
-                    txn,
-                    table="state_group_edges",
-                    keyvalues={"state_group": next_group},
-                    retcol="prev_state_group",
-                    allow_none=True,
-                )
-                if next_group:
-                    count += 1
-
-            return count
-
     @defer.inlineCallbacks
     def _get_state_groups_from_groups(self, groups, types):
         """Returns dictionary state_group -> (dict of (type, state_key) -> event id)
@@ -747,6 +549,220 @@ class StateStore(SQLBaseStore):

         defer.returnValue(results)

+
+class StateStore(StateGroupReadStore, BackgroundUpdateStore):
+    """ Keeps track of the state at a given event.
+
+    This is done by the concept of `state groups`. Every event is assigned
+    a state group (identified by an arbitrary string), which references a
+    collection of state events. The current state of an event is then the
+    collection of state events referenced by the event's state group.
+
+    Hence, every change in the current state causes a new state group to be
+    generated. However, if no change happens (e.g., if we get a message event
+    with only one parent, it inherits the state group from its parent).
+
+    There are three tables:
+    * `state_groups`: Stores group name, first event within the group and
+      room id.
+    * `event_to_state_groups`: Maps events to state groups.
+    * `state_groups_state`: Maps state group to state events.
+    """
+
+    STATE_GROUP_DEDUPLICATION_UPDATE_NAME = "state_group_state_deduplication"
+    STATE_GROUP_INDEX_UPDATE_NAME = "state_group_state_type_index"
+    CURRENT_STATE_INDEX_UPDATE_NAME = "current_state_members_idx"
+
+    def __init__(self, db_conn, hs):
+        super(StateStore, self).__init__(db_conn, hs)
+        self.register_background_update_handler(
+            self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME,
+            self._background_deduplicate_state,
+        )
+        self.register_background_update_handler(
+            self.STATE_GROUP_INDEX_UPDATE_NAME,
+            self._background_index_state,
+        )
+        self.register_background_index_update(
+            self.CURRENT_STATE_INDEX_UPDATE_NAME,
+            index_name="current_state_events_member_index",
+            table="current_state_events",
+            columns=["state_key"],
+            where_clause="type='m.room.member'",
+        )
+
+    def _have_persisted_state_group_txn(self, txn, state_group):
+        txn.execute(
+            "SELECT count(*) FROM state_groups WHERE id = ?",
+            (state_group,)
+        )
+        row = txn.fetchone()
+        return row and row[0]
+
+    def _store_mult_state_groups_txn(self, txn, events_and_contexts):
+        state_groups = {}
+        for event, context in events_and_contexts:
+            if event.internal_metadata.is_outlier():
+                continue
+
+            if context.current_state_ids is None:
+                # AFAIK, this can never happen
+                logger.error(
+                    "Non-outlier event %s had current_state_ids==None",
+                    event.event_id)
+                continue
+
+            # if the event was rejected, just give it the same state as its
+            # predecessor.
+            if context.rejected:
+                state_groups[event.event_id] = context.prev_group
+                continue
+
+            state_groups[event.event_id] = context.state_group
+
+            if self._have_persisted_state_group_txn(txn, context.state_group):
+                continue
+
+            self._simple_insert_txn(
+                txn,
+                table="state_groups",
+                values={
+                    "id": context.state_group,
+                    "room_id": event.room_id,
+                    "event_id": event.event_id,
+                },
+            )
+
+            # We persist as a delta if we can, while also ensuring the chain
+            # of deltas isn't too long, as otherwise read performance degrades.
+ if context.prev_group: + is_in_db = self._simple_select_one_onecol_txn( + txn, + table="state_groups", + keyvalues={"id": context.prev_group}, + retcol="id", + allow_none=True, + ) + if not is_in_db: + raise Exception( + "Trying to persist state with unpersisted prev_group: %r" + % (context.prev_group,) + ) + + potential_hops = self._count_state_group_hops_txn( + txn, context.prev_group + ) + if context.prev_group and potential_hops < MAX_STATE_DELTA_HOPS: + self._simple_insert_txn( + txn, + table="state_group_edges", + values={ + "state_group": context.state_group, + "prev_state_group": context.prev_group, + }, + ) + + self._simple_insert_many_txn( + txn, + table="state_groups_state", + values=[ + { + "state_group": context.state_group, + "room_id": event.room_id, + "type": key[0], + "state_key": key[1], + "event_id": state_id, + } + for key, state_id in context.delta_ids.iteritems() + ], + ) + else: + self._simple_insert_many_txn( + txn, + table="state_groups_state", + values=[ + { + "state_group": context.state_group, + "room_id": event.room_id, + "type": key[0], + "state_key": key[1], + "event_id": state_id, + } + for key, state_id in context.current_state_ids.iteritems() + ], + ) + + # Prefill the state group cache with this group. + # It's fine to use the sequence like this as the state group map + # is immutable. (If the map wasn't immutable then this prefill could + # race with another update) + txn.call_after( + self._state_group_cache.update, + self._state_group_cache.sequence, + key=context.state_group, + value=dict(context.current_state_ids), + full=True, + ) + + self._simple_insert_many_txn( + txn, + table="event_to_state_groups", + values=[ + { + "state_group": state_group_id, + "event_id": event_id, + } + for event_id, state_group_id in state_groups.iteritems() + ], + ) + + for event_id, state_group_id in state_groups.iteritems(): + txn.call_after( + self._get_state_group_for_event.prefill, + (event_id,), state_group_id + ) + + def _count_state_group_hops_txn(self, txn, state_group): + """Given a state group, count how many hops there are in the tree. + + This is used to ensure the delta chains don't get too long. + """ + if isinstance(self.database_engine, PostgresEngine): + sql = (""" + WITH RECURSIVE state(state_group) AS ( + VALUES(?::bigint) + UNION ALL + SELECT prev_state_group FROM state_group_edges e, state s + WHERE s.state_group = e.state_group + ) + SELECT count(*) FROM state; + """) + + txn.execute(sql, (state_group,)) + row = txn.fetchone() + if row and row[0]: + return row[0] + else: + return 0 + else: + # We don't use WITH RECURSIVE on sqlite3 as there are distributions + # that ship with an sqlite3 version that doesn't support it (e.g. 
wheezy) + next_group = state_group + count = 0 + + while next_group: + next_group = self._simple_select_one_onecol_txn( + txn, + table="state_group_edges", + keyvalues={"state_group": next_group}, + retcol="prev_state_group", + allow_none=True, + ) + if next_group: + count += 1 + + return count + def get_next_state_group(self): return self._state_groups_id_gen.get_next() From 03feb7a34d0496bc3f9cc350e74fde4cd0c38a17 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 14 Nov 2017 14:51:25 +0000 Subject: [PATCH 0455/1637] Bump version and changelog --- CHANGES.rst | 50 +++++++++++++++++++++++++++++++++++++++++++++ synapse/__init__.py | 2 +- 2 files changed, 51 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index 4911cfa284..8e84323079 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,53 @@ +Changes in synapse v0.25.0-rc1 (2017-11-14) +=========================================== + +Features: + +* Add is_public to groups table to allow for private groups (PR #2582) +* Add a route for determining who you are (PR #2668) Thanks to @turt2live! +* Add more features to the password providers (PR #2608, #2610, #2620, #2622, + #2623, #2624, #2626, #2628, #2629) +* Add a hook for custom rest endpoints (PR #2627) +* Add API to update group room visibility (PR #2651) + + +Changes: + +* Ignore