From 975903ae1735f690a387c10b1ffd1a9384353213 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 22 Jan 2016 10:41:30 +0000 Subject: [PATCH 001/349] Sanitize filters --- synapse/api/filtering.py | 48 +++++++++++++++----------- synapse/rest/client/v2_alpha/filter.py | 2 +- synapse/rest/client/v2_alpha/sync.py | 24 ++++++------- 3 files changed, 40 insertions(+), 34 deletions(-) diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py index c7f021d1ff..5530b8c48f 100644 --- a/synapse/api/filtering.py +++ b/synapse/api/filtering.py @@ -28,14 +28,14 @@ class Filtering(object): return result def add_user_filter(self, user_localpart, user_filter): - self._check_valid_filter(user_filter) + self.check_valid_filter(user_filter) return self.store.add_user_filter(user_localpart, user_filter) # TODO(paul): surely we should probably add a delete_user_filter or # replace_user_filter at some point? There's no REST API specified for # them however - def _check_valid_filter(self, user_filter_json): + def check_valid_filter(self, user_filter_json): """Check if the provided filter is valid. This inspects all definitions contained within the filter. @@ -129,52 +129,55 @@ class Filtering(object): class FilterCollection(object): def __init__(self, filter_json): - self.filter_json = filter_json + self._filter_json = filter_json - room_filter_json = self.filter_json.get("room", {}) + room_filter_json = self._filter_json.get("room", {}) - self.room_filter = Filter({ + self._room_filter = Filter({ k: v for k, v in room_filter_json.items() if k in ("rooms", "not_rooms") }) - self.room_timeline_filter = Filter(room_filter_json.get("timeline", {})) - self.room_state_filter = Filter(room_filter_json.get("state", {})) - self.room_ephemeral_filter = Filter(room_filter_json.get("ephemeral", {})) - self.room_account_data = Filter(room_filter_json.get("account_data", {})) - self.presence_filter = Filter(self.filter_json.get("presence", {})) - self.account_data = Filter(self.filter_json.get("account_data", {})) + self._room_timeline_filter = Filter(room_filter_json.get("timeline", {})) + self._room_state_filter = Filter(room_filter_json.get("state", {})) + self._room_ephemeral_filter = Filter(room_filter_json.get("ephemeral", {})) + self._room_account_data = Filter(room_filter_json.get("account_data", {})) + self._presence_filter = Filter(filter_json.get("presence", {})) + self._account_data = Filter(filter_json.get("account_data", {})) - self.include_leave = self.filter_json.get("room", {}).get( + self.include_leave = filter_json.get("room", {}).get( "include_leave", False ) + def get_filter_json(self): + return self._filter_json + def timeline_limit(self): - return self.room_timeline_filter.limit() + return self._room_timeline_filter.limit() def presence_limit(self): - return self.presence_filter.limit() + return self._presence_filter.limit() def ephemeral_limit(self): - return self.room_ephemeral_filter.limit() + return self._room_ephemeral_filter.limit() def filter_presence(self, events): - return self.presence_filter.filter(events) + return self._presence_filter.filter(events) def filter_account_data(self, events): - return self.account_data.filter(events) + return self._account_data.filter(events) def filter_room_state(self, events): - return self.room_state_filter.filter(self.room_filter.filter(events)) + return self._room_state_filter.filter(self._room_filter.filter(events)) def filter_room_timeline(self, events): - return self.room_timeline_filter.filter(self.room_filter.filter(events)) + return 
self._room_timeline_filter.filter(self._room_filter.filter(events)) def filter_room_ephemeral(self, events): - return self.room_ephemeral_filter.filter(self.room_filter.filter(events)) + return self._room_ephemeral_filter.filter(self._room_filter.filter(events)) def filter_room_account_data(self, events): - return self.room_account_data.filter(self.room_filter.filter(events)) + return self._room_account_data.filter(self._room_filter.filter(events)) class Filter(object): @@ -258,3 +261,6 @@ def _matches_wildcard(actual_value, filter_value): return actual_value.startswith(type_prefix) else: return actual_value == filter_value + + +DEFAULT_FILTER_COLLECTION = FilterCollection({}) diff --git a/synapse/rest/client/v2_alpha/filter.py b/synapse/rest/client/v2_alpha/filter.py index 7695bebc28..7c94f6ec41 100644 --- a/synapse/rest/client/v2_alpha/filter.py +++ b/synapse/rest/client/v2_alpha/filter.py @@ -59,7 +59,7 @@ class GetFilterRestServlet(RestServlet): filter_id=filter_id, ) - defer.returnValue((200, filter.filter_json)) + defer.returnValue((200, filter.get_filter_json())) except KeyError: raise SynapseError(400, "No such filter") diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py index 4114a7e430..ab924ad9e0 100644 --- a/synapse/rest/client/v2_alpha/sync.py +++ b/synapse/rest/client/v2_alpha/sync.py @@ -24,7 +24,7 @@ from synapse.events import FrozenEvent from synapse.events.utils import ( serialize_event, format_event_for_client_v2_without_room_id, ) -from synapse.api.filtering import FilterCollection +from synapse.api.filtering import FilterCollection, DEFAULT_FILTER_COLLECTION from synapse.api.errors import SynapseError from ._base import client_v2_patterns @@ -113,20 +113,20 @@ class SyncRestServlet(RestServlet): ) ) - if filter_id and filter_id.startswith('{'): - try: - filter_object = json.loads(filter_id) - except: - raise SynapseError(400, "Invalid filter JSON") - self.filtering._check_valid_filter(filter_object) - filter = FilterCollection(filter_object) - else: - try: + if filter_id: + if filter_id.startswith('{'): + try: + filter_object = json.loads(filter_id) + except: + raise SynapseError(400, "Invalid filter JSON") + self.filtering.check_valid_filter(filter_object) + filter = FilterCollection(filter_object) + else: filter = yield self.filtering.get_user_filter( user.localpart, filter_id ) - except: - filter = FilterCollection({}) + else: + filter = DEFAULT_FILTER_COLLECTION sync_config = SyncConfig( user=user, From 8f9c74e9f12050b6680355fc93758b28672e9358 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 22 Jan 2016 10:48:27 +0000 Subject: [PATCH 002/349] Fix tests --- tests/api/test_filtering.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/api/test_filtering.py b/tests/api/test_filtering.py index 14cddee679..16ee6bbe6a 100644 --- a/tests/api/test_filtering.py +++ b/tests/api/test_filtering.py @@ -504,4 +504,4 @@ class FilteringTestCase(unittest.TestCase): filter_id=filter_id, ) - self.assertEquals(filter.filter_json, user_filter_json) + self.assertEquals(filter.get_filter_json(), user_filter_json) From 7959e8b76475527c58c1dda364779a84c6e16368 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 22 Jan 2016 14:59:49 +0000 Subject: [PATCH 003/349] Underscores are allowed in user ids --- synapse/handlers/register.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 1e99c1303c..c11b98d0b7 100644 --- 
a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -52,7 +52,7 @@ class RegistrationHandler(BaseHandler): if urllib.quote(localpart.encode('utf-8')) != localpart: raise SynapseError( 400, - "User ID can only contain characters a-z, 0-9, or '-./'", + "User ID can only contain characters a-z, 0-9, or '_-./'", Codes.INVALID_USERNAME ) From 4021f95261ebdcca0ec2c3c91e8dd442a85c5ed4 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 25 Jan 2016 10:10:44 +0000 Subject: [PATCH 004/349] Move logic from rest/ to handlers/ --- synapse/api/filtering.py | 22 ++-- synapse/handlers/sync.py | 189 +++++++++++++++++++++------ synapse/rest/client/v2_alpha/sync.py | 57 ++++---- 3 files changed, 181 insertions(+), 87 deletions(-) diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py index 5530b8c48f..116060ee7f 100644 --- a/synapse/api/filtering.py +++ b/synapse/api/filtering.py @@ -190,18 +190,16 @@ class Filter(object): Returns: bool: True if the event matches """ - if isinstance(event, dict): - return self.check_fields( - event.get("room_id", None), - event.get("sender", None), - event.get("type", None), - ) - else: - return self.check_fields( - getattr(event, "room_id", None), - getattr(event, "sender", None), - event.type, - ) + sender = event.get("sender", None) + if not sender: + # Presence events have their 'sender' in content.user_id + sender = event.get("conntent", {}).get("user_id", None) + + return self.check_fields( + event.get("room_id", None), + sender, + event.get("type", None), + ) def check_fields(self, room_id, sender, event_type): """Checks whether the filter matches the given event fields. diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 53e1eb0508..9b5b4d2c9f 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -17,6 +17,7 @@ from ._base import BaseHandler from synapse.streams.config import PaginationConfig from synapse.api.constants import Membership, EventTypes +from synapse.api.filtering import DEFAULT_FILTER_COLLECTION from synapse.util import unwrapFirstError from twisted.internet import defer @@ -29,7 +30,7 @@ logger = logging.getLogger(__name__) SyncConfig = collections.namedtuple("SyncConfig", [ "user", - "filter", + "filter_collection", "is_guest", ]) @@ -129,6 +130,11 @@ class SyncHandler(BaseHandler): self.event_sources = hs.get_event_sources() self.clock = hs.get_clock() + @defer.inlineCallbacks + def get_sync_for_user(self, sync_config, since_token=None, timeout=0, + filter_collection=DEFAULT_FILTER_COLLECTION): + pass + @defer.inlineCallbacks def wait_for_sync_for_user(self, sync_config, since_token=None, timeout=0, full_state=False): @@ -142,8 +148,9 @@ class SyncHandler(BaseHandler): if timeout == 0 or since_token is None or full_state: # we are going to return immediately, so don't bother calling # notifier.wait_for_events. 
- result = yield self.current_sync_for_user(sync_config, since_token, - full_state=full_state) + result = yield self.current_sync_for_user( + sync_config, since_token, full_state=full_state, + ) defer.returnValue(result) else: def current_sync_callback(before_token, after_token): @@ -151,7 +158,7 @@ class SyncHandler(BaseHandler): result = yield self.notifier.wait_for_events( sync_config.user.to_string(), timeout, current_sync_callback, - from_token=since_token + from_token=since_token, ) defer.returnValue(result) @@ -205,7 +212,7 @@ class SyncHandler(BaseHandler): ) membership_list = (Membership.INVITE, Membership.JOIN) - if sync_config.filter.include_leave: + if sync_config.filter_collection.include_leave: membership_list += (Membership.LEAVE, Membership.BAN) room_list = yield self.store.get_rooms_for_user_where_membership_is( @@ -266,9 +273,17 @@ class SyncHandler(BaseHandler): deferreds, consumeErrors=True ).addErrback(unwrapFirstError) + account_data_for_user = sync_config.filter_collection.filter_account_data( + self.account_data_for_user(account_data) + ) + + presence = sync_config.filter_collection.filter_presence( + presence + ) + defer.returnValue(SyncResult( presence=presence, - account_data=self.account_data_for_user(account_data), + account_data=account_data_for_user, joined=joined, invited=invited, archived=archived, @@ -302,14 +317,31 @@ class SyncHandler(BaseHandler): current_state = yield self.get_state_at(room_id, now_token) + current_state = { + (e.type, e.state_key): e + for e in sync_config.filter_collection.filter_room_state( + current_state.values() + ) + } + + account_data = self.account_data_for_room( + room_id, tags_by_room, account_data_by_room + ) + + account_data = sync_config.filter_collection.filter_room_account_data( + account_data + ) + + ephemeral = sync_config.filter_collection.filter_room_ephemeral( + ephemeral_by_room.get(room_id, []) + ) + defer.returnValue(JoinedSyncResult( room_id=room_id, timeline=batch, state=current_state, - ephemeral=ephemeral_by_room.get(room_id, []), - account_data=self.account_data_for_room( - room_id, tags_by_room, account_data_by_room - ), + ephemeral=ephemeral, + account_data=account_data, unread_notifications=unread_notifications, )) @@ -365,7 +397,7 @@ class SyncHandler(BaseHandler): typing, typing_key = yield typing_source.get_new_events( user=sync_config.user, from_key=typing_key, - limit=sync_config.filter.ephemeral_limit(), + limit=sync_config.filter_collection.ephemeral_limit(), room_ids=room_ids, is_guest=sync_config.is_guest, ) @@ -388,7 +420,7 @@ class SyncHandler(BaseHandler): receipts, receipt_key = yield receipt_source.get_new_events( user=sync_config.user, from_key=receipt_key, - limit=sync_config.filter.ephemeral_limit(), + limit=sync_config.filter_collection.ephemeral_limit(), room_ids=room_ids, is_guest=sync_config.is_guest, ) @@ -419,13 +451,26 @@ class SyncHandler(BaseHandler): leave_state = yield self.store.get_state_for_event(leave_event_id) + leave_state = { + (e.type, e.state_key): e + for e in sync_config.filter_collection.filter_room_state( + leave_state.values() + ) + } + + account_data = self.account_data_for_room( + room_id, tags_by_room, account_data_by_room + ) + + account_data = sync_config.filter_collection.filter_room_account_data( + account_data + ) + defer.returnValue(ArchivedSyncResult( room_id=room_id, timeline=batch, state=leave_state, - account_data=self.account_data_for_room( - room_id, tags_by_room, account_data_by_room - ), + account_data=account_data, )) @defer.inlineCallbacks @@ 
-444,7 +489,7 @@ class SyncHandler(BaseHandler): presence, presence_key = yield presence_source.get_new_events( user=sync_config.user, from_key=since_token.presence_key, - limit=sync_config.filter.presence_limit(), + limit=sync_config.filter_collection.presence_limit(), room_ids=room_ids, is_guest=sync_config.is_guest, ) @@ -473,7 +518,7 @@ class SyncHandler(BaseHandler): sync_config.user ) - timeline_limit = sync_config.filter.timeline_limit() + timeline_limit = sync_config.filter_collection.timeline_limit() room_events, _ = yield self.store.get_room_events_stream( sync_config.user.to_string(), @@ -538,6 +583,27 @@ class SyncHandler(BaseHandler): # the timeline is inherently limited if we've just joined limited = True + recents = sync_config.filter_collection.filter_room_timeline(recents) + + state = { + (e.type, e.state_key): e + for e in sync_config.filter_collection.filter_room_state( + state.values() + ) + } + + acc_data = self.account_data_for_room( + room_id, tags_by_room, account_data_by_room + ) + + acc_data = sync_config.filter_collection.filter_room_account_data( + acc_data + ) + + ephemeral = sync_config.filter_collection.filter_room_ephemeral( + ephemeral_by_room.get(room_id, []) + ) + room_sync = JoinedSyncResult( room_id=room_id, timeline=TimelineBatch( @@ -546,10 +612,8 @@ class SyncHandler(BaseHandler): limited=limited, ), state=state, - ephemeral=ephemeral_by_room.get(room_id, []), - account_data=self.account_data_for_room( - room_id, tags_by_room, account_data_by_room - ), + ephemeral=ephemeral, + account_data=acc_data, unread_notifications={}, ) logger.debug("Result for room %s: %r", room_id, room_sync) @@ -603,9 +667,17 @@ class SyncHandler(BaseHandler): for event in invite_events ] + account_data_for_user = sync_config.filter_collection.filter_account_data( + self.account_data_for_user(account_data) + ) + + presence = sync_config.filter_collection.filter_presence( + presence + ) + defer.returnValue(SyncResult( presence=presence, - account_data=self.account_data_for_user(account_data), + account_data=account_data_for_user, joined=joined, invited=invited, archived=archived, @@ -621,7 +693,7 @@ class SyncHandler(BaseHandler): limited = True recents = [] filtering_factor = 2 - timeline_limit = sync_config.filter.timeline_limit() + timeline_limit = sync_config.filter_collection.timeline_limit() load_limit = max(timeline_limit * filtering_factor, 100) max_repeat = 3 # Only try a few times per room, otherwise room_key = now_token.room_key @@ -634,9 +706,9 @@ class SyncHandler(BaseHandler): from_token=since_token.room_key if since_token else None, end_token=end_key, ) - (room_key, _) = keys + room_key, _ = keys end_key = "s" + room_key.split('-')[-1] - loaded_recents = sync_config.filter.filter_room_timeline(events) + loaded_recents = sync_config.filter_collection.filter_room_timeline(events) loaded_recents = yield self._filter_events_for_client( sync_config.user.to_string(), loaded_recents, @@ -684,21 +756,28 @@ class SyncHandler(BaseHandler): logger.debug("Recents %r", batch) - current_state = yield self.get_state_at(room_id, now_token) + if batch.limited: + current_state = yield self.get_state_at(room_id, now_token) - state_at_previous_sync = yield self.get_state_at( - room_id, stream_position=since_token - ) + state_at_previous_sync = yield self.get_state_at( + room_id, stream_position=since_token + ) - state = yield self.compute_state_delta( - since_token=since_token, - previous_state=state_at_previous_sync, - current_state=current_state, - ) + state = yield 
self.compute_state_delta( + since_token=since_token, + previous_state=state_at_previous_sync, + current_state=current_state, + ) + else: + state = { + (event.type, event.state_key): event + for event in batch.events if event.is_state() + } just_joined = yield self.check_joined_room(sync_config, state) if just_joined: state = yield self.get_state_at(room_id, now_token) + # batch.limited = True notifs = yield self.unread_notifs_for_room_id( room_id, sync_config, all_ephemeral_by_room @@ -711,14 +790,29 @@ class SyncHandler(BaseHandler): 1 for notif in notifs if _action_has_highlight(notif["actions"]) ]) + state = { + (e.type, e.state_key): e + for e in sync_config.filter_collection.filter_room_state(state.values()) + } + + account_data = self.account_data_for_room( + room_id, tags_by_room, account_data_by_room + ) + + account_data = sync_config.filter_collection.filter_room_account_data( + account_data + ) + + ephemeral = sync_config.filter_collection.filter_room_ephemeral( + ephemeral_by_room.get(room_id, []) + ) + room_sync = JoinedSyncResult( room_id=room_id, timeline=batch, state=state, - ephemeral=ephemeral_by_room.get(room_id, []), - account_data=self.account_data_for_room( - room_id, tags_by_room, account_data_by_room - ), + ephemeral=ephemeral, + account_data=account_data, unread_notifications=unread_notifications, ) @@ -765,13 +859,26 @@ class SyncHandler(BaseHandler): current_state=state_events_at_leave, ) + state_events_delta = { + (e.type, e.state_key): e + for e in sync_config.filter_collection.filter_room_state( + state_events_delta.values() + ) + } + + account_data = self.account_data_for_room( + leave_event.room_id, tags_by_room, account_data_by_room + ) + + account_data = sync_config.filter_collection.filter_room_account_data( + account_data + ) + room_sync = ArchivedSyncResult( room_id=leave_event.room_id, timeline=batch, state=state_events_delta, - account_data=self.account_data_for_room( - leave_event.room_id, tags_by_room, account_data_by_room - ), + account_data=account_data, ) logger.debug("Room sync: %r", room_sync) diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py index ab924ad9e0..07b5b5dfd5 100644 --- a/synapse/rest/client/v2_alpha/sync.py +++ b/synapse/rest/client/v2_alpha/sync.py @@ -130,7 +130,7 @@ class SyncRestServlet(RestServlet): sync_config = SyncConfig( user=user, - filter=filter, + filter_collection=filter, is_guest=requester.is_guest, ) @@ -154,23 +154,21 @@ class SyncRestServlet(RestServlet): time_now = self.clock.time_msec() joined = self.encode_joined( - sync_result.joined, filter, time_now, requester.access_token_id + sync_result.joined, time_now, requester.access_token_id ) invited = self.encode_invited( - sync_result.invited, filter, time_now, requester.access_token_id + sync_result.invited, time_now, requester.access_token_id ) archived = self.encode_archived( - sync_result.archived, filter, time_now, requester.access_token_id + sync_result.archived, time_now, requester.access_token_id ) response_content = { - "account_data": self.encode_account_data( - sync_result.account_data, filter, time_now - ), + "account_data": {"events": sync_result.account_data}, "presence": self.encode_presence( - sync_result.presence, filter, time_now + sync_result.presence, time_now ), "rooms": { "join": joined, @@ -182,24 +180,20 @@ class SyncRestServlet(RestServlet): defer.returnValue((200, response_content)) - def encode_presence(self, events, filter, time_now): + def encode_presence(self, events, time_now): formatted = [] for 
event in events: event = copy.deepcopy(event) event['sender'] = event['content'].pop('user_id') formatted.append(event) - return {"events": filter.filter_presence(formatted)} + return {"events": formatted} - def encode_account_data(self, events, filter, time_now): - return {"events": filter.filter_account_data(events)} - - def encode_joined(self, rooms, filter, time_now, token_id): + def encode_joined(self, rooms, time_now, token_id): """ Encode the joined rooms in a sync result :param list[synapse.handlers.sync.JoinedSyncResult] rooms: list of sync results for rooms this user is joined to - :param FilterCollection filter: filters to apply to the results :param int time_now: current time - used as a baseline for age calculations :param int token_id: ID of the user's auth token - used for namespacing @@ -211,18 +205,17 @@ class SyncRestServlet(RestServlet): joined = {} for room in rooms: joined[room.room_id] = self.encode_room( - room, filter, time_now, token_id + room, time_now, token_id ) return joined - def encode_invited(self, rooms, filter, time_now, token_id): + def encode_invited(self, rooms, time_now, token_id): """ Encode the invited rooms in a sync result :param list[synapse.handlers.sync.InvitedSyncResult] rooms: list of sync results for rooms this user is joined to - :param FilterCollection filter: filters to apply to the results :param int time_now: current time - used as a baseline for age calculations :param int token_id: ID of the user's auth token - used for namespacing @@ -237,7 +230,9 @@ class SyncRestServlet(RestServlet): room.invite, time_now, token_id=token_id, event_format=format_event_for_client_v2_without_room_id, ) - invited_state = invite.get("unsigned", {}).pop("invite_room_state", []) + unsigned = dict(invite.get("unsigned", {})) + invite["unsigned"] = unsigned + invited_state = list(unsigned.pop("invite_room_state", [])) invited_state.append(invite) invited[room.room_id] = { "invite_state": {"events": invited_state} @@ -245,13 +240,12 @@ class SyncRestServlet(RestServlet): return invited - def encode_archived(self, rooms, filter, time_now, token_id): + def encode_archived(self, rooms, time_now, token_id): """ Encode the archived rooms in a sync result :param list[synapse.handlers.sync.ArchivedSyncResult] rooms: list of sync results for rooms this user is joined to - :param FilterCollection filter: filters to apply to the results :param int time_now: current time - used as a baseline for age calculations :param int token_id: ID of the user's auth token - used for namespacing @@ -263,17 +257,16 @@ class SyncRestServlet(RestServlet): joined = {} for room in rooms: joined[room.room_id] = self.encode_room( - room, filter, time_now, token_id, joined=False + room, time_now, token_id, joined=False ) return joined @staticmethod - def encode_room(room, filter, time_now, token_id, joined=True): + def encode_room(room, time_now, token_id, joined=True): """ :param JoinedSyncResult|ArchivedSyncResult room: sync result for a single room - :param FilterCollection filter: filters to apply to the results :param int time_now: current time - used as a baseline for age calculations :param int token_id: ID of the user's auth token - used for namespacing @@ -292,19 +285,17 @@ class SyncRestServlet(RestServlet): ) state_dict = room.state - timeline_events = filter.filter_room_timeline(room.timeline.events) + timeline_events = room.timeline.events state_dict = SyncRestServlet._rollback_state_for_timeline( state_dict, timeline_events) - state_events = 
filter.filter_room_state(state_dict.values()) + state_events = state_dict.values() serialized_state = [serialize(e) for e in state_events] serialized_timeline = [serialize(e) for e in timeline_events] - account_data = filter.filter_room_account_data( - room.account_data - ) + account_data = room.account_data result = { "timeline": { @@ -317,7 +308,7 @@ class SyncRestServlet(RestServlet): } if joined: - ephemeral_events = filter.filter_room_ephemeral(room.ephemeral) + ephemeral_events = room.ephemeral result["ephemeral"] = {"events": ephemeral_events} result["unread_notifications"] = room.unread_notifications @@ -334,8 +325,6 @@ class SyncRestServlet(RestServlet): :param list[synapse.events.EventBase] timeline: the event timeline :return: updated state dictionary """ - logger.debug("Processing state dict %r; timeline %r", state, - [e.get_dict() for e in timeline]) result = state.copy() @@ -357,8 +346,8 @@ class SyncRestServlet(RestServlet): # the event graph, and the state is no longer valid. Really, # the event shouldn't be in the timeline. We're going to ignore # it for now, however. - logger.warn("Found state event %r in timeline which doesn't " - "match state dictionary", timeline_event) + logger.debug("Found state event %r in timeline which doesn't " + "match state dictionary", timeline_event) continue prev_event_id = timeline_event.unsigned.get("replaces_state", None) From 8c6012a4af4973b0a53af65a31cbdb92a3dec5a2 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 25 Jan 2016 13:12:35 +0000 Subject: [PATCH 005/349] Fix tests --- synapse/api/filtering.py | 2 +- tests/api/test_filtering.py | 10 ++++------ 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py index 116060ee7f..6c13ada5df 100644 --- a/synapse/api/filtering.py +++ b/synapse/api/filtering.py @@ -193,7 +193,7 @@ class Filter(object): sender = event.get("sender", None) if not sender: # Presence events have their 'sender' in content.user_id - sender = event.get("conntent", {}).get("user_id", None) + sender = event.get("content", {}).get("user_id", None) return self.check_fields( event.get("room_id", None), diff --git a/tests/api/test_filtering.py b/tests/api/test_filtering.py index 16ee6bbe6a..1a4e439d30 100644 --- a/tests/api/test_filtering.py +++ b/tests/api/test_filtering.py @@ -13,26 +13,24 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from collections import namedtuple from tests import unittest from twisted.internet import defer -from mock import Mock, NonCallableMock +from mock import Mock from tests.utils import ( MockHttpResource, DeferredMockCallable, setup_test_homeserver ) from synapse.types import UserID -from synapse.api.filtering import FilterCollection, Filter +from synapse.api.filtering import Filter +from synapse.events import FrozenEvent user_localpart = "test_user" # MockEvent = namedtuple("MockEvent", "sender type room_id") def MockEvent(**kwargs): - ev = NonCallableMock(spec_set=kwargs.keys()) - ev.configure_mock(**kwargs) - return ev + return FrozenEvent(kwargs) class FilteringTestCase(unittest.TestCase): From ddd25def01e3909a34c52954100763bb2a91f648 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 25 Jan 2016 13:36:02 +0000 Subject: [PATCH 006/349] Implement a _simple_select_many_batch --- synapse/storage/_base.py | 67 ++++++++++++++++++++++++++++++++++++ synapse/storage/presence.py | 33 +++++++++--------- synapse/storage/push_rule.py | 63 +++++++++++---------------------- 3 files changed, 104 insertions(+), 59 deletions(-) diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 183a752387..897c5d8d73 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -629,6 +629,73 @@ class SQLBaseStore(object): return self.cursor_to_dict(txn) + @defer.inlineCallbacks + def _simple_select_many_batch(self, table, column, iterable, retcols, + keyvalues={}, desc="_simple_select_many_batch", + batch_size=100): + """Executes a SELECT query on the named table, which may return zero or + more rows, returning the result as a list of dicts. + + Filters rows by if value of `column` is in `iterable`. + + Args: + txn : Transaction object + table : string giving the table name + column : column name to test for inclusion against `iterable` + iterable : list + keyvalues : dict of column names and values to select the rows with + retcols : list of strings giving the names of the columns to return + """ + results = [] + + chunks = [iterable[i:i+batch_size] for i in xrange(0, len(iterable), batch_size)] + for chunk in chunks: + rows = yield self.runInteraction( + desc, + self._simple_select_many_txn, + table, column, chunk, keyvalues, retcols + ) + + results.extend(rows) + + defer.returnValue(results) + + def _simple_select_many_txn(self, txn, table, column, iterable, keyvalues, retcols): + """Executes a SELECT query on the named table, which may return zero or + more rows, returning the result as a list of dicts. + + Filters rows by if value of `column` is in `iterable`. + + Args: + txn : Transaction object + table : string giving the table name + column : column name to test for inclusion against `iterable` + iterable : list + keyvalues : dict of column names and values to select the rows with + retcols : list of strings giving the names of the columns to return + """ + sql = "SELECT %s FROM %s" % (", ".join(retcols), table) + + clauses = [] + values = [] + clauses.append( + "%s IN (%s)" % (column, ",".join("?" for _ in iterable)) + ) + values.extend(iterable) + + for key, value in keyvalues.items(): + clauses.append("%s = ?" 
% (key,)) + values.append(value) + + if clauses: + sql = "%s WHERE %s" % ( + sql, + " AND ".join(clauses), + ) + + txn.execute(sql, values) + return self.cursor_to_dict(txn) + def _simple_update_one(self, table, keyvalues, updatevalues, desc="_simple_update_one"): """Executes an UPDATE query on the named table, setting new values for diff --git a/synapse/storage/presence.py b/synapse/storage/presence.py index 1095d52ace..9b3aecaf8c 100644 --- a/synapse/storage/presence.py +++ b/synapse/storage/presence.py @@ -48,24 +48,25 @@ class PresenceStore(SQLBaseStore): desc="get_presence_state", ) - @cachedList(get_presence_state.cache, list_name="user_localparts") + @cachedList(get_presence_state.cache, list_name="user_localparts", + inlineCallbacks=True) def get_presence_states(self, user_localparts): - def f(txn): - results = {} - for user_localpart in user_localparts: - res = self._simple_select_one_txn( - txn, - table="presence", - keyvalues={"user_id": user_localpart}, - retcols=["state", "status_msg", "mtime"], - allow_none=True, - ) - if res: - results[user_localpart] = res + rows = yield self._simple_select_many_batch( + table="presence", + column="user_id", + iterable=user_localparts, + retcols=("user_id", "state", "status_msg", "mtime",), + desc="get_presence_states", + ) - return results - - return self.runInteraction("get_presence_states", f) + defer.returnValue({ + row["user_id"]: { + "state": row["state"], + "status_msg": row["status_msg"], + "mtime": row["mtime"], + } + for row in rows + }) def set_presence_state(self, user_localpart, new_state): res = self._simple_update_one( diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py index 35ec7e8cef..1f51c90ee5 100644 --- a/synapse/storage/push_rule.py +++ b/synapse/storage/push_rule.py @@ -65,32 +65,20 @@ class PushRuleStore(SQLBaseStore): if not user_ids: defer.returnValue({}) - batch_size = 100 - - def f(txn, user_ids_to_fetch): - sql = ( - "SELECT pr.*" - " FROM push_rules AS pr" - " LEFT JOIN push_rules_enable AS pre" - " ON pr.user_name = pre.user_name AND pr.rule_id = pre.rule_id" - " WHERE pr.user_name" - " IN (" + ",".join("?" for _ in user_ids_to_fetch) + ")" - " AND (pre.enabled IS NULL OR pre.enabled = 1)" - " ORDER BY pr.user_name, pr.priority_class DESC, pr.priority DESC" - ) - txn.execute(sql, user_ids_to_fetch) - return self.cursor_to_dict(txn) - results = {} - chunks = [user_ids[i:i+batch_size] for i in xrange(0, len(user_ids), batch_size)] - for batch_user_ids in chunks: - rows = yield self.runInteraction( - "bulk_get_push_rules", f, batch_user_ids - ) + rows = yield self._simple_select_many_batch( + table="push_rules", + column="user_name", + iterable=user_ids, + retcols=("*",), + desc="bulk_get_push_rules", + ) - for row in rows: - results.setdefault(row['user_name'], []).append(row) + rows.sort(key=lambda e: (-e["priority_class"], -e["priority"])) + + for row in rows: + results.setdefault(row['user_name'], []).append(row) defer.returnValue(results) @defer.inlineCallbacks @@ -98,28 +86,17 @@ class PushRuleStore(SQLBaseStore): if not user_ids: defer.returnValue({}) - batch_size = 100 - - def f(txn, user_ids_to_fetch): - sql = ( - "SELECT user_name, rule_id, enabled" - " FROM push_rules_enable" - " WHERE user_name" - " IN (" + ",".join("?" 
for _ in user_ids_to_fetch) + ")" - ) - txn.execute(sql, user_ids_to_fetch) - return self.cursor_to_dict(txn) - results = {} - chunks = [user_ids[i:i+batch_size] for i in xrange(0, len(user_ids), batch_size)] - for batch_user_ids in chunks: - rows = yield self.runInteraction( - "bulk_get_push_rules_enabled", f, batch_user_ids - ) - - for row in rows: - results.setdefault(row['user_name'], {})[row['rule_id']] = row['enabled'] + rows = yield self._simple_select_many_batch( + table="push_rules_enable", + column="user_name", + iterable=user_ids, + retcols=("user_name", "rule_id", "enabled",), + desc="bulk_get_push_rules_enabled", + ) + for row in rows: + results.setdefault(row['user_name'], {})[row['rule_id']] = row['enabled'] defer.returnValue(results) @defer.inlineCallbacks From d59c58bc95e3ca6ea4ad9a7e62c6bc3f5f724978 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 25 Jan 2016 13:13:46 +0000 Subject: [PATCH 007/349] Remove weird stuff --- synapse/handlers/sync.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 9b5b4d2c9f..84b0599174 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -130,11 +130,6 @@ class SyncHandler(BaseHandler): self.event_sources = hs.get_event_sources() self.clock = hs.get_clock() - @defer.inlineCallbacks - def get_sync_for_user(self, sync_config, since_token=None, timeout=0, - filter_collection=DEFAULT_FILTER_COLLECTION): - pass - @defer.inlineCallbacks def wait_for_sync_for_user(self, sync_config, since_token=None, timeout=0, full_state=False): @@ -777,7 +772,6 @@ class SyncHandler(BaseHandler): just_joined = yield self.check_joined_room(sync_config, state) if just_joined: state = yield self.get_state_at(room_id, now_token) - # batch.limited = True notifs = yield self.unread_notifs_for_room_id( room_id, sync_config, all_ephemeral_by_room From 53cb17366391a13e8d8297c9f4fb3f77fd6b85b7 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 25 Jan 2016 13:53:05 +0000 Subject: [PATCH 008/349] Push: Use storage apis that are cached --- synapse/push/__init__.py | 30 +++++++++++++----------------- synapse/storage/roommember.py | 1 + 2 files changed, 14 insertions(+), 17 deletions(-) diff --git a/synapse/push/__init__.py b/synapse/push/__init__.py index e6a28bd8c0..9bc0b356f4 100644 --- a/synapse/push/__init__.py +++ b/synapse/push/__init__.py @@ -17,7 +17,6 @@ from twisted.internet import defer from synapse.streams.config import PaginationConfig from synapse.types import StreamToken -from synapse.api.constants import Membership import synapse.util.async import push_rule_evaluator as push_rule_evaluator @@ -296,31 +295,28 @@ class Pusher(object): @defer.inlineCallbacks def _get_badge_count(self): - room_list = yield self.store.get_rooms_for_user_where_membership_is( - user_id=self.user_id, - membership_list=(Membership.INVITE, Membership.JOIN) - ) + invites, joins = yield defer.gatherResults([ + self.store.get_invites_for_user(self.user_id), + self.store.get_rooms_for_user(self.user_id), + ], consumeErrors=True) my_receipts_by_room = yield self.store.get_receipts_for_user( self.user_id, "m.read", ) - badge = 0 + badge = len(invites) - for r in room_list: - if r.membership == Membership.INVITE: - badge += 1 - else: - if r.room_id in my_receipts_by_room: - last_unread_event_id = my_receipts_by_room[r.room_id] + for r in joins: + if r.room_id in my_receipts_by_room: + last_unread_event_id = my_receipts_by_room[r.room_id] - notifs = yield ( - 
self.store.get_unread_event_push_actions_by_room_for_user( - r.room_id, self.user_id, last_unread_event_id - ) + notifs = yield ( + self.store.get_unread_event_push_actions_by_room_for_user( + r.room_id, self.user_id, last_unread_event_id ) - badge += len(notifs) + ) + badge += len(notifs) defer.returnValue(badge) diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index 68ac88905f..edfecced05 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -110,6 +110,7 @@ class RoomMemberStore(SQLBaseStore): membership=membership, ).addCallback(self._get_events) + @cached() def get_invites_for_user(self, user_id): """ Get all the invite events for a user Args: From 86896408b0a3c0d0c82356c8cc0bdaf3fe236b45 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 25 Jan 2016 15:30:32 +0000 Subject: [PATCH 009/349] Add index to event_push_actions --- synapse/storage/schema/delta/28/event_push_actions.sql | 1 + 1 file changed, 1 insertion(+) diff --git a/synapse/storage/schema/delta/28/event_push_actions.sql b/synapse/storage/schema/delta/28/event_push_actions.sql index bdf6ae3f24..4d519849df 100644 --- a/synapse/storage/schema/delta/28/event_push_actions.sql +++ b/synapse/storage/schema/delta/28/event_push_actions.sql @@ -24,3 +24,4 @@ CREATE TABLE IF NOT EXISTS event_push_actions( CREATE INDEX event_push_actions_room_id_event_id_user_id_profile_tag on event_push_actions(room_id, event_id, user_id, profile_tag); +CREATE INDEX event_push_actions_room_id_user_id on event_push_actions(room_id, user_id); From dc2647cd3d1e8891399f309344e375a25142e43e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 25 Jan 2016 15:48:54 +0000 Subject: [PATCH 010/349] PEP8 --- synapse/handlers/sync.py | 1 - 1 file changed, 1 deletion(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 84b0599174..328c049b03 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -17,7 +17,6 @@ from ._base import BaseHandler from synapse.streams.config import PaginationConfig from synapse.api.constants import Membership, EventTypes -from synapse.api.filtering import DEFAULT_FILTER_COLLECTION from synapse.util import unwrapFirstError from twisted.internet import defer From 1ebf5e3d03a2a2ce9ff278b2eac07acc0f7cde66 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 25 Jan 2016 15:53:36 +0000 Subject: [PATCH 011/349] Correct docstring --- synapse/storage/_base.py | 1 - 1 file changed, 1 deletion(-) diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 897c5d8d73..304ebdc825 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -639,7 +639,6 @@ class SQLBaseStore(object): Filters rows by if value of `column` is in `iterable`. 
Args: - txn : Transaction object table : string giving the table name column : column name to test for inclusion against `iterable` iterable : list From aea5da0ef6f4d3907ace2c1fdba743312118660c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 25 Jan 2016 15:59:29 +0000 Subject: [PATCH 012/349] Guard against empty iterables --- synapse/storage/_base.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 304ebdc825..90d7aee94a 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -647,6 +647,9 @@ class SQLBaseStore(object): """ results = [] + if not iterable: + defer.returnValue(results) + chunks = [iterable[i:i+batch_size] for i in xrange(0, len(iterable), batch_size)] for chunk in chunks: rows = yield self.runInteraction( @@ -673,6 +676,9 @@ class SQLBaseStore(object): keyvalues : dict of column names and values to select the rows with retcols : list of strings giving the names of the columns to return """ + if not iterable: + return [] + sql = "SELECT %s FROM %s" % (", ".join(retcols), table) clauses = [] From e18257f0e52cf587ab4d7bb8937dbcaefeb26e54 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Mon, 25 Jan 2016 16:51:56 +0000 Subject: [PATCH 013/349] Add missing yield in push_rules set enabled --- synapse/rest/client/v1/push_rule.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py index 2272d66dc7..cb3ec23872 100644 --- a/synapse/rest/client/v1/push_rule.py +++ b/synapse/rest/client/v1/push_rule.py @@ -52,7 +52,7 @@ class PushRuleRestServlet(ClientV1RestServlet): content = _parse_json(request) if 'attr' in spec: - self.set_rule_attr(requester.user.to_string(), spec, content) + yield self.set_rule_attr(requester.user.to_string(), spec, content) defer.returnValue((200, {})) try: @@ -228,7 +228,7 @@ class PushRuleRestServlet(ClientV1RestServlet): # bools directly, so let's not break them. raise SynapseError(400, "Value for 'enabled' must be boolean") namespaced_rule_id = _namespaced_rule_id_from_spec(spec) - self.hs.get_datastore().set_push_rule_enabled( + return self.hs.get_datastore().set_push_rule_enabled( user_id, namespaced_rule_id, val ) else: From 766c24b2e6efbb5d250f5a36809caf3026140593 Mon Sep 17 00:00:00 2001 From: David Baker Date: Tue, 26 Jan 2016 10:21:41 +0000 Subject: [PATCH 014/349] Only notify for messages in one to one rooms, not every event Fixes the fact that candidate events and hangups generated notifications. 
--- synapse/push/baserules.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/synapse/push/baserules.py b/synapse/push/baserules.py index 186281dfa3..0832c77cb4 100644 --- a/synapse/push/baserules.py +++ b/synapse/push/baserules.py @@ -173,6 +173,12 @@ BASE_APPEND_UNDERRIDE_RULES = [ 'kind': 'room_member_count', 'is': '2', '_id': 'member_count', + }, + { + 'kind': 'event_match', + 'key': 'type', + 'pattern': 'm.room.message', + '_id': '_message', } ], 'actions': [ From 9959d9ece84d85dae3ed06b22e3f234575b93fd1 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 26 Jan 2016 13:52:29 +0000 Subject: [PATCH 015/349] Remove redundated BaseHomeServer --- synapse/app/homeserver.py | 139 ++++------- synapse/federation/__init__.py | 9 +- synapse/federation/replication.py | 2 - synapse/federation/transport/__init__.py | 52 ---- synapse/federation/transport/client.py | 4 + synapse/federation/transport/server.py | 82 +++--- synapse/server.py | 106 ++++---- tests/federation/__init__.py | 0 tests/federation/test_federation.py | 303 ----------------------- tests/rest/client/v1/test_presence.py | 18 +- tests/test_types.py | 5 +- tests/utils.py | 18 ++ 12 files changed, 192 insertions(+), 546 deletions(-) delete mode 100644 tests/federation/__init__.py delete mode 100644 tests/federation/test_federation.py diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 6928d9d3e4..795c655ae3 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -50,16 +50,14 @@ from twisted.cred import checkers, portal from twisted.internet import reactor, task, defer from twisted.application import service -from twisted.enterprise import adbapi from twisted.web.resource import Resource, EncodingResourceWrapper from twisted.web.static import File from twisted.web.server import Site, GzipEncoderFactory, Request -from synapse.http.server import JsonResource, RootRedirect +from synapse.http.server import RootRedirect from synapse.rest.media.v0.content_repository import ContentRepoResource from synapse.rest.media.v1.media_repository import MediaRepositoryResource from synapse.rest.key.v1.server_key_resource import LocalKey from synapse.rest.key.v2 import KeyApiV2Resource -from synapse.http.matrixfederationclient import MatrixFederationHttpClient from synapse.api.urls import ( FEDERATION_PREFIX, WEB_CLIENT_PREFIX, CONTENT_REPO_PREFIX, SERVER_KEY_PREFIX, MEDIA_PREFIX, STATIC_PREFIX, @@ -69,6 +67,7 @@ from synapse.config.homeserver import HomeServerConfig from synapse.crypto import context_factory from synapse.util.logcontext import LoggingContext from synapse.metrics.resource import MetricsResource, METRICS_PREFIX +from synapse.federation.transport.server import TransportLayerServer from synapse import events @@ -95,80 +94,37 @@ def gz_wrap(r): return EncodingResourceWrapper(r, [GzipEncoderFactory()]) +def build_resource_for_web_client(hs): + webclient_path = hs.get_config().web_client_location + if not webclient_path: + try: + import syweb + except ImportError: + quit_with_error( + "Could not find a webclient.\n\n" + "Please either install the matrix-angular-sdk or configure\n" + "the location of the source to serve via the configuration\n" + "option `web_client_location`\n\n" + "To install the `matrix-angular-sdk` via pip, run:\n\n" + " pip install '%(dep)s'\n" + "\n" + "You can also disable hosting of the webclient via the\n" + "configuration option `web_client`\n" + % {"dep": DEPENDENCY_LINKS["matrix-angular-sdk"]} + ) + syweb_path = os.path.dirname(syweb.__file__) + webclient_path = 
os.path.join(syweb_path, "webclient") + # GZip is disabled here due to + # https://twistedmatrix.com/trac/ticket/7678 + # (It can stay enabled for the API resources: they call + # write() with the whole body and then finish() straight + # after and so do not trigger the bug. + # GzipFile was removed in commit 184ba09 + # return GzipFile(webclient_path) # TODO configurable? + return File(webclient_path) # TODO configurable? + + class SynapseHomeServer(HomeServer): - - def build_http_client(self): - return MatrixFederationHttpClient(self) - - def build_client_resource(self): - return ClientRestResource(self) - - def build_resource_for_federation(self): - return JsonResource(self) - - def build_resource_for_web_client(self): - webclient_path = self.get_config().web_client_location - if not webclient_path: - try: - import syweb - except ImportError: - quit_with_error( - "Could not find a webclient.\n\n" - "Please either install the matrix-angular-sdk or configure\n" - "the location of the source to serve via the configuration\n" - "option `web_client_location`\n\n" - "To install the `matrix-angular-sdk` via pip, run:\n\n" - " pip install '%(dep)s'\n" - "\n" - "You can also disable hosting of the webclient via the\n" - "configuration option `web_client`\n" - % {"dep": DEPENDENCY_LINKS["matrix-angular-sdk"]} - ) - syweb_path = os.path.dirname(syweb.__file__) - webclient_path = os.path.join(syweb_path, "webclient") - # GZip is disabled here due to - # https://twistedmatrix.com/trac/ticket/7678 - # (It can stay enabled for the API resources: they call - # write() with the whole body and then finish() straight - # after and so do not trigger the bug. - # GzipFile was removed in commit 184ba09 - # return GzipFile(webclient_path) # TODO configurable? - return File(webclient_path) # TODO configurable? 
- - def build_resource_for_static_content(self): - # This is old and should go away: not going to bother adding gzip - return File( - os.path.join(os.path.dirname(synapse.__file__), "static") - ) - - def build_resource_for_content_repo(self): - return ContentRepoResource( - self, self.config.uploads_path, self.auth, self.content_addr - ) - - def build_resource_for_media_repository(self): - return MediaRepositoryResource(self) - - def build_resource_for_server_key(self): - return LocalKey(self) - - def build_resource_for_server_key_v2(self): - return KeyApiV2Resource(self) - - def build_resource_for_metrics(self): - if self.get_config().enable_metrics: - return MetricsResource(self) - else: - return None - - def build_db_pool(self): - name = self.db_config["name"] - - return adbapi.ConnectionPool( - name, - **self.db_config.get("args", {}) - ) - def _listener_http(self, config, listener_config): port = listener_config["port"] bind_address = listener_config.get("bind_address", "") @@ -178,13 +134,11 @@ class SynapseHomeServer(HomeServer): if tls and config.no_tls: return - metrics_resource = self.get_resource_for_metrics() - resources = {} for res in listener_config["resources"]: for name in res["names"]: if name == "client": - client_resource = self.get_client_resource() + client_resource = ClientRestResource(self) if res["compress"]: client_resource = gz_wrap(client_resource) @@ -198,31 +152,35 @@ class SynapseHomeServer(HomeServer): if name == "federation": resources.update({ - FEDERATION_PREFIX: self.get_resource_for_federation(), + FEDERATION_PREFIX: TransportLayerServer(self), }) if name in ["static", "client"]: resources.update({ - STATIC_PREFIX: self.get_resource_for_static_content(), + STATIC_PREFIX: File( + os.path.join(os.path.dirname(synapse.__file__), "static") + ), }) if name in ["media", "federation", "client"]: resources.update({ - MEDIA_PREFIX: self.get_resource_for_media_repository(), - CONTENT_REPO_PREFIX: self.get_resource_for_content_repo(), + MEDIA_PREFIX: MediaRepositoryResource(self), + CONTENT_REPO_PREFIX: ContentRepoResource( + self, self.config.uploads_path, self.auth, self.content_addr + ), }) if name in ["keys", "federation"]: resources.update({ - SERVER_KEY_PREFIX: self.get_resource_for_server_key(), - SERVER_KEY_V2_PREFIX: self.get_resource_for_server_key_v2(), + SERVER_KEY_PREFIX: LocalKey(self), + SERVER_KEY_V2_PREFIX: KeyApiV2Resource(self), }) if name == "webclient": - resources[WEB_CLIENT_PREFIX] = self.get_resource_for_web_client() + resources[WEB_CLIENT_PREFIX] = build_resource_for_web_client(self) - if name == "metrics" and metrics_resource: - resources[METRICS_PREFIX] = metrics_resource + if name == "metrics" and self.get_config().enable_metrics: + resources[METRICS_PREFIX] = MetricsResource(self) root_resource = create_resource_tree(resources) if tls: @@ -675,7 +633,7 @@ def _resource_id(resource, path_seg): the mapping should looks like _resource_id(A,C) = B. Args: - resource (Resource): The *parent* Resource + resource (Resource): The *parent* Resourceb path_seg (str): The name of the child Resource to be attached. Returns: str: A unique string which can be a key to the child Resource. 
@@ -684,7 +642,7 @@ def _resource_id(resource, path_seg): def run(hs): - PROFILE_SYNAPSE = False + PROFILE_SYNAPSE = True if PROFILE_SYNAPSE: def profile(func): from cProfile import Profile @@ -761,6 +719,7 @@ def run(hs): auto_close_fds=False, verbose=True, logger=logger, + chdir=os.path.dirname(os.path.abspath(__file__)), ) daemon.start() diff --git a/synapse/federation/__init__.py b/synapse/federation/__init__.py index 0bfb79d09f..979fdf2431 100644 --- a/synapse/federation/__init__.py +++ b/synapse/federation/__init__.py @@ -17,15 +17,10 @@ """ from .replication import ReplicationLayer -from .transport import TransportLayer +from .transport.client import TransportLayerClient def initialize_http_replication(homeserver): - transport = TransportLayer( - homeserver, - homeserver.hostname, - server=homeserver.get_resource_for_federation(), - client=homeserver.get_http_client() - ) + transport = TransportLayerClient(homeserver) return ReplicationLayer(homeserver, transport) diff --git a/synapse/federation/replication.py b/synapse/federation/replication.py index 6e0be8ef15..3e062a5eab 100644 --- a/synapse/federation/replication.py +++ b/synapse/federation/replication.py @@ -54,8 +54,6 @@ class ReplicationLayer(FederationClient, FederationServer): self.keyring = hs.get_keyring() self.transport_layer = transport_layer - self.transport_layer.register_received_handler(self) - self.transport_layer.register_request_handler(self) self.federation_client = self diff --git a/synapse/federation/transport/__init__.py b/synapse/federation/transport/__init__.py index 155a7d5870..d9fcc520a0 100644 --- a/synapse/federation/transport/__init__.py +++ b/synapse/federation/transport/__init__.py @@ -20,55 +20,3 @@ By default this is done over HTTPS (and all home servers are required to support HTTPS), however individual pairings of servers may decide to communicate over a different (albeit still reliable) protocol. """ - -from .server import TransportLayerServer -from .client import TransportLayerClient - -from synapse.util.ratelimitutils import FederationRateLimiter - - -class TransportLayer(TransportLayerServer, TransportLayerClient): - """This is a basic implementation of the transport layer that translates - transactions and other requests to/from HTTP. - - Attributes: - server_name (str): Local home server host - - server (synapse.http.server.HttpServer): the http server to - register listeners on - - client (synapse.http.client.HttpClient): the http client used to - send requests - - request_handler (TransportRequestHandler): The handler to fire when we - receive requests for data. - - received_handler (TransportReceivedHandler): The handler to fire when - we receive data. 
- """ - - def __init__(self, homeserver, server_name, server, client): - """ - Args: - server_name (str): Local home server host - server (synapse.protocol.http.HttpServer): the http server to - register listeners on - client (synapse.protocol.http.HttpClient): the http client used to - send requests - """ - self.keyring = homeserver.get_keyring() - self.clock = homeserver.get_clock() - self.server_name = server_name - self.server = server - self.client = client - self.request_handler = None - self.received_handler = None - - self.ratelimiter = FederationRateLimiter( - self.clock, - window_size=homeserver.config.federation_rc_window_size, - sleep_limit=homeserver.config.federation_rc_sleep_limit, - sleep_msec=homeserver.config.federation_rc_sleep_delay, - reject_limit=homeserver.config.federation_rc_reject_limit, - concurrent_requests=homeserver.config.federation_rc_concurrent, - ) diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 949d01dea8..2b5d40ea7f 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -28,6 +28,10 @@ logger = logging.getLogger(__name__) class TransportLayerClient(object): """Sends federation HTTP requests to other servers""" + def __init__(self, hs): + self.server_name = hs.hostname + self.client = hs.get_http_client() + @log_function def get_room_state(self, destination, room_id, event_id): """ Requests all state for a given room from the given server at the diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 8dca0a7f6b..65e054f7dd 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -17,7 +17,8 @@ from twisted.internet import defer from synapse.api.urls import FEDERATION_PREFIX as PREFIX from synapse.api.errors import Codes, SynapseError -from synapse.util.logutils import log_function +from synapse.http.server import JsonResource +from synapse.util.ratelimitutils import FederationRateLimiter import functools import logging @@ -28,9 +29,41 @@ import re logger = logging.getLogger(__name__) -class TransportLayerServer(object): +class TransportLayerServer(JsonResource): """Handles incoming federation HTTP requests""" + def __init__(self, hs): + self.hs = hs + self.clock = hs.get_clock() + + super(TransportLayerServer, self).__init__(hs) + + self.authenticator = Authenticator(hs) + self.ratelimiter = FederationRateLimiter( + self.clock, + window_size=hs.config.federation_rc_window_size, + sleep_limit=hs.config.federation_rc_sleep_limit, + sleep_msec=hs.config.federation_rc_sleep_delay, + reject_limit=hs.config.federation_rc_reject_limit, + concurrent_requests=hs.config.federation_rc_concurrent, + ) + + self.register_servlets() + + def register_servlets(self): + register_servlets( + self.hs, + resource=self, + ratelimiter=self.ratelimiter, + authenticator=self.authenticator, + ) + + +class Authenticator(object): + def __init__(self, hs): + self.keyring = hs.get_keyring() + self.server_name = hs.hostname + # A method just so we can pass 'self' as the authenticator to the Servlets @defer.inlineCallbacks def authenticate_request(self, request): @@ -98,37 +131,9 @@ class TransportLayerServer(object): defer.returnValue((origin, content)) - @log_function - def register_received_handler(self, handler): - """ Register a handler that will be fired when we receive data. 
- - Args: - handler (TransportReceivedHandler) - """ - FederationSendServlet( - handler, - authenticator=self, - ratelimiter=self.ratelimiter, - server_name=self.server_name, - ).register(self.server) - - @log_function - def register_request_handler(self, handler): - """ Register a handler that will be fired when we get asked for data. - - Args: - handler (TransportRequestHandler) - """ - for servletclass in SERVLET_CLASSES: - servletclass( - handler, - authenticator=self, - ratelimiter=self.ratelimiter, - ).register(self.server) - class BaseFederationServlet(object): - def __init__(self, handler, authenticator, ratelimiter): + def __init__(self, handler, authenticator, ratelimiter, server_name): self.handler = handler self.authenticator = authenticator self.ratelimiter = ratelimiter @@ -172,7 +177,9 @@ class FederationSendServlet(BaseFederationServlet): PATH = "/send/([^/]*)/" def __init__(self, handler, server_name, **kwargs): - super(FederationSendServlet, self).__init__(handler, **kwargs) + super(FederationSendServlet, self).__init__( + handler, server_name=server_name, **kwargs + ) self.server_name = server_name # This is when someone is trying to send us a bunch of data. @@ -432,6 +439,7 @@ class On3pidBindServlet(BaseFederationServlet): SERVLET_CLASSES = ( + FederationSendServlet, FederationPullServlet, FederationEventServlet, FederationStateServlet, @@ -451,3 +459,13 @@ SERVLET_CLASSES = ( FederationThirdPartyInviteExchangeServlet, On3pidBindServlet, ) + + +def register_servlets(hs, resource, authenticator, ratelimiter): + for servletclass in SERVLET_CLASSES: + servletclass( + handler=hs.get_replication_layer(), + authenticator=authenticator, + ratelimiter=ratelimiter, + server_name=hs.hostname, + ).register(resource) diff --git a/synapse/server.py b/synapse/server.py index 4a5796b982..a59e46ca2d 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -20,6 +20,8 @@ # Imports required for the default HomeServer() implementation from twisted.web.client import BrowserLikePolicyForHTTPS +from twisted.enterprise import adbapi + from synapse.federation import initialize_http_replication from synapse.http.client import SimpleHttpClient, InsecureInterceptableContextFactory from synapse.notifier import Notifier @@ -36,8 +38,10 @@ from synapse.push.pusherpool import PusherPool from synapse.events.builder import EventBuilderFactory from synapse.api.filtering import Filtering +from synapse.http.matrixfederationclient import MatrixFederationHttpClient -class BaseHomeServer(object): + +class HomeServer(object): """A basic homeserver object without lazy component builders. 
This will need all of the components it requires to either be passed as @@ -102,36 +106,6 @@ class BaseHomeServer(object): for depname in kwargs: setattr(self, depname, kwargs[depname]) - @classmethod - def _make_dependency_method(cls, depname): - def _get(self): - if hasattr(self, depname): - return getattr(self, depname) - - if hasattr(self, "build_%s" % (depname)): - # Prevent cyclic dependencies from deadlocking - if depname in self._building: - raise ValueError("Cyclic dependency while building %s" % ( - depname, - )) - self._building[depname] = 1 - - builder = getattr(self, "build_%s" % (depname)) - dep = builder() - setattr(self, depname, dep) - - del self._building[depname] - - return dep - - raise NotImplementedError( - "%s has no %s nor a builder for it" % ( - type(self).__name__, depname, - ) - ) - - setattr(BaseHomeServer, "get_%s" % (depname), _get) - def get_ip_from_request(self, request): # X-Forwarded-For is handled by our custom request type. return request.getClientIP() @@ -142,24 +116,6 @@ class BaseHomeServer(object): def is_mine_id(self, string): return string.split(":", 1)[1] == self.hostname -# Build magic accessors for every dependency -for depname in BaseHomeServer.DEPENDENCIES: - BaseHomeServer._make_dependency_method(depname) - - -class HomeServer(BaseHomeServer): - """A homeserver object that will construct most of its dependencies as - required. - - It still requires the following to be specified by the caller: - resource_for_client - resource_for_web_client - resource_for_federation - resource_for_content_repo - http_client - db_pool - """ - def build_clock(self): return Clock() @@ -224,3 +180,55 @@ class HomeServer(BaseHomeServer): def build_pusherpool(self): return PusherPool(self) + + def build_http_client(self): + return MatrixFederationHttpClient(self) + + def build_db_pool(self): + name = self.db_config["name"] + + return adbapi.ConnectionPool( + name, + **self.db_config.get("args", {}) + ) + + +def _make_dependency_method(depname): + def _get(hs): + try: + return getattr(hs, depname) + except AttributeError: + pass + + try: + builder = getattr(hs, "build_%s" % (depname)) + except AttributeError: + builder = None + + if builder: + # Prevent cyclic dependencies from deadlocking + if depname in hs._building: + raise ValueError("Cyclic dependency while building %s" % ( + depname, + )) + hs._building[depname] = 1 + + dep = builder() + setattr(hs, depname, dep) + + del hs._building[depname] + + return dep + + raise NotImplementedError( + "%s has no %s nor a builder for it" % ( + type(hs).__name__, depname, + ) + ) + + setattr(HomeServer, "get_%s" % (depname), _get) + + +# Build magic accessors for every dependency +for depname in HomeServer.DEPENDENCIES: + _make_dependency_method(depname) diff --git a/tests/federation/__init__.py b/tests/federation/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/federation/test_federation.py b/tests/federation/test_federation.py deleted file mode 100644 index f2c2ee4127..0000000000 --- a/tests/federation/test_federation.py +++ /dev/null @@ -1,303 +0,0 @@ -# Copyright 2014-2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# trial imports -from twisted.internet import defer -from tests import unittest - -# python imports -from mock import Mock, ANY - -from ..utils import MockHttpResource, MockClock, setup_test_homeserver - -from synapse.federation import initialize_http_replication -from synapse.events import FrozenEvent - - -def make_pdu(prev_pdus=[], **kwargs): - """Provide some default fields for making a PduTuple.""" - pdu_fields = { - "state_key": None, - "prev_events": prev_pdus, - } - pdu_fields.update(kwargs) - - return FrozenEvent(pdu_fields) - - -class FederationTestCase(unittest.TestCase): - @defer.inlineCallbacks - def setUp(self): - self.mock_resource = MockHttpResource() - self.mock_http_client = Mock(spec=[ - "get_json", - "put_json", - ]) - self.mock_persistence = Mock(spec=[ - "prep_send_transaction", - "delivered_txn", - "get_received_txn_response", - "set_received_txn_response", - "get_destination_retry_timings", - "get_auth_chain", - ]) - self.mock_persistence.get_received_txn_response.return_value = ( - defer.succeed(None) - ) - - retry_timings_res = { - "destination": "", - "retry_last_ts": 0, - "retry_interval": 0, - } - self.mock_persistence.get_destination_retry_timings.return_value = ( - defer.succeed(retry_timings_res) - ) - self.mock_persistence.get_auth_chain.return_value = [] - self.clock = MockClock() - hs = yield setup_test_homeserver( - resource_for_federation=self.mock_resource, - http_client=self.mock_http_client, - datastore=self.mock_persistence, - clock=self.clock, - keyring=Mock(), - ) - self.federation = initialize_http_replication(hs) - self.distributor = hs.get_distributor() - - @defer.inlineCallbacks - def test_get_state(self): - mock_handler = Mock(spec=[ - "get_state_for_pdu", - ]) - - self.federation.set_handler(mock_handler) - - mock_handler.get_state_for_pdu.return_value = defer.succeed([]) - - # Empty context initially - (code, response) = yield self.mock_resource.trigger( - "GET", - "/_matrix/federation/v1/state/my-context/", - None - ) - self.assertEquals(200, code) - self.assertFalse(response["pdus"]) - - # Now lets give the context some state - mock_handler.get_state_for_pdu.return_value = ( - defer.succeed([ - make_pdu( - event_id="the-pdu-id", - origin="red", - user_id="@a:red", - room_id="my-context", - type="m.topic", - origin_server_ts=123456789000, - depth=1, - content={"topic": "The topic"}, - state_key="", - power_level=1000, - prev_state="last-pdu-id", - ), - ]) - ) - - (code, response) = yield self.mock_resource.trigger( - "GET", - "/_matrix/federation/v1/state/my-context/", - None - ) - self.assertEquals(200, code) - self.assertEquals(1, len(response["pdus"])) - - @defer.inlineCallbacks - def test_get_pdu(self): - mock_handler = Mock(spec=[ - "get_persisted_pdu", - ]) - - self.federation.set_handler(mock_handler) - - mock_handler.get_persisted_pdu.return_value = ( - defer.succeed(None) - ) - - (code, response) = yield self.mock_resource.trigger( - "GET", - "/_matrix/federation/v1/event/abc123def456/", - None - ) - self.assertEquals(404, code) - - # Now insert such a PDU - mock_handler.get_persisted_pdu.return_value = ( - 
defer.succeed( - make_pdu( - event_id="abc123def456", - origin="red", - user_id="@a:red", - room_id="my-context", - type="m.text", - origin_server_ts=123456789001, - depth=1, - content={"text": "Here is the message"}, - ) - ) - ) - - (code, response) = yield self.mock_resource.trigger( - "GET", - "/_matrix/federation/v1/event/abc123def456/", - None - ) - self.assertEquals(200, code) - self.assertEquals(1, len(response["pdus"])) - self.assertEquals("m.text", response["pdus"][0]["type"]) - - @defer.inlineCallbacks - def test_send_pdu(self): - self.mock_http_client.put_json.return_value = defer.succeed( - (200, "OK") - ) - - pdu = make_pdu( - event_id="abc123def456", - origin="red", - user_id="@a:red", - room_id="my-context", - type="m.text", - origin_server_ts=123456789001, - depth=1, - content={"text": "Here is the message"}, - ) - - yield self.federation.send_pdu(pdu, ["remote"]) - - self.mock_http_client.put_json.assert_called_with( - "remote", - path="/_matrix/federation/v1/send/1000000/", - data={ - "origin_server_ts": 1000000, - "origin": "test", - "pdus": [ - pdu.get_pdu_json(), - ], - 'pdu_failures': [], - }, - json_data_callback=ANY, - long_retries=True, - ) - - @defer.inlineCallbacks - def test_send_edu(self): - self.mock_http_client.put_json.return_value = defer.succeed( - (200, "OK") - ) - - yield self.federation.send_edu( - destination="remote", - edu_type="m.test", - content={"testing": "content here"}, - ) - - # MockClock ensures we can guess these timestamps - self.mock_http_client.put_json.assert_called_with( - "remote", - path="/_matrix/federation/v1/send/1000000/", - data={ - "origin": "test", - "origin_server_ts": 1000000, - "pdus": [], - "edus": [ - { - "edu_type": "m.test", - "content": {"testing": "content here"}, - } - ], - 'pdu_failures': [], - }, - json_data_callback=ANY, - long_retries=True, - ) - - @defer.inlineCallbacks - def test_recv_edu(self): - recv_observer = Mock() - recv_observer.return_value = defer.succeed(()) - - self.federation.register_edu_handler("m.test", recv_observer) - - yield self.mock_resource.trigger( - "PUT", - "/_matrix/federation/v1/send/1001000/", - """{ - "origin": "remote", - "origin_server_ts": 1001000, - "pdus": [], - "edus": [ - { - "origin": "remote", - "destination": "test", - "edu_type": "m.test", - "content": {"testing": "reply here"} - } - ] - }""" - ) - - recv_observer.assert_called_with( - "remote", {"testing": "reply here"} - ) - - @defer.inlineCallbacks - def test_send_query(self): - self.mock_http_client.get_json.return_value = defer.succeed( - {"your": "response"} - ) - - response = yield self.federation.make_query( - destination="remote", - query_type="a-question", - args={"one": "1", "two": "2"}, - ) - - self.assertEquals({"your": "response"}, response) - - self.mock_http_client.get_json.assert_called_with( - destination="remote", - path="/_matrix/federation/v1/query/a-question", - args={"one": "1", "two": "2"}, - retry_on_dns_fail=True, - ) - - @defer.inlineCallbacks - def test_recv_query(self): - recv_handler = Mock() - recv_handler.return_value = defer.succeed({"another": "response"}) - - self.federation.register_query_handler("a-question", recv_handler) - - code, response = yield self.mock_resource.trigger( - "GET", - "/_matrix/federation/v1/query/a-question?three=3&four=4", - None - ) - - self.assertEquals(200, code) - self.assertEquals({"another": "response"}, response) - - recv_handler.assert_called_with( - {"three": "3", "four": "4"} - ) diff --git a/tests/rest/client/v1/test_presence.py 
b/tests/rest/client/v1/test_presence.py index 90b911f879..8d7cfd79ab 100644 --- a/tests/rest/client/v1/test_presence.py +++ b/tests/rest/client/v1/test_presence.py @@ -280,6 +280,15 @@ class PresenceEventStreamTestCase(unittest.TestCase): } EventSources.SOURCE_TYPES["presence"] = PresenceEventSource + clock = Mock(spec=[ + "call_later", + "cancel_call_later", + "time_msec", + "looping_call", + ]) + + clock.time_msec.return_value = 1000000 + hs = yield setup_test_homeserver( http_client=None, resource_for_client=self.mock_resource, @@ -289,16 +298,9 @@ class PresenceEventStreamTestCase(unittest.TestCase): "get_presence_list", "get_rooms_for_user", ]), - clock=Mock(spec=[ - "call_later", - "cancel_call_later", - "time_msec", - "looping_call", - ]), + clock=clock, ) - hs.get_clock().time_msec.return_value = 1000000 - def _get_user_by_req(req=None, allow_guest=False): return Requester(UserID.from_string(myid), "", False) diff --git a/tests/test_types.py b/tests/test_types.py index b9534329e6..24d61dbe54 100644 --- a/tests/test_types.py +++ b/tests/test_types.py @@ -16,10 +16,10 @@ from tests import unittest from synapse.api.errors import SynapseError -from synapse.server import BaseHomeServer +from synapse.server import HomeServer from synapse.types import UserID, RoomAlias -mock_homeserver = BaseHomeServer(hostname="my.domain") +mock_homeserver = HomeServer(hostname="my.domain") class UserIDTestCase(unittest.TestCase): @@ -34,7 +34,6 @@ class UserIDTestCase(unittest.TestCase): with self.assertRaises(SynapseError): UserID.from_string("") - def test_build(self): user = UserID("5678efgh", "my.domain") diff --git a/tests/utils.py b/tests/utils.py index 358b5b72b7..d75d492cb5 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -19,6 +19,8 @@ from synapse.api.constants import EventTypes from synapse.storage.prepare_database import prepare_database from synapse.storage.engines import create_engine from synapse.server import HomeServer +from synapse.federation.transport import server +from synapse.util.ratelimitutils import FederationRateLimiter from synapse.util.logcontext import LoggingContext @@ -80,6 +82,22 @@ def setup_test_homeserver(name="test", datastore=None, config=None, **kargs): hs.build_handlers = swap_out_hash_for_testing(hs.build_handlers) + fed = kargs.get("resource_for_federation", None) + if fed: + server.register_servlets( + hs, + resource=fed, + authenticator=server.Authenticator(hs), + ratelimiter=FederationRateLimiter( + hs.get_clock(), + window_size=hs.config.federation_rc_window_size, + sleep_limit=hs.config.federation_rc_sleep_limit, + sleep_msec=hs.config.federation_rc_sleep_delay, + reject_limit=hs.config.federation_rc_reject_limit, + concurrent_requests=hs.config.federation_rc_concurrent + ), + ) + defer.returnValue(hs) From 87f9477b105b4e8216d1df186492ec6d9872967f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 26 Jan 2016 15:51:06 +0000 Subject: [PATCH 016/349] Add a Homeserver.setup method. This is for setting up dependencies that require work on startup. This is useful for the DataStore that wants to read a bunch from the database before initiliazing. 
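
As a rough illustration of the pattern this patch moves toward (this is a sketch, not the actual Synapse code: the ``MiniHomeServer`` class, the sqlite3 table and the stand-in return values below are invented for the example), cheap dependencies keep their lazy ``build_*`` accessors, while anything that must read from the database is constructed once in an explicit ``setup()`` call::

    # Illustrative only: a stripped-down model of the lazy-builder / setup() split.
    import sqlite3


    class MiniHomeServer(object):
        """Cheap dependencies are built lazily on first use; anything that
        must touch the database up front is constructed in setup()."""

        DEPENDENCIES = ["clock", "handlers"]

        def __init__(self, db_path):
            self._building = {}
            self._db_path = db_path

        def get_db_conn(self):
            return sqlite3.connect(self._db_path)

        def setup(self):
            # Blocking start-up work happens here, once, before serving traffic.
            conn = self.get_db_conn()
            try:
                cur = conn.cursor()
                cur.execute("CREATE TABLE IF NOT EXISTS events (stream_ordering INTEGER)")
                cur.execute("SELECT MIN(stream_ordering) FROM events")
                rows = cur.fetchall()
                min_token = rows[0][0] if rows and rows[0] and rows[0][0] else -1
                self.datastore = {"min_stream_token": min(min_token, -1)}
            finally:
                conn.close()

        def build_clock(self):
            return object()      # stand-in for a real Clock

        def build_handlers(self):
            return object()      # stand-in for real Handlers


    def _make_dependency_method(depname):
        # Generates get_<depname>(): return the cached attribute if present,
        # otherwise call build_<depname>() once and cache the result.
        def _get(hs):
            try:
                return getattr(hs, depname)
            except AttributeError:
                pass
            if depname in hs._building:
                raise ValueError("Cyclic dependency while building %s" % (depname,))
            hs._building[depname] = 1
            dep = getattr(hs, "build_%s" % (depname,))()
            setattr(hs, depname, dep)
            del hs._building[depname]
            return dep
        setattr(MiniHomeServer, "get_%s" % (depname,), _get)


    for _depname in MiniHomeServer.DEPENDENCIES:
        _make_dependency_method(_depname)


    hs = MiniHomeServer(":memory:")
    hs.setup()              # eager: reads from the database before start-up completes
    clock = hs.get_clock()  # lazy: built on first call, then cached on the instance

In the real change below, ``synapse.app.homeserver`` calls ``hs.setup()`` after the database has been prepared and before ``hs.start_listening()``.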
--- synapse/app/homeserver.py | 33 +++++++++++------- synapse/server.py | 32 +++++++++-------- synapse/storage/__init__.py | 45 +++++++++++++++++++++--- synapse/storage/_base.py | 49 +++++++++++++-------------- synapse/storage/events.py | 14 +++----- synapse/storage/receipts.py | 8 +++-- synapse/storage/stream.py | 13 ------- synapse/storage/tags.py | 7 ---- synapse/storage/util/id_generators.py | 36 +++++--------------- 9 files changed, 121 insertions(+), 116 deletions(-) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 795c655ae3..fb76be58a2 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -254,6 +254,17 @@ class SynapseHomeServer(HomeServer): except IncorrectDatabaseSetup as e: quit_with_error(e.message) + def get_db_conn(self): + db_conn = self.database_engine.module.connect( + **{ + k: v for k, v in self.db_config.get("args", {}).items() + if not k.startswith("cp_") + } + ) + + self.database_engine.on_new_connection(db_conn) + return db_conn + def quit_with_error(error_string): message_lines = error_string.split("\n") @@ -390,13 +401,7 @@ def setup(config_options): logger.info("Preparing database: %s...", config.database_config['name']) try: - db_conn = database_engine.module.connect( - **{ - k: v for k, v in config.database_config.get("args", {}).items() - if not k.startswith("cp_") - } - ) - + db_conn = hs.get_db_conn() database_engine.prepare_database(db_conn) hs.run_startup_checks(db_conn, database_engine) @@ -411,13 +416,17 @@ def setup(config_options): logger.info("Database prepared in %s.", config.database_config['name']) + hs.setup() hs.start_listening() - hs.get_pusherpool().start() - hs.get_state_handler().start_caching() - hs.get_datastore().start_profiling() - hs.get_datastore().start_doing_background_updates() - hs.get_replication_layer().start_get_pdu_cache() + def start(): + hs.get_pusherpool().start() + hs.get_state_handler().start_caching() + hs.get_datastore().start_profiling() + hs.get_datastore().start_doing_background_updates() + hs.get_replication_layer().start_get_pdu_cache() + + reactor.callWhenRunning(start) return hs diff --git a/synapse/server.py b/synapse/server.py index a59e46ca2d..006e91b37c 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -21,6 +21,7 @@ # Imports required for the default HomeServer() implementation from twisted.web.client import BrowserLikePolicyForHTTPS from twisted.enterprise import adbapi +from twisted.internet import defer from synapse.federation import initialize_http_replication from synapse.http.client import SimpleHttpClient, InsecureInterceptableContextFactory @@ -28,7 +29,7 @@ from synapse.notifier import Notifier from synapse.api.auth import Auth from synapse.handlers import Handlers from synapse.state import StateHandler -from synapse.storage import DataStore +from synapse.storage import get_datastore from synapse.util import Clock from synapse.util.distributor import Distributor from synapse.streams.events import EventSources @@ -40,6 +41,11 @@ from synapse.api.filtering import Filtering from synapse.http.matrixfederationclient import MatrixFederationHttpClient +import logging + + +logger = logging.getLogger(__name__) + class HomeServer(object): """A basic homeserver object without lazy component builders. 
@@ -102,10 +108,19 @@ class HomeServer(object): self.hostname = hostname self._building = {} + self.clock = Clock() + self.distributor = Distributor() + self.ratelimiter = Ratelimiter() + # Other kwargs are explicit dependencies for depname in kwargs: setattr(self, depname, kwargs[depname]) + def setup(self): + logger.info("Setting up.") + self.datastore = get_datastore(self) + logger.info("Finished setting up.") + def get_ip_from_request(self, request): # X-Forwarded-For is handled by our custom request type. return request.getClientIP() @@ -116,15 +131,9 @@ class HomeServer(object): def is_mine_id(self, string): return string.split(":", 1)[1] == self.hostname - def build_clock(self): - return Clock() - def build_replication_layer(self): return initialize_http_replication(self) - def build_datastore(self): - return DataStore(self) - def build_handlers(self): return Handlers(self) @@ -135,10 +144,9 @@ class HomeServer(object): return Auth(self) def build_http_client_context_factory(self): - config = self.get_config() return ( InsecureInterceptableContextFactory() - if config.use_insecure_ssl_client_just_for_testing_do_not_use + if self.config.use_insecure_ssl_client_just_for_testing_do_not_use else BrowserLikePolicyForHTTPS() ) @@ -157,15 +165,9 @@ class HomeServer(object): def build_state_handler(self): return StateHandler(self) - def build_distributor(self): - return Distributor() - def build_event_sources(self): return EventSources(self) - def build_ratelimiter(self): - return Ratelimiter() - def build_keyring(self): return Keyring(self) diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 7a3f6c4662..c8cab45f77 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -46,6 +46,9 @@ from .tags import TagsStore from .account_data import AccountDataStore +from util.id_generators import IdGenerator, StreamIdGenerator + + import logging @@ -58,6 +61,22 @@ logger = logging.getLogger(__name__) LAST_SEEN_GRANULARITY = 120*1000 +def get_datastore(hs): + logger.info("getting called!") + + conn = hs.get_db_conn() + try: + cur = conn.cursor() + cur.execute("SELECT MIN(stream_ordering) FROM events",) + rows = cur.fetchall() + min_token = rows[0][0] if rows and rows[0] and rows[0][0] else -1 + min_token = min(min_token, -1) + + return DataStore(conn, hs, min_token) + finally: + conn.close() + + class DataStore(RoomMemberStore, RoomStore, RegistrationStore, StreamStore, ProfileStore, PresenceStore, TransactionStore, @@ -79,18 +98,36 @@ class DataStore(RoomMemberStore, RoomStore, EventPushActionsStore ): - def __init__(self, hs): - super(DataStore, self).__init__(hs) + def __init__(self, db_conn, hs, min_stream_token): self.hs = hs - self.min_token_deferred = self._get_min_token() - self.min_token = None + self.min_stream_token = min_stream_token self.client_ip_last_seen = Cache( name="client_ip_last_seen", keylen=4, ) + self._stream_id_gen = StreamIdGenerator( + db_conn, "events", "stream_ordering" + ) + self._receipts_id_gen = StreamIdGenerator( + db_conn, "receipts_linearized", "stream_id" + ) + self._account_data_id_gen = StreamIdGenerator( + db_conn, "account_data_max_stream_id", "stream_id" + ) + + self._transaction_id_gen = IdGenerator("sent_transactions", "id", self) + self._state_groups_id_gen = IdGenerator("state_groups", "id", self) + self._access_tokens_id_gen = IdGenerator("access_tokens", "id", self) + self._refresh_tokens_id_gen = IdGenerator("refresh_tokens", "id", self) + self._pushers_id_gen = IdGenerator("pushers", "id", self) + 
self._push_rule_id_gen = IdGenerator("push_rules", "id", self) + self._push_rules_enable_id_gen = IdGenerator("push_rules_enable", "id", self) + + super(DataStore, self).__init__(hs) + @defer.inlineCallbacks def insert_client_ip(self, user, access_token, ip, user_agent): now = int(self._clock.time_msec()) diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 90d7aee94a..5e77320540 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -15,13 +15,11 @@ import logging from synapse.api.errors import StoreError -from synapse.util.logutils import log_function from synapse.util.logcontext import preserve_context_over_fn, LoggingContext from synapse.util.caches.dictionary_cache import DictionaryCache from synapse.util.caches.descriptors import Cache import synapse.metrics -from util.id_generators import IdGenerator, StreamIdGenerator from twisted.internet import defer @@ -175,16 +173,6 @@ class SQLBaseStore(object): self.database_engine = hs.database_engine - self._stream_id_gen = StreamIdGenerator("events", "stream_ordering") - self._transaction_id_gen = IdGenerator("sent_transactions", "id", self) - self._state_groups_id_gen = IdGenerator("state_groups", "id", self) - self._access_tokens_id_gen = IdGenerator("access_tokens", "id", self) - self._refresh_tokens_id_gen = IdGenerator("refresh_tokens", "id", self) - self._pushers_id_gen = IdGenerator("pushers", "id", self) - self._push_rule_id_gen = IdGenerator("push_rules", "id", self) - self._push_rules_enable_id_gen = IdGenerator("push_rules_enable", "id", self) - self._receipts_id_gen = StreamIdGenerator("receipts_linearized", "stream_id") - def start_profiling(self): self._previous_loop_ts = self._clock.time_msec() @@ -345,7 +333,8 @@ class SQLBaseStore(object): defer.returnValue(result) - def cursor_to_dict(self, cursor): + @staticmethod + def cursor_to_dict(cursor): """Converts a SQL cursor into an list of dicts. Args: @@ -402,8 +391,8 @@ class SQLBaseStore(object): if not or_ignore: raise - @log_function - def _simple_insert_txn(self, txn, table, values): + @staticmethod + def _simple_insert_txn(txn, table, values): keys, vals = zip(*values.items()) sql = "INSERT INTO %s (%s) VALUES(%s)" % ( @@ -414,7 +403,8 @@ class SQLBaseStore(object): txn.execute(sql, vals) - def _simple_insert_many_txn(self, txn, table, values): + @staticmethod + def _simple_insert_many_txn(txn, table, values): if not values: return @@ -537,9 +527,10 @@ class SQLBaseStore(object): table, keyvalues, retcol, allow_none=allow_none, ) - def _simple_select_one_onecol_txn(self, txn, table, keyvalues, retcol, + @classmethod + def _simple_select_one_onecol_txn(cls, txn, table, keyvalues, retcol, allow_none=False): - ret = self._simple_select_onecol_txn( + ret = cls._simple_select_onecol_txn( txn, table=table, keyvalues=keyvalues, @@ -554,7 +545,8 @@ class SQLBaseStore(object): else: raise StoreError(404, "No row found") - def _simple_select_onecol_txn(self, txn, table, keyvalues, retcol): + @staticmethod + def _simple_select_onecol_txn(txn, table, keyvalues, retcol): sql = ( "SELECT %(retcol)s FROM %(table)s WHERE %(where)s" ) % { @@ -603,7 +595,8 @@ class SQLBaseStore(object): table, keyvalues, retcols ) - def _simple_select_list_txn(self, txn, table, keyvalues, retcols): + @classmethod + def _simple_select_list_txn(cls, txn, table, keyvalues, retcols): """Executes a SELECT query on the named table, which may return zero or more rows, returning the result as a list of dicts. 
@@ -627,7 +620,7 @@ class SQLBaseStore(object): ) txn.execute(sql) - return self.cursor_to_dict(txn) + return cls.cursor_to_dict(txn) @defer.inlineCallbacks def _simple_select_many_batch(self, table, column, iterable, retcols, @@ -662,7 +655,8 @@ class SQLBaseStore(object): defer.returnValue(results) - def _simple_select_many_txn(self, txn, table, column, iterable, keyvalues, retcols): + @classmethod + def _simple_select_many_txn(cls, txn, table, column, iterable, keyvalues, retcols): """Executes a SELECT query on the named table, which may return zero or more rows, returning the result as a list of dicts. @@ -699,7 +693,7 @@ class SQLBaseStore(object): ) txn.execute(sql, values) - return self.cursor_to_dict(txn) + return cls.cursor_to_dict(txn) def _simple_update_one(self, table, keyvalues, updatevalues, desc="_simple_update_one"): @@ -726,7 +720,8 @@ class SQLBaseStore(object): table, keyvalues, updatevalues, ) - def _simple_update_one_txn(self, txn, table, keyvalues, updatevalues): + @staticmethod + def _simple_update_one_txn(txn, table, keyvalues, updatevalues): update_sql = "UPDATE %s SET %s WHERE %s" % ( table, ", ".join("%s = ?" % (k,) for k in updatevalues), @@ -743,7 +738,8 @@ class SQLBaseStore(object): if txn.rowcount > 1: raise StoreError(500, "More than one row matched") - def _simple_select_one_txn(self, txn, table, keyvalues, retcols, + @staticmethod + def _simple_select_one_txn(txn, table, keyvalues, retcols, allow_none=False): select_sql = "SELECT %s FROM %s WHERE %s" % ( ", ".join(retcols), @@ -784,7 +780,8 @@ class SQLBaseStore(object): raise StoreError(500, "more than one row matched") return self.runInteraction(desc, func) - def _simple_delete_txn(self, txn, table, keyvalues): + @staticmethod + def _simple_delete_txn(txn, table, keyvalues): sql = "DELETE FROM %s WHERE %s" % ( table, " AND ".join("%s = ?" 
% (k, ) for k in keyvalues) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index ba368a3eca..298cb9bada 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -66,11 +66,9 @@ class EventsStore(SQLBaseStore): return if backfilled: - if not self.min_token_deferred.called: - yield self.min_token_deferred - start = self.min_token - 1 - self.min_token -= len(events_and_contexts) + 1 - stream_orderings = range(start, self.min_token, -1) + start = self.min_stream_token - 1 + self.min_stream_token -= len(events_and_contexts) + 1 + stream_orderings = range(start, self.min_stream_token, -1) @contextmanager def stream_ordering_manager(): @@ -107,10 +105,8 @@ class EventsStore(SQLBaseStore): is_new_state=True, current_state=None): stream_ordering = None if backfilled: - if not self.min_token_deferred.called: - yield self.min_token_deferred - self.min_token -= 1 - stream_ordering = self.min_token + self.min_stream_token -= 1 + stream_ordering = self.min_stream_token if stream_ordering is None: stream_ordering_manager = yield self._stream_id_gen.get_next(self) diff --git a/synapse/storage/receipts.py b/synapse/storage/receipts.py index c4232bdc65..c0593e23ee 100644 --- a/synapse/storage/receipts.py +++ b/synapse/storage/receipts.py @@ -31,7 +31,9 @@ class ReceiptsStore(SQLBaseStore): def __init__(self, hs): super(ReceiptsStore, self).__init__(hs) - self._receipts_stream_cache = _RoomStreamChangeCache() + self._receipts_stream_cache = _RoomStreamChangeCache( + self._receipts_id_gen.get_max_token(None) + ) @cached(num_args=2) def get_receipts_for_room(self, room_id, receipt_type): @@ -377,11 +379,11 @@ class _RoomStreamChangeCache(object): may have changed since that key. If the key is too old then the cache will simply return all rooms. 
""" - def __init__(self, size_of_cache=10000): + def __init__(self, current_key, size_of_cache=10000): self._size_of_cache = size_of_cache self._room_to_key = {} self._cache = sorteddict() - self._earliest_key = None + self._earliest_key = current_key self.name = "ReceiptsRoomChangeCache" caches_by_name[self.name] = self._cache diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 02b1913e26..e31bad258a 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -444,19 +444,6 @@ class StreamStore(SQLBaseStore): rows = txn.fetchall() return rows[0][0] if rows else 0 - @defer.inlineCallbacks - def _get_min_token(self): - row = yield self._execute( - "_get_min_token", None, "SELECT MIN(stream_ordering) FROM events" - ) - - self.min_token = row[0][0] if row and row[0] and row[0][0] else -1 - self.min_token = min(self.min_token, -1) - - logger.debug("min_token is: %s", self.min_token) - - defer.returnValue(self.min_token) - @staticmethod def _set_before_and_after(events, rows): for event, row in zip(events, rows): diff --git a/synapse/storage/tags.py b/synapse/storage/tags.py index ed9c91e5ea..4c39e07cbd 100644 --- a/synapse/storage/tags.py +++ b/synapse/storage/tags.py @@ -16,7 +16,6 @@ from ._base import SQLBaseStore from synapse.util.caches.descriptors import cached from twisted.internet import defer -from .util.id_generators import StreamIdGenerator import ujson as json import logging @@ -25,12 +24,6 @@ logger = logging.getLogger(__name__) class TagsStore(SQLBaseStore): - def __init__(self, hs): - super(TagsStore, self).__init__(hs) - - self._account_data_id_gen = StreamIdGenerator( - "account_data_max_stream_id", "stream_id" - ) def get_max_account_data_stream_id(self): """Get the current max stream id for the private user data stream diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index f58bf7fd2c..5c522f4ab9 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -72,28 +72,24 @@ class StreamIdGenerator(object): with stream_id_gen.get_next_txn(txn) as stream_id: # ... persist event ... """ - def __init__(self, table, column): + def __init__(self, db_conn, table, column): self.table = table self.column = column self._lock = threading.Lock() - self._current_max = None + cur = db_conn.cursor() + self._current_max = self._get_or_compute_current_max(cur) + cur.close() + self._unfinished_ids = deque() - @defer.inlineCallbacks def get_next(self, store): """ Usage: with yield stream_id_gen.get_next as stream_id: # ... persist event ... """ - if not self._current_max: - yield store.runInteraction( - "_compute_current_max", - self._get_or_compute_current_max, - ) - with self._lock: self._current_max += 1 next_id = self._current_max @@ -108,21 +104,14 @@ class StreamIdGenerator(object): with self._lock: self._unfinished_ids.remove(next_id) - defer.returnValue(manager()) + return manager() - @defer.inlineCallbacks def get_next_mult(self, store, n): """ Usage: with yield stream_id_gen.get_next(store, n) as stream_ids: # ... persist events ... 
""" - if not self._current_max: - yield store.runInteraction( - "_compute_current_max", - self._get_or_compute_current_max, - ) - with self._lock: next_ids = range(self._current_max + 1, self._current_max + n + 1) self._current_max += n @@ -139,24 +128,17 @@ class StreamIdGenerator(object): for next_id in next_ids: self._unfinished_ids.remove(next_id) - defer.returnValue(manager()) + return manager() - @defer.inlineCallbacks def get_max_token(self, store): """Returns the maximum stream id such that all stream ids less than or equal to it have been successfully persisted. """ - if not self._current_max: - yield store.runInteraction( - "_compute_current_max", - self._get_or_compute_current_max, - ) - with self._lock: if self._unfinished_ids: - defer.returnValue(self._unfinished_ids[0] - 1) + return self._unfinished_ids[0] - 1 - defer.returnValue(self._current_max) + return self._current_max def _get_or_compute_current_max(self, txn): with self._lock: From 9fda8b519358a4bf2702b24a45a7744b399ef127 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 26 Jan 2016 18:27:23 +0000 Subject: [PATCH 017/349] Don't turn on profiling --- synapse/app/homeserver.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 795c655ae3..f881a8d262 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -642,7 +642,7 @@ def _resource_id(resource, path_seg): def run(hs): - PROFILE_SYNAPSE = True + PROFILE_SYNAPSE = False if PROFILE_SYNAPSE: def profile(func): from cProfile import Profile From 8c94833b72f27d18a57e424a0f4f0c823c3a0aa1 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Wed, 27 Jan 2016 10:24:20 +0000 Subject: [PATCH 018/349] Fix adding push rules relative to other rules --- synapse/rest/client/v1/push_rule.py | 15 ++++++++++----- synapse/storage/push_rule.py | 3 ++- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py index cb3ec23872..96633a176c 100644 --- a/synapse/rest/client/v1/push_rule.py +++ b/synapse/rest/client/v1/push_rule.py @@ -66,11 +66,12 @@ class PushRuleRestServlet(ClientV1RestServlet): raise SynapseError(400, e.message) before = request.args.get("before", None) - if before and len(before): - before = before[0] + if before: + before = _namespaced_rule_id(spec, before[0]) + after = request.args.get("after", None) - if after and len(after): - after = after[0] + if after: + after = _namespaced_rule_id(spec, after[0]) try: yield self.hs.get_datastore().add_push_rule( @@ -452,11 +453,15 @@ def _strip_device_condition(rule): def _namespaced_rule_id_from_spec(spec): + return _namespaced_rule_id(spec, spec['rule_id']) + + +def _namespaced_rule_id(spec, rule_id): if spec['scope'] == 'global': scope = 'global' else: scope = 'device/%s' % (spec['profile_tag']) - return "%s/%s/%s" % (scope, spec['template'], spec['rule_id']) + return "%s/%s/%s" % (scope, spec['template'], rule_id) def _rule_id_from_namespaced(in_rule_id): diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py index 1f51c90ee5..f9a48171ba 100644 --- a/synapse/storage/push_rule.py +++ b/synapse/storage/push_rule.py @@ -130,7 +130,8 @@ class PushRuleStore(SQLBaseStore): def _add_push_rule_relative_txn(self, txn, user_id, **kwargs): after = kwargs.pop("after", None) - relative_to_rule = kwargs.pop("before", after) + before = kwargs.pop("before", None) + relative_to_rule = before or after res = self._simple_select_one_txn( txn, From 
03f4569dc3ee6379778633f8c295edde9f2a212c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 27 Jan 2016 10:35:30 +0000 Subject: [PATCH 019/349] Bump version and changelog --- CHANGES.rst | 45 +++++++++++++++++++++++++++++++++++++++++++++ synapse/__init__.py | 2 +- 2 files changed, 46 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index cb317c6a8b..dd4863c3e6 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,48 @@ +Changes in synapse v0.12.1-rc1 (2016-01-xx) +=========================================== + +Features: + +* Add unread notification counts in ``/sync`` (PR #456) +* Add support for inviting 3pids in ``/createRoom`` (PR #460) +* Add ability for guest accounts to upgrade (PR #462) +* Add ``/versions`` API (PR #468) +* Add ``event`` to ``/context`` API (PR #492) +* Add specific error code for invalid user names in ``/register`` (PR #499) +* Add support for push badge counts (PR #507) +* Add support for non-guest users to peek in rooms using ``/events`` (PR #510) + +Changes: + +* Change ``/sync`` so that guest users only get rooms they've joined (PR #469) +* Change to require unbanning before other membership changes (PR #501) +* Change default push rules to notify for all messages (PR #486) +* Change default push rules to not notify on membership changes (PR #514) +* Change default push rules to only notify messages in one to one rooms + (PR #529) +* Change ``/sync`` to reject requests with a ``from`` query param (PR #512) +* Change server manhole to use SSH rather than telnet (PR #473) +* Change server to require AS users to be registered before use (PR #487) +* Change server not to start when ASes are invalidly configured (PR #494) +* Change server to require ID and ``as_token`` to be unique for AS's (PR #496) +* Change maximum pagination limit to 1000 (PR #497) + +Bug fixes: + +* Fix bug where ``/sync`` didn't return when something under the leave key + changed (PR #461) +* Fix bug where we returned smaller rather than larger than requested + thumbnails when ``method=crop`` (PR #464) +* Fix thumbnails API to only return cropped thumbnails when asking for a + cropped thumbnail (PR #475) +* Fix bug where we occasionally still logged access tokens (PR #477) +* Fix bug where ``/events`` would always return immediately for guest users + (PR #480) +* Fix bug where ``/sync`` unexpectedly returned old left rooms (PR #481) +* Fix enabling and disabling push rules (P #498) +* Fix bug where ``/register`` returned 500 when given unicode username + (PR #513) + Changes in synapse v0.12.0 (2016-01-04) ======================================= diff --git a/synapse/__init__.py b/synapse/__init__.py index 8007079136..aacacda4fb 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -16,4 +16,4 @@ """ This is a reference implementation of a Matrix home server. """ -__version__ = "0.12.0" +__version__ = "0.12.1-rc1" From 97b364cb25179cefe38f0cb51ddde3ccb5757072 Mon Sep 17 00:00:00 2001 From: Florent VIOLLEAU Date: Wed, 27 Jan 2016 14:11:05 +0100 Subject: [PATCH 020/349] Update documentation Signed-off-by: Florent VIOLLEAU --- AUTHORS.rst | 3 +++ README.rst | 23 ++++++++++++++++++++++- 2 files changed, 25 insertions(+), 1 deletion(-) diff --git a/AUTHORS.rst b/AUTHORS.rst index f19d17d24f..07d4bee2a3 100644 --- a/AUTHORS.rst +++ b/AUTHORS.rst @@ -51,3 +51,6 @@ Steven Hammerton Mads Robin Christensen * CentOS 7 installation instructions. 
+ +Florent Violleau + * Add Raspberry Pi installation instructions and general troubleshooting items \ No newline at end of file diff --git a/README.rst b/README.rst index 297e72f1ae..39a338c790 100644 --- a/README.rst +++ b/README.rst @@ -125,6 +125,15 @@ Installing prerequisites on Mac OS X:: sudo easy_install pip sudo pip install virtualenv +Installing prerequisites on Raspbian:: + + sudo apt-get install build-essential python2.7-dev libffi-dev \ + python-pip python-setuptools sqlite3 \ + libssl-dev python-virtualenv libjpeg-dev + sudo pip install --upgrade pip + sudo pip install --upgrade ndg-httpsclient + sudo pip install --upgrade virtualenv + To install the synapse homeserver run:: virtualenv -p python2.7 ~/.synapse @@ -310,6 +319,18 @@ may need to manually upgrade it:: sudo pip install --upgrade pip +Installing may fail with ``Could not find any downloads that satisfy the requirement pymacaroons-pynacl (from matrix-synapse==0.12.0)``. +You can fix this by manually upgrading pip and virtualenv:: + + sudo pip install --upgrade virtualenv + +You can next rerun ``virtualenv -p python2.7 synapse`` to update the virtual env. + +Installing may fail during installing virtualenv with ``InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. For more information, see https://urllib3.readthedocs.org/en/latest/security.html#insecureplatformwarning.`` +You can fix this by manually installing ndg-httpsclient:: + + pip install --upgrade ndg-httpsclient + Installing may fail with ``mock requires setuptools>=17.1. Aborting installation``. You can fix this by upgrading setuptools:: @@ -544,4 +565,4 @@ sphinxcontrib-napoleon:: Building internal API documentation:: python setup.py build_sphinx - + \ No newline at end of file From d6d60b4d6cfb4f4b02a83bddb04530a6fbdf52a3 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 27 Jan 2016 17:02:10 +0000 Subject: [PATCH 021/349] Federation: drop events which cause SynapseErrors ... rather than rejecting any attempt to federate channels which contain such events. --- synapse/handlers/federation.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 6c19d6ae8c..2ce1e9d6c7 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1186,7 +1186,13 @@ class FederationHandler(BaseHandler): try: self.auth.check(e, auth_events=auth_for_e) - except AuthError as err: + except SynapseError as err: + # we may get SynapseErrors here as well as AuthErrors. For + # instance, there are a couple of (ancient) events in some + # rooms whose senders do not have the correct sigil; these + # cause SynapseErrors in auth.check. We don't want to give up + # the attempt to federate altogether in such cases. 
+ logger.warn( "Rejecting %s because %s", e.event_id, err.msg From 2152b320c5cd11caca0a039279e48262d6d88408 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 27 Jan 2016 17:09:17 +0000 Subject: [PATCH 022/349] PEP 8 --- synapse/server.py | 1 - 1 file changed, 1 deletion(-) diff --git a/synapse/server.py b/synapse/server.py index 006e91b37c..e013a349c9 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -21,7 +21,6 @@ # Imports required for the default HomeServer() implementation from twisted.web.client import BrowserLikePolicyForHTTPS from twisted.enterprise import adbapi -from twisted.internet import defer from synapse.federation import initialize_http_replication from synapse.http.client import SimpleHttpClient, InsecureInterceptableContextFactory From 0487c9441f1439bd02cb4d107a4fcacfe5dbe75d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 27 Jan 2016 17:25:07 +0000 Subject: [PATCH 023/349] Fix tests --- tests/storage/test_appservice.py | 6 +++--- tests/storage/test_registration.py | 3 +-- tests/utils.py | 8 ++++++++ 3 files changed, 12 insertions(+), 5 deletions(-) diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py index 5abecdf6e0..ed8af10d87 100644 --- a/tests/storage/test_appservice.py +++ b/tests/storage/test_appservice.py @@ -439,7 +439,7 @@ class ApplicationServiceStoreConfigTestCase(unittest.TestCase): f2 = self._write_config(suffix="2") config = Mock(app_service_config_files=[f1, f2]) - hs = yield setup_test_homeserver(config=config) + hs = yield setup_test_homeserver(config=config, datastore=Mock()) ApplicationServiceStore(hs) @@ -449,7 +449,7 @@ class ApplicationServiceStoreConfigTestCase(unittest.TestCase): f2 = self._write_config(id="id", suffix="2") config = Mock(app_service_config_files=[f1, f2]) - hs = yield setup_test_homeserver(config=config) + hs = yield setup_test_homeserver(config=config, datastore=Mock()) with self.assertRaises(ConfigError) as cm: ApplicationServiceStore(hs) @@ -465,7 +465,7 @@ class ApplicationServiceStoreConfigTestCase(unittest.TestCase): f2 = self._write_config(as_token="as_token", suffix="2") config = Mock(app_service_config_files=[f1, f2]) - hs = yield setup_test_homeserver(config=config) + hs = yield setup_test_homeserver(config=config, datastore=Mock()) with self.assertRaises(ConfigError) as cm: ApplicationServiceStore(hs) diff --git a/tests/storage/test_registration.py b/tests/storage/test_registration.py index a35efcc71e..7b3b4c13bc 100644 --- a/tests/storage/test_registration.py +++ b/tests/storage/test_registration.py @@ -18,7 +18,6 @@ from tests import unittest from twisted.internet import defer from synapse.api.errors import StoreError -from synapse.storage.registration import RegistrationStore from synapse.util import stringutils from tests.utils import setup_test_homeserver @@ -31,7 +30,7 @@ class RegistrationStoreTestCase(unittest.TestCase): hs = yield setup_test_homeserver() self.db_pool = hs.get_db_pool() - self.store = RegistrationStore(hs) + self.store = hs.get_datastore() self.user_id = "@my-user:test" self.tokens = ["AbCdEfGhIjKlMnOpQrStUvWxYz", diff --git a/tests/utils.py b/tests/utils.py index d75d492cb5..43cc2b30cd 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -60,8 +60,10 @@ def setup_test_homeserver(name="test", datastore=None, config=None, **kargs): name, db_pool=db_pool, config=config, version_string="Synapse/tests", database_engine=create_engine("sqlite3"), + get_db_conn=db_pool.get_db_conn, **kargs ) + hs.setup() else: hs = HomeServer( name, db_pool=None, 
datastore=datastore, config=config, @@ -280,6 +282,12 @@ class SQLiteMemoryDbPool(ConnectionPool, object): lambda conn: prepare_database(conn, engine) ) + def get_db_conn(self): + conn = self.connect() + engine = create_engine("sqlite3") + prepare_database(conn, engine) + return conn + class MemoryDataStore(object): From f93ecf87837567a540904e5d6a4280cf453ce7a8 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 26 Jan 2016 18:27:23 +0000 Subject: [PATCH 024/349] Don't turn on profiling --- synapse/app/homeserver.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index fb76be58a2..504557b2fc 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -651,7 +651,7 @@ def _resource_id(resource, path_seg): def run(hs): - PROFILE_SYNAPSE = True + PROFILE_SYNAPSE = False if PROFILE_SYNAPSE: def profile(func): from cProfile import Profile From b97f6626b6f9b91498d06a7ae113b9d20f1fc2ef Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 27 Jan 2016 09:54:30 +0000 Subject: [PATCH 025/349] Add cache to room stream --- synapse/handlers/sync.py | 42 +++++-- synapse/storage/events.py | 2 + synapse/storage/receipts.py | 65 +---------- synapse/storage/stream.py | 133 +++++++++++++++++++++++ synapse/util/caches/room_change_cache.py | 86 +++++++++++++++ 5 files changed, 254 insertions(+), 74 deletions(-) create mode 100644 synapse/util/caches/room_change_cache.py diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 328c049b03..1fdf978313 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -514,13 +514,6 @@ class SyncHandler(BaseHandler): timeline_limit = sync_config.filter_collection.timeline_limit() - room_events, _ = yield self.store.get_room_events_stream( - sync_config.user.to_string(), - from_key=since_token.room_key, - to_key=now_token.room_key, - limit=timeline_limit + 1, - ) - tags_by_room = yield self.store.get_updated_tags( sync_config.user.to_string(), since_token.account_data_key, @@ -533,6 +526,32 @@ class SyncHandler(BaseHandler): ) ) + rooms_changed = yield self.store.get_room_changes_for_user( + sync_config.user.to_string(), since_token.room_key, now_token.room_key + ) + + room_to_events = yield self.store.get_room_events_stream_for_rooms( + room_ids=room_ids, + from_key=since_token.room_key, + to_key=now_token.room_key, + limit=timeline_limit + 1, + ) + + room_events = [ + event + for events, _ in room_to_events.values() + for event in events + ] + + room_events.extend(rooms_changed) + + # room_events, _ = yield self.store.get_room_events_stream( + # sync_config.user.to_string(), + # from_key=since_token.room_key, + # to_key=now_token.room_key, + # limit=timeline_limit + 1, + # ) + joined = [] archived = [] if len(room_events) <= timeline_limit: @@ -694,14 +713,12 @@ class SyncHandler(BaseHandler): end_key = room_key while limited and len(recents) < timeline_limit and max_repeat: - events, keys = yield self.store.get_recent_events_for_room( + events, end_key = yield self.store.get_recent_room_events_stream_for_room( room_id, limit=load_limit + 1, - from_token=since_token.room_key if since_token else None, - end_token=end_key, + from_key=since_token.room_key if since_token else None, + to_key=end_key, ) - room_key, _ = keys - end_key = "s" + room_key.split('-')[-1] loaded_recents = sync_config.filter_collection.filter_room_timeline(events) loaded_recents = yield self._filter_events_for_client( sync_config.user.to_string(), @@ -712,6 +729,7 @@ class 
SyncHandler(BaseHandler): recents = loaded_recents if len(events) <= load_limit: limited = False + break max_repeat -= 1 if len(recents) > timeline_limit: diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 298cb9bada..d96ea3a30e 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -128,6 +128,8 @@ class EventsStore(SQLBaseStore): is_new_state=is_new_state, current_state=current_state, ) + logger.info("Invalidating %r at %r", event.room_id, stream_ordering) + self._events_stream_cache.room_has_changed(None, event.room_id, stream_ordering) except _RollbackButIsFineException: pass diff --git a/synapse/storage/receipts.py b/synapse/storage/receipts.py index c0593e23ee..b7a4e77748 100644 --- a/synapse/storage/receipts.py +++ b/synapse/storage/receipts.py @@ -16,6 +16,7 @@ from ._base import SQLBaseStore from synapse.util.caches.descriptors import cachedInlineCallbacks, cachedList, cached from synapse.util.caches import cache_counter, caches_by_name +from synapse.util.caches.room_change_cache import RoomStreamChangeCache from twisted.internet import defer @@ -31,8 +32,8 @@ class ReceiptsStore(SQLBaseStore): def __init__(self, hs): super(ReceiptsStore, self).__init__(hs) - self._receipts_stream_cache = _RoomStreamChangeCache( - self._receipts_id_gen.get_max_token(None) + self._receipts_stream_cache = RoomStreamChangeCache( + "ReceiptsRoomChangeCache", self._receipts_id_gen.get_max_token(None) ) @cached(num_args=2) @@ -370,63 +371,3 @@ class ReceiptsStore(SQLBaseStore): "data": json.dumps(data), } ) - - -class _RoomStreamChangeCache(object): - """Keeps track of the stream_id of the latest change in rooms. - - Given a list of rooms and stream key, it will give a subset of rooms that - may have changed since that key. If the key is too old then the cache - will simply return all rooms. - """ - def __init__(self, current_key, size_of_cache=10000): - self._size_of_cache = size_of_cache - self._room_to_key = {} - self._cache = sorteddict() - self._earliest_key = current_key - self.name = "ReceiptsRoomChangeCache" - caches_by_name[self.name] = self._cache - - @defer.inlineCallbacks - def get_rooms_changed(self, store, room_ids, key): - """Returns subset of room ids that have had new receipts since the - given key. If the key is too old it will just return the given list. - """ - if key > (yield self._get_earliest_key(store)): - keys = self._cache.keys() - i = keys.bisect_right(key) - - result = set( - self._cache[k] for k in keys[i:] - ).intersection(room_ids) - - cache_counter.inc_hits(self.name) - else: - result = room_ids - cache_counter.inc_misses(self.name) - - defer.returnValue(result) - - @defer.inlineCallbacks - def room_has_changed(self, store, room_id, key): - """Informs the cache that the room has been changed at the given key. 
- """ - if key > (yield self._get_earliest_key(store)): - old_key = self._room_to_key.get(room_id, None) - if old_key: - key = max(key, old_key) - self._cache.pop(old_key, None) - self._cache[key] = room_id - - while len(self._cache) > self._size_of_cache: - k, r = self._cache.popitem() - self._earliest_key = max(k, self._earliest_key) - self._room_to_key.pop(r, None) - - @defer.inlineCallbacks - def _get_earliest_key(self, store): - if self._earliest_key is None: - self._earliest_key = yield store.get_max_receipt_stream_id() - self._earliest_key = int(self._earliest_key) - - defer.returnValue(self._earliest_key) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index e31bad258a..3a32a0019a 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -37,6 +37,7 @@ from twisted.internet import defer from ._base import SQLBaseStore from synapse.util.caches.descriptors import cachedInlineCallbacks +from synapse.util.caches.room_change_cache import RoomStreamChangeCache from synapse.api.constants import EventTypes from synapse.types import RoomStreamToken from synapse.util.logutils import log_function @@ -77,6 +78,12 @@ def upper_bound(token): class StreamStore(SQLBaseStore): + def __init__(self, hs): + super(StreamStore, self).__init__(hs) + + self._events_stream_cache = RoomStreamChangeCache( + "EventsRoomStreamChangeCache", self._stream_id_gen.get_max_token(None) + ) @defer.inlineCallbacks def get_appservice_room_stream(self, service, from_key, to_key, limit=0): @@ -157,6 +164,132 @@ class StreamStore(SQLBaseStore): results = yield self.runInteraction("get_appservice_room_stream", f) defer.returnValue(results) + @defer.inlineCallbacks + def get_room_events_stream_for_rooms(self, room_ids, from_key, to_key, limit=0): + from_id = RoomStreamToken.parse_stream_token(from_key).stream + + room_ids = yield self._events_stream_cache.get_rooms_changed( + self, room_ids, from_id + ) + + if not room_ids: + defer.returnValue({}) + + results = {} + room_ids = list(room_ids) + for rm_ids in (room_ids[i:i+20] for i in xrange(0, len(room_ids), 20)): + res = yield defer.gatherResults([ + self.get_recent_room_events_stream_for_room( + room_id, from_key, to_key, limit + ).addCallback(lambda r, rm: (rm, r), room_id) + for room_id in room_ids + ]) + results.update(dict(res)) + + defer.returnValue(results) + + @defer.inlineCallbacks + def get_recent_room_events_stream_for_room(self, room_id, from_key, to_key, limit=0): + if from_key is not None: + from_id = RoomStreamToken.parse_stream_token(from_key).stream + else: + from_id = None + to_id = RoomStreamToken.parse_stream_token(to_key).stream + + if from_key == to_key: + defer.returnValue(([], from_key)) + + has_changed = yield self._events_stream_cache.get_room_has_changed( + room_id, from_id + ) + + if not has_changed: + defer.returnValue(([], from_key)) + + def f(txn): + if from_id is not None: + sql = ( + "SELECT event_id, stream_ordering FROM events WHERE" + " room_id = ?" + " AND not outlier" + " AND stream_ordering > ? AND stream_ordering <= ?" + " ORDER BY stream_ordering DESC LIMIT ?" + ) + txn.execute(sql, (room_id, from_id, to_id, limit)) + else: + sql = ( + "SELECT event_id, stream_ordering FROM events WHERE" + " room_id = ?" + " AND not outlier" + " AND stream_ordering <= ?" + " ORDER BY stream_ordering DESC LIMIT ?" 
+ ) + txn.execute(sql, (room_id, to_id, limit)) + + rows = self.cursor_to_dict(txn) + + ret = self._get_events_txn( + txn, + [r["event_id"] for r in rows], + get_prev_content=True + ) + + ret.reverse() + + self._set_before_and_after(ret, rows) + + if rows: + key = "s%d" % min(r["stream_ordering"] for r in rows) + else: + # Assume we didn't get anything because there was nothing to + # get. + key = from_key + + return ret, key + res = yield self.runInteraction("get_recent_room_events_stream_for_room", f) + defer.returnValue(res) + + def get_room_changes_for_user(self, user_id, from_key, to_key): + if from_key is not None: + from_id = RoomStreamToken.parse_stream_token(from_key).stream + else: + from_id = None + to_id = RoomStreamToken.parse_stream_token(to_key).stream + + if from_key == to_key: + return defer.succeed([]) + + def f(txn): + if from_id is not None: + sql = ( + "SELECT m.event_id, stream_ordering FROM events AS e, room_memberships AS m" + " WHERE e.event_id = m.event_id" + " AND m.user_id = ?" + " AND e.stream_ordering > ? AND e.stream_ordering <= ?" + " ORDER BY e.stream_ordering ASC" + ) + txn.execute(sql, (user_id, from_id, to_id,)) + else: + sql = ( + "SELECT m.event_id, stream_ordering FROM events AS e, room_memberships AS m" + " WHERE e.event_id = m.event_id" + " AND m.user_id = ?" + " AND stream_ordering <= ?" + " ORDER BY stream_ordering ASC" + ) + txn.execute(sql, (user_id, to_id,)) + rows = self.cursor_to_dict(txn) + + ret = self._get_events_txn( + txn, + [r["event_id"] for r in rows], + get_prev_content=True + ) + + return ret + + return self.runInteraction("get_room_changes_for_user", f) + @log_function def get_room_events_stream( self, diff --git a/synapse/util/caches/room_change_cache.py b/synapse/util/caches/room_change_cache.py new file mode 100644 index 0000000000..3a873c9c30 --- /dev/null +++ b/synapse/util/caches/room_change_cache.py @@ -0,0 +1,86 @@ +# -*- coding: utf-8 -*- +# Copyright 2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from synapse.util.caches import cache_counter, caches_by_name + + +from blist import sorteddict +import logging + + +logger = logging.getLogger(__name__) + + +class RoomStreamChangeCache(object): + """Keeps track of the stream_id of the latest change in rooms. + + Given a list of rooms and stream key, it will give a subset of rooms that + may have changed since that key. If the key is too old then the cache + will simply return all rooms. 
+ """ + def __init__(self, name, current_key, size_of_cache=10000): + self._size_of_cache = size_of_cache + self._room_to_key = {} + self._cache = sorteddict() + self._earliest_known_key = current_key + self.name = name + caches_by_name[self.name] = self._cache + + def get_room_has_changed(self, room_id, key): + if key <= self._earliest_known_key: + return True + + room_key = self._room_to_key.get(room_id, None) + if room_key is None: + return True + + if key < room_key: + return True + + return False + + def get_rooms_changed(self, store, room_ids, key): + """Returns subset of room ids that have had new things since the + given key. If the key is too old it will just return the given list. + """ + if key > self._earliest_known_key: + keys = self._cache.keys() + i = keys.bisect_right(key) + + result = set( + self._cache[k] for k in keys[i:] + ).intersection(room_ids) + + cache_counter.inc_hits(self.name) + else: + result = room_ids + cache_counter.inc_misses(self.name) + + return result + + def room_has_changed(self, store, room_id, key): + """Informs the cache that the room has been changed at the given key. + """ + if key > self._earliest_known_key: + old_key = self._room_to_key.get(room_id, None) + if old_key: + key = max(key, old_key) + self._cache.pop(old_key, None) + self._cache[key] = room_id + + while len(self._cache) > self._size_of_cache: + k, r = self._cache.popitem() + self._earliest_key = max(k, self._earliest_key) + self._room_to_key.pop(r, None) From aca3193efb8c5f9f20049f61c96e5ff12f328b05 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 27 Jan 2016 17:06:52 +0000 Subject: [PATCH 026/349] Use the same path for incremental with gap or without gap --- synapse/handlers/sync.py | 352 ++++++++++++++++---------------------- synapse/storage/events.py | 1 - synapse/storage/stream.py | 6 +- 3 files changed, 147 insertions(+), 212 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 1fdf978313..f5e20d6a6e 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -72,7 +72,7 @@ class JoinedSyncResult(collections.namedtuple("JoinedSyncResult", [ ) -class ArchivedSyncResult(collections.namedtuple("JoinedSyncResult", [ +class ArchivedSyncResult(collections.namedtuple("ArchivedSyncResult", [ "room_id", # str "timeline", # TimelineBatch "state", # dict[(str, str), FrozenEvent] @@ -429,44 +429,20 @@ class SyncHandler(BaseHandler): defer.returnValue((now_token, ephemeral_by_room)) - @defer.inlineCallbacks def full_state_sync_for_archived_room(self, room_id, sync_config, leave_event_id, leave_token, timeline_since_token, tags_by_room, account_data_by_room): """Sync a room for a client which is starting without any state Returns: - A Deferred JoinedSyncResult. + A Deferred ArchivedSyncResult. 
""" - batch = yield self.load_filtered_recents( - room_id, sync_config, leave_token, since_token=timeline_since_token + return self.incremental_sync_for_archived_room( + sync_config, room_id, leave_event_id, timeline_since_token, tags_by_room, + account_data_by_room, full_state=True, leave_token=leave_token, ) - leave_state = yield self.store.get_state_for_event(leave_event_id) - - leave_state = { - (e.type, e.state_key): e - for e in sync_config.filter_collection.filter_room_state( - leave_state.values() - ) - } - - account_data = self.account_data_for_room( - room_id, tags_by_room, account_data_by_room - ) - - account_data = sync_config.filter_collection.filter_room_account_data( - account_data - ) - - defer.returnValue(ArchivedSyncResult( - room_id=room_id, - timeline=batch, - state=leave_state, - account_data=account_data, - )) - @defer.inlineCallbacks def incremental_sync_with_gap(self, sync_config, since_token): """ Get the incremental delta needed to bring the client up to @@ -512,173 +488,127 @@ class SyncHandler(BaseHandler): sync_config.user ) + user_id = sync_config.user.to_string() + timeline_limit = sync_config.filter_collection.timeline_limit() tags_by_room = yield self.store.get_updated_tags( - sync_config.user.to_string(), + user_id, since_token.account_data_key, ) account_data, account_data_by_room = ( yield self.store.get_updated_account_data_for_user( - sync_config.user.to_string(), + user_id, since_token.account_data_key, ) ) + # Get a list of membership change events that have happened. rooms_changed = yield self.store.get_room_changes_for_user( - sync_config.user.to_string(), since_token.room_key, now_token.room_key + user_id, since_token.room_key, now_token.room_key ) + mem_change_events_by_room_id = {} + for event in rooms_changed: + mem_change_events_by_room_id.setdefault(event.room_id, []).append(event) + + newly_joined_rooms = [] + archived = [] + invited = [] + for room_id, events in mem_change_events_by_room_id.items(): + non_joins = [e for e in events if e.membership != Membership.JOIN] + has_join = len(non_joins) != len(events) + + # We want to figure out if we joined the room at some point since + # the last sync (even if we have since left). This is to make sure + # we do send down the room, and with full state, where necessary + if room_id in joined_room_ids or has_join: + old_state = yield self.get_state_at(room_id, since_token) + old_mem_ev = old_state.get((EventTypes.Member, user_id), None) + if not old_mem_ev or old_mem_ev.membership != Membership.JOIN: + newly_joined_rooms.append(room_id) + + if room_id in joined_room_ids: + continue + + if not non_joins: + continue + + # Only bother if we're still currently invited + should_invite = non_joins[-1].membership == Membership.INVITE + if should_invite: + room_sync = InvitedSyncResult(room_id, invite=non_joins[-1]) + if room_sync: + invited.append(room_sync) + + # Always include leave/ban events. Just take the last one. + # TODO: How do we handle ban -> leave in same batch? + leave_events = [ + e for e in non_joins + if e.membership in (Membership.LEAVE, Membership.BAN) + ] + + if leave_events: + leave_event = leave_events[-1] + room_sync = yield self.incremental_sync_for_archived_room( + sync_config, room_id, leave_event.event_id, since_token, + tags_by_room, account_data_by_room, + full_state=room_id in newly_joined_rooms + ) + if room_sync: + archived.append(room_sync) + + # Get all events for rooms we're currently joined to. 
room_to_events = yield self.store.get_room_events_stream_for_rooms( - room_ids=room_ids, + room_ids=joined_room_ids, from_key=since_token.room_key, to_key=now_token.room_key, limit=timeline_limit + 1, ) - room_events = [ - event - for events, _ in room_to_events.values() - for event in events - ] - - room_events.extend(rooms_changed) - - # room_events, _ = yield self.store.get_room_events_stream( - # sync_config.user.to_string(), - # from_key=since_token.room_key, - # to_key=now_token.room_key, - # limit=timeline_limit + 1, - # ) - joined = [] - archived = [] - if len(room_events) <= timeline_limit: - # There is no gap in any of the rooms. Therefore we can just - # partition the new events by room and return them. - logger.debug("Got %i events for incremental sync - not limited", - len(room_events)) + # We loop through all room ids, even if there are no new events, in case + # there are non room events taht we need to notify about. + for room_id in joined_room_ids: + room_entry = room_to_events.get(room_id, None) - invite_events = [] - leave_events = [] - events_by_room_id = {} - for event in room_events: - events_by_room_id.setdefault(event.room_id, []).append(event) - if event.room_id not in joined_room_ids: - if (event.type == EventTypes.Member - and event.state_key == sync_config.user.to_string()): - if event.membership == Membership.INVITE: - invite_events.append(event) - elif event.membership in (Membership.LEAVE, Membership.BAN): - leave_events.append(event) + if room_entry: + events, start_key = room_entry - for room_id in joined_room_ids: - recents = events_by_room_id.get(room_id, []) - logger.debug("Events for room %s: %r", room_id, recents) - state = { - (event.type, event.state_key): event - for event in recents if event.is_state()} - limited = False + prev_batch_token = now_token.copy_and_replace("room_key", start_key) - if recents: - prev_batch = now_token.copy_and_replace( - "room_key", recents[0].internal_metadata.before - ) - else: - prev_batch = now_token + newly_joined_room = room_id in newly_joined_rooms + full_state = newly_joined_room - just_joined = yield self.check_joined_room(sync_config, state) - if just_joined: - logger.debug("User has just joined %s: needs full state", - room_id) - state = yield self.get_state_at(room_id, now_token) - # the timeline is inherently limited if we've just joined - limited = True - - recents = sync_config.filter_collection.filter_room_timeline(recents) - - state = { - (e.type, e.state_key): e - for e in sync_config.filter_collection.filter_room_state( - state.values() - ) - } - - acc_data = self.account_data_for_room( - room_id, tags_by_room, account_data_by_room + batch = yield self.load_filtered_recents( + room_id, sync_config, prev_batch_token, + since_token=since_token, + recents=events, + newly_joined_room=newly_joined_room, ) - - acc_data = sync_config.filter_collection.filter_room_account_data( - acc_data + else: + batch = TimelineBatch( + events=[], + prev_batch=since_token, + limited=False, ) + full_state = False - ephemeral = sync_config.filter_collection.filter_room_ephemeral( - ephemeral_by_room.get(room_id, []) - ) - - room_sync = JoinedSyncResult( - room_id=room_id, - timeline=TimelineBatch( - events=recents, - prev_batch=prev_batch, - limited=limited, - ), - state=state, - ephemeral=ephemeral, - account_data=acc_data, - unread_notifications={}, - ) - logger.debug("Result for room %s: %r", room_id, room_sync) - - if room_sync: - notifs = yield self.unread_notifs_for_room_id( - room_id, sync_config, all_ephemeral_by_room 
- ) - - if notifs is not None: - notif_dict = room_sync.unread_notifications - notif_dict["notification_count"] = len(notifs) - notif_dict["highlight_count"] = len([ - 1 for notif in notifs - if _action_has_highlight(notif["actions"]) - ]) - - joined.append(room_sync) - - else: - logger.debug("Got %i events for incremental sync - hit limit", - len(room_events)) - - invite_events = yield self.store.get_invites_for_user( - sync_config.user.to_string() - ) - - leave_events = yield self.store.get_leave_and_ban_events_for_user( - sync_config.user.to_string() - ) - - for room_id in joined_room_ids: - room_sync = yield self.incremental_sync_with_gap_for_room( - room_id, sync_config, since_token, now_token, - ephemeral_by_room, tags_by_room, account_data_by_room, - all_ephemeral_by_room=all_ephemeral_by_room, - ) - if room_sync: - joined.append(room_sync) - - for leave_event in leave_events: - room_sync = yield self.incremental_sync_for_archived_room( - sync_config, leave_event, since_token, tags_by_room, - account_data_by_room + room_sync = yield self.incremental_sync_with_gap_for_room( + room_id=room_id, + sync_config=sync_config, + since_token=since_token, + now_token=now_token, + ephemeral_by_room=ephemeral_by_room, + tags_by_room=tags_by_room, + account_data_by_room=account_data_by_room, + all_ephemeral_by_room=all_ephemeral_by_room, + batch=batch, + full_state=full_state, ) if room_sync: - archived.append(room_sync) - - invited = [ - InvitedSyncResult(room_id=event.room_id, invite=event) - for event in invite_events - ] + joined.append(room_sync) account_data_for_user = sync_config.filter_collection.filter_account_data( self.account_data_for_user(account_data) @@ -699,12 +629,10 @@ class SyncHandler(BaseHandler): @defer.inlineCallbacks def load_filtered_recents(self, room_id, sync_config, now_token, - since_token=None): + since_token=None, recents=None, newly_joined_room=False): """ :returns a Deferred TimelineBatch """ - limited = True - recents = [] filtering_factor = 2 timeline_limit = sync_config.filter_collection.timeline_limit() load_limit = max(timeline_limit * filtering_factor, 100) @@ -712,11 +640,27 @@ class SyncHandler(BaseHandler): room_key = now_token.room_key end_key = room_key + limited = recents is None or newly_joined_room or timeline_limit < len(recents) + + if recents is not None: + recents = sync_config.filter_collection.filter_room_timeline(recents) + recents = yield self._filter_events_for_client( + sync_config.user.to_string(), + recents, + is_peeking=sync_config.is_guest, + ) + else: + recents = [] + + since_key = None + if since_token and not newly_joined_room: + since_key = since_token.room_key + while limited and len(recents) < timeline_limit and max_repeat: - events, end_key = yield self.store.get_recent_room_events_stream_for_room( + events, end_key = yield self.store.get_room_events_stream_for_room( room_id, limit=load_limit + 1, - from_key=since_token.room_key if since_token else None, + from_key=since_key, to_key=end_key, ) loaded_recents = sync_config.filter_collection.filter_room_timeline(events) @@ -727,6 +671,7 @@ class SyncHandler(BaseHandler): ) loaded_recents.extend(recents) recents = loaded_recents + if len(events) <= load_limit: limited = False break @@ -742,7 +687,9 @@ class SyncHandler(BaseHandler): ) defer.returnValue(TimelineBatch( - events=recents, prev_batch=prev_batch_token, limited=limited + events=recents, + prev_batch=prev_batch_token, + limited=limited or newly_joined_room )) @defer.inlineCallbacks @@ -750,24 +697,8 @@ class 
SyncHandler(BaseHandler): since_token, now_token, ephemeral_by_room, tags_by_room, account_data_by_room, - all_ephemeral_by_room): - """ Get the incremental delta needed to bring the client up to date for - the room. Gives the client the most recent events and the changes to - state. - Returns: - A Deferred JoinedSyncResult - """ - logger.debug("Doing incremental sync for room %s between %s and %s", - room_id, since_token, now_token) - - # TODO(mjark): Check for redactions we might have missed. - - batch = yield self.load_filtered_recents( - room_id, sync_config, now_token, since_token, - ) - - logger.debug("Recents %r", batch) - + all_ephemeral_by_room, + batch, full_state=False): if batch.limited: current_state = yield self.get_state_at(room_id, now_token) @@ -832,43 +763,48 @@ class SyncHandler(BaseHandler): defer.returnValue(room_sync) @defer.inlineCallbacks - def incremental_sync_for_archived_room(self, sync_config, leave_event, + def incremental_sync_for_archived_room(self, sync_config, room_id, leave_event_id, since_token, tags_by_room, - account_data_by_room): + account_data_by_room, full_state, + leave_token=None): """ Get the incremental delta needed to bring the client up to date for the archived room. Returns: A Deferred ArchivedSyncResult """ - stream_token = yield self.store.get_stream_token_for_event( - leave_event.event_id - ) + if not leave_token: + stream_token = yield self.store.get_stream_token_for_event( + leave_event_id + ) - leave_token = since_token.copy_and_replace("room_key", stream_token) + leave_token = since_token.copy_and_replace("room_key", stream_token) - if since_token.is_after(leave_token): + if since_token and since_token.is_after(leave_token): defer.returnValue(None) batch = yield self.load_filtered_recents( - leave_event.room_id, sync_config, leave_token, since_token, + room_id, sync_config, leave_token, since_token, ) logger.debug("Recents %r", batch) state_events_at_leave = yield self.store.get_state_for_event( - leave_event.event_id + leave_event_id ) - state_at_previous_sync = yield self.get_state_at( - leave_event.room_id, stream_position=since_token - ) + if not full_state: + state_at_previous_sync = yield self.get_state_at( + room_id, stream_position=since_token + ) - state_events_delta = yield self.compute_state_delta( - since_token=since_token, - previous_state=state_at_previous_sync, - current_state=state_events_at_leave, - ) + state_events_delta = yield self.compute_state_delta( + since_token=since_token, + previous_state=state_at_previous_sync, + current_state=state_events_at_leave, + ) + else: + state_events_delta = state_events_at_leave state_events_delta = { (e.type, e.state_key): e @@ -878,7 +814,7 @@ class SyncHandler(BaseHandler): } account_data = self.account_data_for_room( - leave_event.room_id, tags_by_room, account_data_by_room + room_id, tags_by_room, account_data_by_room ) account_data = sync_config.filter_collection.filter_room_account_data( @@ -886,7 +822,7 @@ class SyncHandler(BaseHandler): ) room_sync = ArchivedSyncResult( - room_id=leave_event.room_id, + room_id=room_id, timeline=batch, state=state_events_delta, account_data=account_data, diff --git a/synapse/storage/events.py b/synapse/storage/events.py index d96ea3a30e..0dd1daaa2e 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -128,7 +128,6 @@ class EventsStore(SQLBaseStore): is_new_state=is_new_state, current_state=current_state, ) - logger.info("Invalidating %r at %r", event.room_id, stream_ordering) 
self._events_stream_cache.room_has_changed(None, event.room_id, stream_ordering) except _RollbackButIsFineException: pass diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 3a32a0019a..563e289c4e 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -179,7 +179,7 @@ class StreamStore(SQLBaseStore): room_ids = list(room_ids) for rm_ids in (room_ids[i:i+20] for i in xrange(0, len(room_ids), 20)): res = yield defer.gatherResults([ - self.get_recent_room_events_stream_for_room( + self.get_room_events_stream_for_room( room_id, from_key, to_key, limit ).addCallback(lambda r, rm: (rm, r), room_id) for room_id in room_ids @@ -189,7 +189,7 @@ class StreamStore(SQLBaseStore): defer.returnValue(results) @defer.inlineCallbacks - def get_recent_room_events_stream_for_room(self, room_id, from_key, to_key, limit=0): + def get_room_events_stream_for_room(self, room_id, from_key, to_key, limit=0): if from_key is not None: from_id = RoomStreamToken.parse_stream_token(from_key).stream else: @@ -246,7 +246,7 @@ class StreamStore(SQLBaseStore): key = from_key return ret, key - res = yield self.runInteraction("get_recent_room_events_stream_for_room", f) + res = yield self.runInteraction("get_room_events_stream_for_room", f) defer.returnValue(res) def get_room_changes_for_user(self, user_id, from_key, to_key): From e7febf4fbb1f1beb11e7a03252f6844f84af7f30 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 27 Jan 2016 17:11:04 +0000 Subject: [PATCH 027/349] PEP8 --- synapse/storage/events.py | 4 +++- synapse/storage/receipts.py | 2 -- synapse/storage/stream.py | 9 ++++++--- 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 0dd1daaa2e..80187722ea 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -128,7 +128,9 @@ class EventsStore(SQLBaseStore): is_new_state=is_new_state, current_state=current_state, ) - self._events_stream_cache.room_has_changed(None, event.room_id, stream_ordering) + self._events_stream_cache.room_has_changed( + None, event.room_id, stream_ordering + ) except _RollbackButIsFineException: pass diff --git a/synapse/storage/receipts.py b/synapse/storage/receipts.py index b7a4e77748..7118368d97 100644 --- a/synapse/storage/receipts.py +++ b/synapse/storage/receipts.py @@ -15,12 +15,10 @@ from ._base import SQLBaseStore from synapse.util.caches.descriptors import cachedInlineCallbacks, cachedList, cached -from synapse.util.caches import cache_counter, caches_by_name from synapse.util.caches.room_change_cache import RoomStreamChangeCache from twisted.internet import defer -from blist import sorteddict import logging import ujson as json diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 563e289c4e..0b22251790 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -262,7 +262,8 @@ class StreamStore(SQLBaseStore): def f(txn): if from_id is not None: sql = ( - "SELECT m.event_id, stream_ordering FROM events AS e, room_memberships AS m" + "SELECT m.event_id, stream_ordering FROM events AS e," + " room_memberships AS m" " WHERE e.event_id = m.event_id" " AND m.user_id = ?" " AND e.stream_ordering > ? AND e.stream_ordering <= ?" 
@@ -271,7 +272,8 @@ class StreamStore(SQLBaseStore): txn.execute(sql, (user_id, from_id, to_id,)) else: sql = ( - "SELECT m.event_id, stream_ordering FROM events AS e, room_memberships AS m" + "SELECT m.event_id, stream_ordering FROM events AS e," + " room_memberships AS m" " WHERE e.event_id = m.event_id" " AND m.user_id = ?" " AND stream_ordering <= ?" @@ -307,7 +309,8 @@ class StreamStore(SQLBaseStore): "SELECT c.room_id FROM history_visibility AS h" " INNER JOIN current_state_events AS c" " ON h.event_id = c.event_id" - " WHERE c.room_id IN (%s) AND h.history_visibility = 'world_readable'" % ( + " WHERE c.room_id IN (%s)" + " AND h.history_visibility = 'world_readable'" % ( ",".join(map(lambda _: "?", room_ids)) ) ) From 5fc9b17518008e08a420b94a8f77955440c52b2b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 27 Jan 2016 17:39:20 +0000 Subject: [PATCH 028/349] No chdir --- synapse/app/homeserver.py | 1 - 1 file changed, 1 deletion(-) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 504557b2fc..56a34bd50b 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -728,7 +728,6 @@ def run(hs): auto_close_fds=False, verbose=True, logger=logger, - chdir=os.path.dirname(os.path.abspath(__file__)), ) daemon.start() From 5cba88ea7c96e5e8a9f3bc1a28cf3414b3083d60 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 27 Jan 2016 17:42:45 +0000 Subject: [PATCH 029/349] Make it possible to paginate forwards from stream tokens In order that we can fill the gap after a /sync, make it possible to paginate forwards from a stream token. --- synapse/handlers/message.py | 43 +++++++++++++++--------------- tests/rest/client/v1/test_rooms.py | 16 +++++++++-- 2 files changed, 35 insertions(+), 24 deletions(-) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index ff800f8af1..b73ad62147 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -105,8 +105,6 @@ class MessageHandler(BaseHandler): room_token = pagin_config.from_token.room_key room_token = RoomStreamToken.parse(room_token) - if room_token.topological is None: - raise SynapseError(400, "Invalid token") pagin_config.from_token = pagin_config.from_token.copy_and_replace( "room_key", str(room_token) @@ -117,27 +115,28 @@ class MessageHandler(BaseHandler): membership, member_event_id = yield self._check_in_room_or_world_readable( room_id, user_id ) - if membership == Membership.LEAVE: - # If they have left the room then clamp the token to be before - # they left the room. - leave_token = yield self.store.get_topological_token_for_event( - member_event_id + + if source_config.direction == 'b': + # if we're going backwards, we might need to backfill. This + # requires that we have a topo token. + if room_token.topological is None: + raise SynapseError(400, "Invalid token: cannot paginate " + "backwards from a stream token") + + if membership == Membership.LEAVE: + # If they have left the room then clamp the token to be before + # they left the room, to save the effort of loading from the + # database. 
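A throwaway illustration of the clamp the comment above describes, with an invented token type and made-up orderings (Synapse's real RoomStreamToken carries the same two fields):

    from collections import namedtuple

    # Toy stand-in for RoomStreamToken: just the two orderings compared above.
    Token = namedtuple("Token", ["topological", "stream"])

    leave_token = Token(topological=180, stream=2041)  # where the leave event was persisted
    room_token = Token(topological=205, stream=2300)   # where the client asked to paginate from

    # Clamp: a user who has left never pages past their own leave event.
    from_token = leave_token if leave_token.topological < room_token.topological else room_token
    print(from_token)  # Token(topological=180, stream=2041)
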
+ leave_token = yield self.store.get_topological_token_for_event( + member_event_id + ) + leave_token = RoomStreamToken.parse(leave_token) + if leave_token.topological < room_token.topological: + source_config.from_key = str(leave_token) + + yield self.hs.get_handlers().federation_handler.maybe_backfill( + room_id, room_token.topological ) - leave_token = RoomStreamToken.parse(leave_token) - if leave_token.topological < room_token.topological: - source_config.from_key = str(leave_token) - - if source_config.direction == "f": - if source_config.to_key is None: - source_config.to_key = str(leave_token) - else: - to_token = RoomStreamToken.parse(source_config.to_key) - if leave_token.topological < to_token.topological: - source_config.to_key = str(leave_token) - - yield self.hs.get_handlers().federation_handler.maybe_backfill( - room_id, room_token.topological - ) events, next_key = yield data_source.get_pagination_rows( requester.user, source_config, room_id diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py index cd03106e88..2fe6f695f5 100644 --- a/tests/rest/client/v1/test_rooms.py +++ b/tests/rest/client/v1/test_rooms.py @@ -1045,8 +1045,20 @@ class RoomMessageListTestCase(RestTestCase): self.assertTrue("end" in response) @defer.inlineCallbacks - def test_stream_token_is_rejected(self): + def test_stream_token_is_rejected_for_back_pagination(self): (code, response) = yield self.mock_resource.trigger_get( - "/rooms/%s/messages?access_token=x&from=s0_0_0_0" % + "/rooms/%s/messages?access_token=x&from=s0_0_0_0_0&dir=b" % self.room_id) self.assertEquals(400, code) + + @defer.inlineCallbacks + def test_stream_token_is_accepted_for_fwd_pagianation(self): + token = "s0_0_0_0_0" + (code, response) = yield self.mock_resource.trigger_get( + "/rooms/%s/messages?access_token=x&from=%s" % + (self.room_id, token)) + self.assertEquals(200, code) + self.assertTrue("start" in response) + self.assertEquals(token, response['start']) + self.assertTrue("chunk" in response) + self.assertTrue("end" in response) \ No newline at end of file From a6477d5933a11d5e93a3d6fad284db511e0a1ea0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jan 2016 09:19:55 +0000 Subject: [PATCH 030/349] Remove chdir --- synapse/app/homeserver.py | 1 - 1 file changed, 1 deletion(-) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index f881a8d262..a83d3f394f 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -719,7 +719,6 @@ def run(hs): auto_close_fds=False, verbose=True, logger=logger, - chdir=os.path.dirname(os.path.abspath(__file__)), ) daemon.start() From c5e7c0e436d9073c07887676817bb5a45314aea5 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jan 2016 09:58:45 +0000 Subject: [PATCH 031/349] Up get_rooms_for_user cache size --- synapse/storage/roommember.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index edfecced05..1d3e004c90 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -241,7 +241,7 @@ class RoomMemberStore(SQLBaseStore): return rows - @cached() + @cached(max_entries=5000) def get_rooms_for_user(self, user_id): return self.get_rooms_for_user_where_membership_is( user_id, membership_list=[Membership.JOIN], From ba8931829b0b601eb14049c92e0f21a10772576d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jan 2016 11:34:17 +0000 Subject: [PATCH 032/349] Return correct type of token --- synapse/storage/stream.py 
| 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 0b22251790..28721e6994 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -236,7 +236,7 @@ class StreamStore(SQLBaseStore): ret.reverse() - self._set_before_and_after(ret, rows) + self._set_before_and_after(ret, rows, topo_order=False) if rows: key = "s%d" % min(r["stream_ordering"] for r in rows) @@ -581,10 +581,13 @@ class StreamStore(SQLBaseStore): return rows[0][0] if rows else 0 @staticmethod - def _set_before_and_after(events, rows): + def _set_before_and_after(events, rows, topo_order=True): for event, row in zip(events, rows): stream = row["stream_ordering"] - topo = event.depth + if topo_order: + topo = event.depth + else: + topo = None internal = event.internal_metadata internal.before = str(RoomStreamToken(topo, stream - 1)) internal.after = str(RoomStreamToken(topo, stream)) From 4e7948b47a3f197682de82fc0cda07ebb08a581d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jan 2016 11:52:34 +0000 Subject: [PATCH 033/349] Allow paginating backwards from stream token --- synapse/handlers/message.py | 15 +++++++++------ synapse/storage/stream.py | 16 ++++++++++++++-- tests/rest/client/v1/test_rooms.py | 9 +-------- 3 files changed, 24 insertions(+), 16 deletions(-) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index b73ad62147..82c8cb5f0c 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -16,7 +16,7 @@ from twisted.internet import defer from synapse.api.constants import EventTypes, Membership -from synapse.api.errors import SynapseError, AuthError, Codes +from synapse.api.errors import AuthError, Codes from synapse.streams.config import PaginationConfig from synapse.events.utils import serialize_event from synapse.events.validator import EventValidator @@ -119,9 +119,12 @@ class MessageHandler(BaseHandler): if source_config.direction == 'b': # if we're going backwards, we might need to backfill. This # requires that we have a topo token. 
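The hunks below make that work even when the client only holds a stream token, by looking up the highest topological ordering at or before the given stream position. A self-contained toy of that lookup (sqlite3 with a cut-down events table, not Synapse's real schema), using the same query shape this patch adds to the storage layer:

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute(
        "CREATE TABLE events (room_id TEXT, stream_ordering INT, topological_ordering INT)"
    )
    conn.executemany(
        "INSERT INTO events VALUES (?, ?, ?)",
        [("!a:hs", 10, 3), ("!a:hs", 11, 4), ("!a:hs", 15, 5), ("!b:hs", 12, 9)],
    )

    # Same shape as the lookup added to StreamStore below: the newest
    # topological ordering strictly before the client's stream position.
    max_topo = conn.execute(
        "SELECT max(topological_ordering) FROM events"
        " WHERE room_id = ? AND stream_ordering < ?",
        ("!a:hs", 15),
    ).fetchone()[0]
    print(max_topo)  # 4 -> passed to maybe_backfill in place of a topological token
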
- if room_token.topological is None: - raise SynapseError(400, "Invalid token: cannot paginate " - "backwards from a stream token") + if room_token.topological: + max_topo = room_token.topological + else: + max_topo = yield self.store.get_max_topological_token_for_stream_and_room( + room_id, room_token.stream + ) if membership == Membership.LEAVE: # If they have left the room then clamp the token to be before @@ -131,11 +134,11 @@ class MessageHandler(BaseHandler): member_event_id ) leave_token = RoomStreamToken.parse(leave_token) - if leave_token.topological < room_token.topological: + if leave_token.topological < max_topo: source_config.from_key = str(leave_token) yield self.hs.get_handlers().federation_handler.maybe_backfill( - room_id, room_token.topological + room_id, max_topo ) events, next_key = yield data_source.get_pagination_rows( diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 28721e6994..5096b46864 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -234,10 +234,10 @@ class StreamStore(SQLBaseStore): get_prev_content=True ) - ret.reverse() - self._set_before_and_after(ret, rows, topo_order=False) + ret.reverse() + if rows: key = "s%d" % min(r["stream_ordering"] for r in rows) else: @@ -570,6 +570,18 @@ class StreamStore(SQLBaseStore): row["topological_ordering"], row["stream_ordering"],) ) + def get_max_topological_token_for_stream_and_room(self, room_id, stream_key): + sql = ( + "SELECT max(topological_ordering) FROM events" + " WHERE room_id = ? AND stream_ordering < ?" + ) + return self._execute( + "get_max_topological_token_for_stream_and_room", None, + sql, room_id, stream_key, + ).addCallback( + lambda r: r[0][0] if r else 0 + ) + def _get_max_topological_txn(self, txn): txn.execute( "SELECT MAX(topological_ordering) FROM events" diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py index 2fe6f695f5..ad5dd3bd6e 100644 --- a/tests/rest/client/v1/test_rooms.py +++ b/tests/rest/client/v1/test_rooms.py @@ -1044,13 +1044,6 @@ class RoomMessageListTestCase(RestTestCase): self.assertTrue("chunk" in response) self.assertTrue("end" in response) - @defer.inlineCallbacks - def test_stream_token_is_rejected_for_back_pagination(self): - (code, response) = yield self.mock_resource.trigger_get( - "/rooms/%s/messages?access_token=x&from=s0_0_0_0_0&dir=b" % - self.room_id) - self.assertEquals(400, code) - @defer.inlineCallbacks def test_stream_token_is_accepted_for_fwd_pagianation(self): token = "s0_0_0_0_0" @@ -1061,4 +1054,4 @@ class RoomMessageListTestCase(RestTestCase): self.assertTrue("start" in response) self.assertEquals(token, response['start']) self.assertTrue("chunk" in response) - self.assertTrue("end" in response) \ No newline at end of file + self.assertTrue("end" in response) From 3c6518ddbfb6f74eb5c0d1d0e4e8159e4ae45b04 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jan 2016 14:03:48 +0000 Subject: [PATCH 034/349] Amalgamate incremental and full sync for user --- synapse/handlers/sync.py | 56 ++++++++++++---------------------------- 1 file changed, 16 insertions(+), 40 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index f5e20d6a6e..2bd83e4b5b 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -298,46 +298,19 @@ class SyncHandler(BaseHandler): room_id, sync_config, now_token, since_token=timeline_since_token ) - notifs = yield self.unread_notifs_for_room_id( - room_id, sync_config, ephemeral_by_room + room_sync = yield 
self.incremental_sync_with_gap_for_room( + room_id, sync_config, + now_token=now_token, + since_token=timeline_since_token, + ephemeral_by_room=ephemeral_by_room, + tags_by_room=tags_by_room, + account_data_by_room=account_data_by_room, + all_ephemeral_by_room=ephemeral_by_room, + batch=batch, + full_state=True, ) - unread_notifications = {} - if notifs is not None: - unread_notifications["notification_count"] = len(notifs) - unread_notifications["highlight_count"] = len([ - 1 for notif in notifs if _action_has_highlight(notif["actions"]) - ]) - - current_state = yield self.get_state_at(room_id, now_token) - - current_state = { - (e.type, e.state_key): e - for e in sync_config.filter_collection.filter_room_state( - current_state.values() - ) - } - - account_data = self.account_data_for_room( - room_id, tags_by_room, account_data_by_room - ) - - account_data = sync_config.filter_collection.filter_room_account_data( - account_data - ) - - ephemeral = sync_config.filter_collection.filter_room_ephemeral( - ephemeral_by_room.get(room_id, []) - ) - - defer.returnValue(JoinedSyncResult( - room_id=room_id, - timeline=batch, - state=current_state, - ephemeral=ephemeral, - account_data=account_data, - unread_notifications=unread_notifications, - )) + defer.returnValue(room_sync) def account_data_for_user(self, account_data): account_data_events = [] @@ -635,7 +608,7 @@ class SyncHandler(BaseHandler): """ filtering_factor = 2 timeline_limit = sync_config.filter_collection.timeline_limit() - load_limit = max(timeline_limit * filtering_factor, 100) + load_limit = timeline_limit * filtering_factor max_repeat = 3 # Only try a few times per room, otherwise room_key = now_token.room_key end_key = room_key @@ -699,7 +672,10 @@ class SyncHandler(BaseHandler): account_data_by_room, all_ephemeral_by_room, batch, full_state=False): - if batch.limited: + if full_state: + state = yield self.get_state_at(room_id, now_token) + + elif batch.limited: current_state = yield self.get_state_at(room_id, now_token) state_at_previous_sync = yield self.get_state_at( From 571a5663993fe68e85da76f9df47c2f215a84403 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jan 2016 14:11:16 +0000 Subject: [PATCH 035/349] Change load limit params --- synapse/handlers/sync.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 2bd83e4b5b..51ec4702db 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -608,8 +608,8 @@ class SyncHandler(BaseHandler): """ filtering_factor = 2 timeline_limit = sync_config.filter_collection.timeline_limit() - load_limit = timeline_limit * filtering_factor - max_repeat = 3 # Only try a few times per room, otherwise + load_limit = max(timeline_limit * filtering_factor, 10) + max_repeat = 5 # Only try a few times per room, otherwise room_key = now_token.room_key end_key = room_key From 9101193242c59a9b56f550be05509c76e87b3bca Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Thu, 28 Jan 2016 14:18:45 +0000 Subject: [PATCH 036/349] Quot all the things --- jenkins.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/jenkins.sh b/jenkins.sh index e2bb706c7f..804bbd49f1 100755 --- a/jenkins.sh +++ b/jenkins.sh @@ -52,7 +52,7 @@ RUN_POSTGRES="" for port in $(($PORT_BASE + 1)) $(($PORT_BASE + 2)); do if psql synapse_jenkins_$port <<< ""; then - RUN_POSTGRES=$RUN_POSTGRES:$port + RUN_POSTGRES="$RUN_POSTGRES:$port" cat > localhost-$port/database.yaml << EOF name: psycopg2 args: @@ -62,7 +62,7 
@@ EOF done # Run if both postgresql databases exist -if test $RUN_POSTGRES = ":$(($PORT_BASE + 1)):$(($PORT_BASE + 2))"; then +if test "$RUN_POSTGRES" = ":$(($PORT_BASE + 1)):$(($PORT_BASE + 2))"; then echo >&2 "Running sytest with PostgreSQL"; $TOX_BIN/pip install psycopg2 ./run-tests.pl --coverage -O tap --synapse-directory $WORKSPACE \ From 7ed2bbeb11d99ef97672497879e480f91db9b99b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jan 2016 14:32:05 +0000 Subject: [PATCH 037/349] Clean up a bit. Add comment --- synapse/app/homeserver.py | 13 +++++++------ synapse/server.py | 4 ++-- synapse/storage/__init__.py | 27 +++++++++------------------ 3 files changed, 18 insertions(+), 26 deletions(-) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 504557b2fc..65562222cf 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -255,12 +255,13 @@ class SynapseHomeServer(HomeServer): quit_with_error(e.message) def get_db_conn(self): - db_conn = self.database_engine.module.connect( - **{ - k: v for k, v in self.db_config.get("args", {}).items() - if not k.startswith("cp_") - } - ) + # Any param beginning with cp_ is a parameter for adbapi, and should + # not be passed to the database engine. + db_params = { + k: v for k, v in self.db_config.get("args", {}).items() + if not k.startswith("cp_") + } + db_conn = self.database_engine.module.connect(**db_params) self.database_engine.on_new_connection(db_conn) return db_conn diff --git a/synapse/server.py b/synapse/server.py index e013a349c9..5fee7fe130 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -28,7 +28,7 @@ from synapse.notifier import Notifier from synapse.api.auth import Auth from synapse.handlers import Handlers from synapse.state import StateHandler -from synapse.storage import get_datastore +from synapse.storage import DataStore from synapse.util import Clock from synapse.util.distributor import Distributor from synapse.streams.events import EventSources @@ -117,7 +117,7 @@ class HomeServer(object): def setup(self): logger.info("Setting up.") - self.datastore = get_datastore(self) + self.datastore = DataStore(self.get_db_conn(), self) logger.info("Finished setting up.") def get_ip_from_request(self, request): diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index c8cab45f77..eb88842308 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -61,22 +61,6 @@ logger = logging.getLogger(__name__) LAST_SEEN_GRANULARITY = 120*1000 -def get_datastore(hs): - logger.info("getting called!") - - conn = hs.get_db_conn() - try: - cur = conn.cursor() - cur.execute("SELECT MIN(stream_ordering) FROM events",) - rows = cur.fetchall() - min_token = rows[0][0] if rows and rows[0] and rows[0][0] else -1 - min_token = min(min_token, -1) - - return DataStore(conn, hs, min_token) - finally: - conn.close() - - class DataStore(RoomMemberStore, RoomStore, RegistrationStore, StreamStore, ProfileStore, PresenceStore, TransactionStore, @@ -98,10 +82,17 @@ class DataStore(RoomMemberStore, RoomStore, EventPushActionsStore ): - def __init__(self, db_conn, hs, min_stream_token): + def __init__(self, db_conn, hs): self.hs = hs - self.min_stream_token = min_stream_token + cur = db_conn.cursor() + try: + cur.execute("SELECT MIN(stream_ordering) FROM events",) + rows = cur.fetchall() + self.min_stream_token = rows[0][0] if rows and rows[0] and rows[0][0] else -1 + self.min_stream_token = min(self.min_stream_token, -1) + finally: + cur.close() self.client_ip_last_seen = Cache( 
name="client_ip_last_seen", From 0935802f1e811f56121e968547fb25141ac43daa Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Thu, 28 Jan 2016 14:47:03 +0000 Subject: [PATCH 038/349] Pin pynacl to 0.3.0 Something has gone wrong in the packaging of 1.* which causes it not to compile. --- synapse/python_dependencies.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index d37dc11470..a87628ed85 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -22,7 +22,7 @@ REQUIREMENTS = { "unpaddedbase64>=1.0.1": ["unpaddedbase64>=1.0.1"], "canonicaljson>=1.0.0": ["canonicaljson>=1.0.0"], "signedjson>=1.0.0": ["signedjson>=1.0.0"], - "pynacl>=0.3.0": ["nacl>=0.3.0", "nacl.bindings"], + "pynacl==0.3.0": ["nacl==0.3.0", "nacl.bindings"], "service_identity>=1.0.0": ["service_identity>=1.0.0"], "Twisted>=15.1.0": ["twisted>=15.1.0"], "pyopenssl>=0.14": ["OpenSSL>=0.14"], From 49c328a8927a913c4905f0c136fc6ce90e361b6e Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Thu, 28 Jan 2016 14:57:40 +0000 Subject: [PATCH 039/349] Prune on fetching So we don't try to checkout a stale ref --- jenkins.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/jenkins.sh b/jenkins.sh index 804bbd49f1..ed3bdf80fa 100755 --- a/jenkins.sh +++ b/jenkins.sh @@ -26,7 +26,7 @@ TOX_BIN=$WORKSPACE/.tox/py27/bin if [[ ! -e .sytest-base ]]; then git clone https://github.com/matrix-org/sytest.git .sytest-base --mirror else - (cd .sytest-base; git fetch) + (cd .sytest-base; git fetch -p) fi rm -rf sytest From e1941442d442fe62570551071edfd936304697e7 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jan 2016 15:02:37 +0000 Subject: [PATCH 040/349] Invalidate caches properly. 
Remove unused arg --- synapse/storage/events.py | 9 ++++++--- synapse/storage/receipts.py | 10 ++++++---- synapse/storage/stream.py | 2 +- synapse/util/caches/room_change_cache.py | 4 ++-- 4 files changed, 15 insertions(+), 10 deletions(-) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 80187722ea..2d2270b297 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -128,9 +128,6 @@ class EventsStore(SQLBaseStore): is_new_state=is_new_state, current_state=current_state, ) - self._events_stream_cache.room_has_changed( - None, event.room_id, stream_ordering - ) except _RollbackButIsFineException: pass @@ -213,6 +210,12 @@ class EventsStore(SQLBaseStore): for event, _ in events_and_contexts: txn.call_after(self._invalidate_get_event_cache, event.event_id) + if not backfilled: + txn.call_after( + self._events_stream_cache.room_has_changed, + event.room_id, event.internal_metadata.stream_ordering, + ) + depth_updates = {} for event, _ in events_and_contexts: if event.internal_metadata.is_outlier(): diff --git a/synapse/storage/receipts.py b/synapse/storage/receipts.py index 7118368d97..5ffbfdec51 100644 --- a/synapse/storage/receipts.py +++ b/synapse/storage/receipts.py @@ -78,7 +78,7 @@ class ReceiptsStore(SQLBaseStore): if from_key: room_ids = yield self._receipts_stream_cache.get_rooms_changed( - self, room_ids, from_key + room_ids, from_key ) results = yield self._get_linearized_receipts_for_rooms( @@ -221,6 +221,11 @@ class ReceiptsStore(SQLBaseStore): # FIXME: This shouldn't invalidate the whole cache txn.call_after(self.get_linearized_receipts_for_room.invalidate_all) + txn.call_after( + self._receipts_stream_cache.room_has_changed, + room_id, stream_id + ) + # We don't want to clobber receipts for more recent events, so we # have to compare orderings of existing receipts sql = ( @@ -308,9 +313,6 @@ class ReceiptsStore(SQLBaseStore): stream_id_manager = yield self._receipts_id_gen.get_next(self) with stream_id_manager as stream_id: - yield self._receipts_stream_cache.room_has_changed( - self, room_id, stream_id - ) have_persisted = yield self.runInteraction( "insert_linearized_receipt", self.insert_linearized_receipt_txn, diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 5096b46864..67e7e6a76f 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -169,7 +169,7 @@ class StreamStore(SQLBaseStore): from_id = RoomStreamToken.parse_stream_token(from_key).stream room_ids = yield self._events_stream_cache.get_rooms_changed( - self, room_ids, from_id + room_ids, from_id ) if not room_ids: diff --git a/synapse/util/caches/room_change_cache.py b/synapse/util/caches/room_change_cache.py index 3a873c9c30..eb2ab5f1e4 100644 --- a/synapse/util/caches/room_change_cache.py +++ b/synapse/util/caches/room_change_cache.py @@ -51,7 +51,7 @@ class RoomStreamChangeCache(object): return False - def get_rooms_changed(self, store, room_ids, key): + def get_rooms_changed(self, room_ids, key): """Returns subset of room ids that have had new things since the given key. If the key is too old it will just return the given list. """ @@ -70,7 +70,7 @@ class RoomStreamChangeCache(object): return result - def room_has_changed(self, store, room_id, key): + def room_has_changed(self, room_id, key): """Informs the cache that the room has been changed at the given key. 
""" if key > self._earliest_known_key: From c23a8c783382a0789c757e16e104cf08654e6cf8 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jan 2016 15:55:26 +0000 Subject: [PATCH 041/349] Ensure keys to RoomStreamChangeCache are ints --- synapse/storage/stream.py | 11 ++++++----- synapse/util/caches/room_change_cache.py | 6 ++++++ 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 67e7e6a76f..6a724193e1 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -199,12 +199,13 @@ class StreamStore(SQLBaseStore): if from_key == to_key: defer.returnValue(([], from_key)) - has_changed = yield self._events_stream_cache.get_room_has_changed( - room_id, from_id - ) + if from_id: + has_changed = yield self._events_stream_cache.get_room_has_changed( + room_id, from_id + ) - if not has_changed: - defer.returnValue(([], from_key)) + if not has_changed: + defer.returnValue(([], from_key)) def f(txn): if from_id is not None: diff --git a/synapse/util/caches/room_change_cache.py b/synapse/util/caches/room_change_cache.py index eb2ab5f1e4..e8bfedd72f 100644 --- a/synapse/util/caches/room_change_cache.py +++ b/synapse/util/caches/room_change_cache.py @@ -39,6 +39,8 @@ class RoomStreamChangeCache(object): caches_by_name[self.name] = self._cache def get_room_has_changed(self, room_id, key): + assert type(key) is int + if key <= self._earliest_known_key: return True @@ -55,6 +57,8 @@ class RoomStreamChangeCache(object): """Returns subset of room ids that have had new things since the given key. If the key is too old it will just return the given list. """ + assert type(key) is int + if key > self._earliest_known_key: keys = self._cache.keys() i = keys.bisect_right(key) @@ -73,6 +77,8 @@ class RoomStreamChangeCache(object): def room_has_changed(self, room_id, key): """Informs the cache that the room has been changed at the given key. """ + assert type(key) is int + if key > self._earliest_known_key: old_key = self._room_to_key.get(room_id, None) if old_key: From 00cb3eb24b277bb37bd1b7d8449c08a37cb4b014 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jan 2016 16:37:41 +0000 Subject: [PATCH 042/349] Cache tags and account data --- synapse/storage/account_data.py | 20 ++++- synapse/storage/events.py | 2 +- synapse/storage/receipts.py | 8 +- synapse/storage/stream.py | 8 +- synapse/storage/tags.py | 14 ++++ synapse/util/caches/room_change_cache.py | 92 --------------------- synapse/util/caches/stream_change_cache.py | 95 ++++++++++++++++++++++ 7 files changed, 137 insertions(+), 102 deletions(-) delete mode 100644 synapse/util/caches/room_change_cache.py create mode 100644 synapse/util/caches/stream_change_cache.py diff --git a/synapse/storage/account_data.py b/synapse/storage/account_data.py index 9c6597e012..95294c3f6c 100644 --- a/synapse/storage/account_data.py +++ b/synapse/storage/account_data.py @@ -14,6 +14,7 @@ # limitations under the License. from ._base import SQLBaseStore +from synapse.util.caches.stream_change_cache import StreamChangeCache from twisted.internet import defer import ujson as json @@ -23,6 +24,13 @@ logger = logging.getLogger(__name__) class AccountDataStore(SQLBaseStore): + def __init__(self, hs): + super(AccountDataStore, self).__init__(hs) + + self._account_data_stream_cache = StreamChangeCache( + "AccountDataChangeCache", self._account_data_id_gen.get_max_token(None), + max_size=1000, + ) def get_account_data_for_user(self, user_id): """Get all the client account_data for a user. 
@@ -83,7 +91,7 @@ class AccountDataStore(SQLBaseStore): "get_account_data_for_room", get_account_data_for_room_txn ) - def get_updated_account_data_for_user(self, user_id, stream_id): + def get_updated_account_data_for_user(self, user_id, stream_id, room_ids=None): """Get all the client account_data for a that's changed. Args: @@ -120,6 +128,12 @@ class AccountDataStore(SQLBaseStore): return (global_account_data, account_data_by_room) + changed = self._account_data_stream_cache.get_entity_has_changed( + user_id, int(stream_id) + ) + if not changed: + defer.returnValue(({}, {})) + return self.runInteraction( "get_updated_account_data_for_user", get_updated_account_data_for_user_txn ) @@ -186,6 +200,10 @@ class AccountDataStore(SQLBaseStore): "content": content_json, } ) + txn.call_after( + self._account_data_stream_cache.entity_has_changed, + user_id, next_id, + ) self._update_max_stream_id(txn, next_id) with (yield self._account_data_id_gen.get_next(self)) as next_id: diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 2d2270b297..5e85552029 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -212,7 +212,7 @@ class EventsStore(SQLBaseStore): if not backfilled: txn.call_after( - self._events_stream_cache.room_has_changed, + self._events_stream_cache.entity_has_changed, event.room_id, event.internal_metadata.stream_ordering, ) diff --git a/synapse/storage/receipts.py b/synapse/storage/receipts.py index 5ffbfdec51..8068c73740 100644 --- a/synapse/storage/receipts.py +++ b/synapse/storage/receipts.py @@ -15,7 +15,7 @@ from ._base import SQLBaseStore from synapse.util.caches.descriptors import cachedInlineCallbacks, cachedList, cached -from synapse.util.caches.room_change_cache import RoomStreamChangeCache +from synapse.util.caches.stream_change_cache import StreamChangeCache from twisted.internet import defer @@ -30,7 +30,7 @@ class ReceiptsStore(SQLBaseStore): def __init__(self, hs): super(ReceiptsStore, self).__init__(hs) - self._receipts_stream_cache = RoomStreamChangeCache( + self._receipts_stream_cache = StreamChangeCache( "ReceiptsRoomChangeCache", self._receipts_id_gen.get_max_token(None) ) @@ -77,7 +77,7 @@ class ReceiptsStore(SQLBaseStore): room_ids = set(room_ids) if from_key: - room_ids = yield self._receipts_stream_cache.get_rooms_changed( + room_ids = yield self._receipts_stream_cache.get_entities_changed( room_ids, from_key ) @@ -222,7 +222,7 @@ class ReceiptsStore(SQLBaseStore): txn.call_after(self.get_linearized_receipts_for_room.invalidate_all) txn.call_after( - self._receipts_stream_cache.room_has_changed, + self._receipts_stream_cache.entity_has_changed, room_id, stream_id ) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 6a724193e1..c7d7893328 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -37,7 +37,7 @@ from twisted.internet import defer from ._base import SQLBaseStore from synapse.util.caches.descriptors import cachedInlineCallbacks -from synapse.util.caches.room_change_cache import RoomStreamChangeCache +from synapse.util.caches.stream_change_cache import StreamChangeCache from synapse.api.constants import EventTypes from synapse.types import RoomStreamToken from synapse.util.logutils import log_function @@ -81,7 +81,7 @@ class StreamStore(SQLBaseStore): def __init__(self, hs): super(StreamStore, self).__init__(hs) - self._events_stream_cache = RoomStreamChangeCache( + self._events_stream_cache = StreamChangeCache( "EventsRoomStreamChangeCache", 
self._stream_id_gen.get_max_token(None) ) @@ -168,7 +168,7 @@ class StreamStore(SQLBaseStore): def get_room_events_stream_for_rooms(self, room_ids, from_key, to_key, limit=0): from_id = RoomStreamToken.parse_stream_token(from_key).stream - room_ids = yield self._events_stream_cache.get_rooms_changed( + room_ids = yield self._events_stream_cache.get_entities_changed( room_ids, from_id ) @@ -200,7 +200,7 @@ class StreamStore(SQLBaseStore): defer.returnValue(([], from_key)) if from_id: - has_changed = yield self._events_stream_cache.get_room_has_changed( + has_changed = yield self._events_stream_cache.get_entity_has_changed( room_id, from_id ) diff --git a/synapse/storage/tags.py b/synapse/storage/tags.py index 4c39e07cbd..50af899192 100644 --- a/synapse/storage/tags.py +++ b/synapse/storage/tags.py @@ -15,6 +15,7 @@ from ._base import SQLBaseStore from synapse.util.caches.descriptors import cached +from synapse.util.caches.stream_change_cache import StreamChangeCache from twisted.internet import defer import ujson as json @@ -24,6 +25,13 @@ logger = logging.getLogger(__name__) class TagsStore(SQLBaseStore): + def __init__(self, hs): + super(TagsStore, self).__init__(hs) + + self._tags_stream_cache = StreamChangeCache( + "TagsChangeCache", self._account_data_id_gen.get_max_token(None), + max_size=1000, + ) def get_max_account_data_stream_id(self): """Get the current max stream id for the private user data stream @@ -80,6 +88,10 @@ class TagsStore(SQLBaseStore): room_ids = [row[0] for row in txn.fetchall()] return room_ids + changed = self._tags_stream_cache.get_entity_has_changed(user_id, int(stream_id)) + if not changed: + defer.returnValue({}) + room_ids = yield self.runInteraction( "get_updated_tags", get_updated_tags_txn ) @@ -177,6 +189,8 @@ class TagsStore(SQLBaseStore): next_id(int): The the revision to advance to. """ + txn.call_after(self._tags_stream_cache.entity_has_changed, user_id, next_id) + update_max_id_sql = ( "UPDATE account_data_max_stream_id" " SET stream_id = ?" diff --git a/synapse/util/caches/room_change_cache.py b/synapse/util/caches/room_change_cache.py deleted file mode 100644 index e8bfedd72f..0000000000 --- a/synapse/util/caches/room_change_cache.py +++ /dev/null @@ -1,92 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from synapse.util.caches import cache_counter, caches_by_name - - -from blist import sorteddict -import logging - - -logger = logging.getLogger(__name__) - - -class RoomStreamChangeCache(object): - """Keeps track of the stream_id of the latest change in rooms. - - Given a list of rooms and stream key, it will give a subset of rooms that - may have changed since that key. If the key is too old then the cache - will simply return all rooms. 
- """ - def __init__(self, name, current_key, size_of_cache=10000): - self._size_of_cache = size_of_cache - self._room_to_key = {} - self._cache = sorteddict() - self._earliest_known_key = current_key - self.name = name - caches_by_name[self.name] = self._cache - - def get_room_has_changed(self, room_id, key): - assert type(key) is int - - if key <= self._earliest_known_key: - return True - - room_key = self._room_to_key.get(room_id, None) - if room_key is None: - return True - - if key < room_key: - return True - - return False - - def get_rooms_changed(self, room_ids, key): - """Returns subset of room ids that have had new things since the - given key. If the key is too old it will just return the given list. - """ - assert type(key) is int - - if key > self._earliest_known_key: - keys = self._cache.keys() - i = keys.bisect_right(key) - - result = set( - self._cache[k] for k in keys[i:] - ).intersection(room_ids) - - cache_counter.inc_hits(self.name) - else: - result = room_ids - cache_counter.inc_misses(self.name) - - return result - - def room_has_changed(self, room_id, key): - """Informs the cache that the room has been changed at the given key. - """ - assert type(key) is int - - if key > self._earliest_known_key: - old_key = self._room_to_key.get(room_id, None) - if old_key: - key = max(key, old_key) - self._cache.pop(old_key, None) - self._cache[key] = room_id - - while len(self._cache) > self._size_of_cache: - k, r = self._cache.popitem() - self._earliest_key = max(k, self._earliest_key) - self._room_to_key.pop(r, None) diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py new file mode 100644 index 0000000000..33b37f7f29 --- /dev/null +++ b/synapse/util/caches/stream_change_cache.py @@ -0,0 +1,95 @@ +# -*- coding: utf-8 -*- +# Copyright 2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from synapse.util.caches import cache_counter, caches_by_name + + +from blist import sorteddict +import logging + + +logger = logging.getLogger(__name__) + + +class StreamChangeCache(object): + """Keeps track of the stream positions of the latest change in a set of entities. + + Typically the entity will be a room or user id. + + Given a list of entities and a stream position, it will give a subset of + entities that may have changed since that position. If position key is too + old then the cache will simply return all given entities. 
+ """ + def __init__(self, name, current_stream_pos, max_size=10000): + self._max_size = max_size + self._entity_to_key = {} + self._cache = sorteddict() + self._earliest_known_stream_pos = current_stream_pos + self.name = name + caches_by_name[self.name] = self._cache + + def get_entity_has_changed(self, entity, stream_pos): + assert type(stream_pos) is int + + if stream_pos <= self._earliest_known_stream_pos: + return True + + latest_entity_change_pos = self._entity_to_key.get(entity, None) + if latest_entity_change_pos is None: + return True + + if stream_pos < latest_entity_change_pos: + return True + + return False + + def get_entities_changed(self, entities, stream_pos): + """Returns subset of entities that have had new things since the + given position. If the position is too old it will just return the given list. + """ + assert type(stream_pos) is int + + if stream_pos > self._earliest_known_stream_pos: + keys = self._cache.keys() + i = keys.bisect_right(stream_pos) + + result = set( + self._cache[k] for k in keys[i:] + ).intersection(entities) + + cache_counter.inc_hits(self.name) + else: + result = entities + cache_counter.inc_misses(self.name) + + return result + + def entity_has_changed(self, entitiy, stream_pos): + """Informs the cache that the entitiy has been changed at the given + position. + """ + assert type(stream_pos) is int + + if stream_pos > self._earliest_known_stream_pos: + old_pos = self._entity_to_key.get(entitiy, None) + if old_pos: + stream_pos = max(stream_pos, old_pos) + self._cache.pop(old_pos, None) + self._cache[stream_pos] = entitiy + + while len(self._cache) > self._max_size: + k, r = self._cache.popitem() + self._earliest_known_stream_pos = max(k, self._earliest_known_stream_pos) + self._entity_to_key.pop(r, None) From 45cf827c8fe7163a51f1d0d7c9e2531da9b58c8d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jan 2016 16:39:18 +0000 Subject: [PATCH 043/349] Change name and doc has_entity_changed --- synapse/storage/account_data.py | 2 +- synapse/storage/stream.py | 2 +- synapse/storage/tags.py | 2 +- synapse/util/caches/stream_change_cache.py | 4 +++- 4 files changed, 6 insertions(+), 4 deletions(-) diff --git a/synapse/storage/account_data.py b/synapse/storage/account_data.py index 95294c3f6c..62e49e1c0e 100644 --- a/synapse/storage/account_data.py +++ b/synapse/storage/account_data.py @@ -128,7 +128,7 @@ class AccountDataStore(SQLBaseStore): return (global_account_data, account_data_by_room) - changed = self._account_data_stream_cache.get_entity_has_changed( + changed = self._account_data_stream_cache.has_entity_changed( user_id, int(stream_id) ) if not changed: diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index c7d7893328..6e81d46c60 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -200,7 +200,7 @@ class StreamStore(SQLBaseStore): defer.returnValue(([], from_key)) if from_id: - has_changed = yield self._events_stream_cache.get_entity_has_changed( + has_changed = yield self._events_stream_cache.has_entity_changed( room_id, from_id ) diff --git a/synapse/storage/tags.py b/synapse/storage/tags.py index 50af899192..75ce04092d 100644 --- a/synapse/storage/tags.py +++ b/synapse/storage/tags.py @@ -88,7 +88,7 @@ class TagsStore(SQLBaseStore): room_ids = [row[0] for row in txn.fetchall()] return room_ids - changed = self._tags_stream_cache.get_entity_has_changed(user_id, int(stream_id)) + changed = self._tags_stream_cache.has_entity_changed(user_id, int(stream_id)) if not changed: defer.returnValue({}) 
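Stepping back from the individual hunks, the renamed StreamChangeCache is driven the same way everywhere in this series: the writer records the stream position of each change with entity_has_changed, and readers consult has_entity_changed or get_entities_changed before issuing a query. A minimal sketch of that pattern with invented room ids and stream positions (illustrative only, not code from these patches):

    from synapse.util.caches.stream_change_cache import StreamChangeCache

    # e.g. a per-room cache like the receipts one above; positions are invented.
    cache = StreamChangeCache("ExampleReceiptsChangeCache", current_stream_pos=100)

    # Writer side: a receipt was persisted for !roomA at stream position 105.
    cache.entity_has_changed("!roomA:example.com", 105)

    # Reader side, single entity: may !roomA have changed since position 103?
    cache.has_entity_changed("!roomA:example.com", 103)        # True -> hit the database

    # Reader side, bulk: which of these rooms might have new data since 103?
    cache.get_entities_changed(["!roomA:example.com", "!roomB:example.com"], 103)
    # -> {"!roomA:example.com"}; !roomB can be skipped entirely
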
diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py index 33b37f7f29..3ca0e57780 100644 --- a/synapse/util/caches/stream_change_cache.py +++ b/synapse/util/caches/stream_change_cache.py @@ -40,7 +40,9 @@ class StreamChangeCache(object): self.name = name caches_by_name[self.name] = self._cache - def get_entity_has_changed(self, entity, stream_pos): + def has_entity_changed(self, entity, stream_pos): + """Returns True if the entity may have been updated since stream_pos + """ assert type(stream_pos) is int if stream_pos <= self._earliest_known_stream_pos: From fdca8ec4187e1b4ea93cbfe17ece6ca4cbadd519 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jan 2016 16:41:59 +0000 Subject: [PATCH 044/349] Add events index --- .../schema/delta/28/events_room_stream.sql | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 synapse/storage/schema/delta/28/events_room_stream.sql diff --git a/synapse/storage/schema/delta/28/events_room_stream.sql b/synapse/storage/schema/delta/28/events_room_stream.sql new file mode 100644 index 0000000000..200c35e6e2 --- /dev/null +++ b/synapse/storage/schema/delta/28/events_room_stream.sql @@ -0,0 +1,16 @@ +/* Copyright 2016 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+*/ + +CREATE INDEX events_room_stream on events(room_id, stream_ordering); From 8fe8951a8d997a652d16758a7abed2b8afd117e2 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jan 2016 17:09:09 +0000 Subject: [PATCH 045/349] Cache filters --- synapse/storage/filtering.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/synapse/storage/filtering.py b/synapse/storage/filtering.py index f8fc9bdddc..5248736816 100644 --- a/synapse/storage/filtering.py +++ b/synapse/storage/filtering.py @@ -16,12 +16,13 @@ from twisted.internet import defer from ._base import SQLBaseStore +from synapse.util.caches.descriptors import cachedInlineCallbacks import simplejson as json class FilteringStore(SQLBaseStore): - @defer.inlineCallbacks + @cachedInlineCallbacks(num_args=2) def get_user_filter(self, user_localpart, filter_id): def_json = yield self._simple_select_one_onecol( table="user_filters", From 35981c8b71a2ce675f3b8414ca0e7920a5d1658e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jan 2016 17:19:51 +0000 Subject: [PATCH 046/349] Fix test --- synapse/api/filtering.py | 5 +++++ tests/api/test_filtering.py | 7 ++++--- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py index 6c13ada5df..6eff83e5f8 100644 --- a/synapse/api/filtering.py +++ b/synapse/api/filtering.py @@ -15,6 +15,8 @@ from synapse.api.errors import SynapseError from synapse.types import UserID, RoomID +import ujson as json + class Filtering(object): @@ -149,6 +151,9 @@ class FilterCollection(object): "include_leave", False ) + def __repr__(self): + return "" % (json.dumps(self._filter_json),) + def get_filter_json(self): return self._filter_json diff --git a/tests/api/test_filtering.py b/tests/api/test_filtering.py index 1a4e439d30..ceb0089268 100644 --- a/tests/api/test_filtering.py +++ b/tests/api/test_filtering.py @@ -382,19 +382,20 @@ class FilteringTestCase(unittest.TestCase): "types": ["m.*"] } } - user = UserID.from_string("@" + user_localpart + ":test") + filter_id = yield self.datastore.add_user_filter( - user_localpart=user_localpart, + user_localpart=user_localpart + "2", user_filter=user_filter_json, ) event = MockEvent( + event_id="$asdasd:localhost", sender="@foo:bar", type="custom.avatar.3d.crazy", ) events = [event] user_filter = yield self.filtering.get_user_filter( - user_localpart=user_localpart, + user_localpart=user_localpart + "2", filter_id=filter_id, ) From 0663c5bd52d52e095258e312fb62c2a9cb3f200a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jan 2016 17:27:28 +0000 Subject: [PATCH 047/349] Include cache hits with has_entity_changed --- synapse/util/caches/stream_change_cache.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py index 3ca0e57780..b48f5cb273 100644 --- a/synapse/util/caches/stream_change_cache.py +++ b/synapse/util/caches/stream_change_cache.py @@ -46,15 +46,19 @@ class StreamChangeCache(object): assert type(stream_pos) is int if stream_pos <= self._earliest_known_stream_pos: + cache_counter.inc_misses(self.name) return True latest_entity_change_pos = self._entity_to_key.get(entity, None) if latest_entity_change_pos is None: + cache_counter.inc_misses(self.name) return True if stream_pos < latest_entity_change_pos: + cache_counter.inc_misses(self.name) return True + cache_counter.inc_hits(self.name) return False def get_entities_changed(self, entities, stream_pos): From 
03b2c2577cbf51c80a42319689e1cb4903b8c4af Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jan 2016 17:29:24 +0000 Subject: [PATCH 048/349] Don't use defer.returnValue --- synapse/storage/account_data.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/account_data.py b/synapse/storage/account_data.py index 62e49e1c0e..88404059e8 100644 --- a/synapse/storage/account_data.py +++ b/synapse/storage/account_data.py @@ -132,7 +132,7 @@ class AccountDataStore(SQLBaseStore): user_id, int(stream_id) ) if not changed: - defer.returnValue(({}, {})) + return ({}, {}) return self.runInteraction( "get_updated_account_data_for_user", get_updated_account_data_for_user_txn From 82cf3a8043a6ffe1f57d78bf88eba06ad0c53fbe Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jan 2016 17:44:04 +0000 Subject: [PATCH 049/349] Fix inequalities --- synapse/util/caches/stream_change_cache.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py index b48f5cb273..483e0cdf96 100644 --- a/synapse/util/caches/stream_change_cache.py +++ b/synapse/util/caches/stream_change_cache.py @@ -45,7 +45,7 @@ class StreamChangeCache(object): """ assert type(stream_pos) is int - if stream_pos <= self._earliest_known_stream_pos: + if stream_pos < self._earliest_known_stream_pos: cache_counter.inc_misses(self.name) return True @@ -67,7 +67,7 @@ class StreamChangeCache(object): """ assert type(stream_pos) is int - if stream_pos > self._earliest_known_stream_pos: + if stream_pos >= self._earliest_known_stream_pos: keys = self._cache.keys() i = keys.bisect_right(stream_pos) From 40431251cba7ef1b623559db972600ece40818a4 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jan 2016 18:05:43 +0000 Subject: [PATCH 050/349] Correctly update _entity_to_key --- synapse/util/caches/stream_change_cache.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py index 483e0cdf96..22a9f8f467 100644 --- a/synapse/util/caches/stream_change_cache.py +++ b/synapse/util/caches/stream_change_cache.py @@ -82,18 +82,19 @@ class StreamChangeCache(object): return result - def entity_has_changed(self, entitiy, stream_pos): - """Informs the cache that the entitiy has been changed at the given + def entity_has_changed(self, entity, stream_pos): + """Informs the cache that the entity has been changed at the given position. """ assert type(stream_pos) is int if stream_pos > self._earliest_known_stream_pos: - old_pos = self._entity_to_key.get(entitiy, None) + old_pos = self._entity_to_key.get(entity, None) if old_pos: stream_pos = max(stream_pos, old_pos) self._cache.pop(old_pos, None) - self._cache[stream_pos] = entitiy + self._cache[stream_pos] = entity + self._entity_to_key[entity] = stream_pos while len(self._cache) > self._max_size: k, r = self._cache.popitem() From 3f5dd18bd44ae426d3b1ff062dd64acbad72f8ae Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jan 2016 18:11:41 +0000 Subject: [PATCH 051/349] If the same as the earliest key, assume nothing has changed. 
--- synapse/util/caches/stream_change_cache.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py index 22a9f8f467..c673b1bdfc 100644 --- a/synapse/util/caches/stream_change_cache.py +++ b/synapse/util/caches/stream_change_cache.py @@ -49,6 +49,11 @@ class StreamChangeCache(object): cache_counter.inc_misses(self.name) return True + if stream_pos == self._earliest_known_stream_pos: + # If the same as the earliest key, assume nothing has changed. + cache_counter.inc_hits(self.name) + return False + latest_entity_change_pos = self._entity_to_key.get(entity, None) if latest_entity_change_pos is None: cache_counter.inc_misses(self.name) From 467c27a1f90b873d6838ad1351399551cfa9cc07 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jan 2016 18:20:00 +0000 Subject: [PATCH 052/349] Amalgamate tags and account data stream caches --- synapse/storage/account_data.py | 3 ++- synapse/storage/tags.py | 18 +++++++----------- 2 files changed, 9 insertions(+), 12 deletions(-) diff --git a/synapse/storage/account_data.py b/synapse/storage/account_data.py index 88404059e8..822c8bbe00 100644 --- a/synapse/storage/account_data.py +++ b/synapse/storage/account_data.py @@ -28,7 +28,8 @@ class AccountDataStore(SQLBaseStore): super(AccountDataStore, self).__init__(hs) self._account_data_stream_cache = StreamChangeCache( - "AccountDataChangeCache", self._account_data_id_gen.get_max_token(None), + "AccountDataAndTagsChangeCache", + self._account_data_id_gen.get_max_token(None), max_size=1000, ) diff --git a/synapse/storage/tags.py b/synapse/storage/tags.py index 75ce04092d..e1a9c0c261 100644 --- a/synapse/storage/tags.py +++ b/synapse/storage/tags.py @@ -15,7 +15,6 @@ from ._base import SQLBaseStore from synapse.util.caches.descriptors import cached -from synapse.util.caches.stream_change_cache import StreamChangeCache from twisted.internet import defer import ujson as json @@ -25,14 +24,6 @@ logger = logging.getLogger(__name__) class TagsStore(SQLBaseStore): - def __init__(self, hs): - super(TagsStore, self).__init__(hs) - - self._tags_stream_cache = StreamChangeCache( - "TagsChangeCache", self._account_data_id_gen.get_max_token(None), - max_size=1000, - ) - def get_max_account_data_stream_id(self): """Get the current max stream id for the private user data stream @@ -88,7 +79,9 @@ class TagsStore(SQLBaseStore): room_ids = [row[0] for row in txn.fetchall()] return room_ids - changed = self._tags_stream_cache.has_entity_changed(user_id, int(stream_id)) + changed = self._account_data_stream_cache.has_entity_changed( + user_id, int(stream_id) + ) if not changed: defer.returnValue({}) @@ -189,7 +182,10 @@ class TagsStore(SQLBaseStore): next_id(int): The the revision to advance to. 
""" - txn.call_after(self._tags_stream_cache.entity_has_changed, user_id, next_id) + txn.call_after( + self._account_data_stream_cache.entity_has_changed, + user_id, next_id + ) update_max_id_sql = ( "UPDATE account_data_max_stream_id" From 50e18938a96583b2dc8cbc69aa7182a35e3435af Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 29 Jan 2016 10:00:45 +0000 Subject: [PATCH 053/349] Reset size on clear --- synapse/util/caches/lrucache.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py index e6a66dc041..5182cadb9e 100644 --- a/synapse/util/caches/lrucache.py +++ b/synapse/util/caches/lrucache.py @@ -37,6 +37,7 @@ class LruCache(object): """ def __init__(self, max_size, keylen=1, cache_type=dict): cache = cache_type() + self.cache = cache # Used for introspection. self.size = 0 list_root = [] list_root[:] = [list_root, list_root, None, None] @@ -142,6 +143,7 @@ class LruCache(object): list_root[NEXT] = list_root list_root[PREV] = list_root cache.clear() + self.size = 0 @synchronized def cache_len(): From 766526e1142e7ad0ffb43bd075b0ff2d6265e4cb Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 29 Jan 2016 10:11:21 +0000 Subject: [PATCH 054/349] Make TreeCache keep track of its own size. --- synapse/util/caches/lrucache.py | 9 +++------ synapse/util/caches/treecache.py | 7 +++++++ 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py index 5182cadb9e..ca9ffdf1b4 100644 --- a/synapse/util/caches/lrucache.py +++ b/synapse/util/caches/lrucache.py @@ -38,7 +38,6 @@ class LruCache(object): def __init__(self, max_size, keylen=1, cache_type=dict): cache = cache_type() self.cache = cache # Used for introspection. - self.size = 0 list_root = [] list_root[:] = [list_root, list_root, None, None] @@ -61,7 +60,6 @@ class LruCache(object): prev_node[NEXT] = node next_node[PREV] = node cache[key] = node - self.size += 1 def move_node_to_front(node): prev_node = node[PREV] @@ -80,7 +78,6 @@ class LruCache(object): next_node = node[NEXT] prev_node[NEXT] = next_node next_node[PREV] = prev_node - self.size -= 1 @synchronized def cache_get(key, default=None): @@ -99,7 +96,7 @@ class LruCache(object): node[VALUE] = value else: add_node(key, value) - if self.size > max_size: + if len(cache) > max_size: todelete = list_root[PREV] delete_node(todelete) cache.pop(todelete[KEY], None) @@ -111,7 +108,7 @@ class LruCache(object): return node[VALUE] else: add_node(key, value) - if self.size > max_size: + if len(cache) > max_size: todelete = list_root[PREV] delete_node(todelete) cache.pop(todelete[KEY], None) @@ -147,7 +144,7 @@ class LruCache(object): @synchronized def cache_len(): - return self.size + return len(cache) @synchronized def cache_contains(key): diff --git a/synapse/util/caches/treecache.py b/synapse/util/caches/treecache.py index 3b58860910..a29ea8144e 100644 --- a/synapse/util/caches/treecache.py +++ b/synapse/util/caches/treecache.py @@ -8,6 +8,7 @@ class TreeCache(object): Keys must be tuples. 
""" def __init__(self): + self.size = 0 self.root = {} def __setitem__(self, key, value): @@ -21,6 +22,7 @@ class TreeCache(object): for k in key[:-1]: node = node.setdefault(k, {}) node[key[-1]] = value + self.size += 1 def get(self, key, default=None): node = self.root @@ -31,6 +33,7 @@ class TreeCache(object): return node.get(key[-1], default) def clear(self): + self.size = 0 self.root = {} def pop(self, key, default=None): @@ -57,4 +60,8 @@ class TreeCache(object): break node_and_keys[i+1][0].pop(k) + self.size -= 1 return popped + + def __len__(self): + return self.size From a30364c1f99bdd7d5cb0fe82ebdfe52d996defef Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 29 Jan 2016 10:44:46 +0000 Subject: [PATCH 055/349] Correctly bookkeep the size of TreeCache --- synapse/util/caches/treecache.py | 31 ++++++++++++++++++++++++++++--- 1 file changed, 28 insertions(+), 3 deletions(-) diff --git a/synapse/util/caches/treecache.py b/synapse/util/caches/treecache.py index a29ea8144e..3331ea9eba 100644 --- a/synapse/util/caches/treecache.py +++ b/synapse/util/caches/treecache.py @@ -21,7 +21,7 @@ class TreeCache(object): node = self.root for k in key[:-1]: node = node.setdefault(k, {}) - node[key[-1]] = value + node[key[-1]] = _Entry(value) self.size += 1 def get(self, key, default=None): @@ -30,7 +30,7 @@ class TreeCache(object): node = node.get(k, None) if node is None: return default - return node.get(key[-1], default) + return node.get(key[-1], _Entry(default)).value def clear(self): self.size = 0 @@ -60,8 +60,33 @@ class TreeCache(object): break node_and_keys[i+1][0].pop(k) - self.size -= 1 + popped, cnt = _strip_and_count_entires(popped) + self.size -= cnt return popped def __len__(self): return self.size + + +class _Entry(object): + __slots__ = ["value"] + + def __init__(self, value): + object.__setattr__(self, "value", value) + + +def _strip_and_count_entires(d): + """Takes an _Entry or dict with leaves of _Entry's, and either returns the + value or a dictionary with _Entry's replaced by their values. 
+ + Also returns the count of _Entry's + """ + if isinstance(d, dict): + cnt = 0 + for key, value in d.items(): + v, n = _strip_and_count_entires(value) + d[key] = v + cnt += n + return d, cnt + else: + return d.value, 1 From c046630c330b5abc4403f15afa2ffb13b7b8aa08 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 29 Jan 2016 11:17:54 +0000 Subject: [PATCH 056/349] Remove spurious self.size --- synapse/util/caches/lrucache.py | 1 - 1 file changed, 1 deletion(-) diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py index ca9ffdf1b4..f7423f2fab 100644 --- a/synapse/util/caches/lrucache.py +++ b/synapse/util/caches/lrucache.py @@ -140,7 +140,6 @@ class LruCache(object): list_root[NEXT] = list_root list_root[PREV] = list_root cache.clear() - self.size = 0 @synchronized def cache_len(): From fb7299800feb0b8bc0f8429fc5b840abea0609d3 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 29 Jan 2016 11:29:14 +0000 Subject: [PATCH 057/349] Directly set self.value --- synapse/util/caches/treecache.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/util/caches/treecache.py b/synapse/util/caches/treecache.py index 3331ea9eba..29d02f7e95 100644 --- a/synapse/util/caches/treecache.py +++ b/synapse/util/caches/treecache.py @@ -72,7 +72,7 @@ class _Entry(object): __slots__ = ["value"] def __init__(self, value): - object.__setattr__(self, "value", value) + self.value = value def _strip_and_count_entires(d): From 4fce59f2747d9c73784470e84396a09ae70bbda7 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 29 Jan 2016 11:33:11 +0000 Subject: [PATCH 058/349] Add tests --- tests/util/test_lrucache.py | 7 +++++++ tests/util/test_treecache.py | 12 ++++++++++++ 2 files changed, 19 insertions(+) diff --git a/tests/util/test_lrucache.py b/tests/util/test_lrucache.py index 2cd3d26454..bab366fb7f 100644 --- a/tests/util/test_lrucache.py +++ b/tests/util/test_lrucache.py @@ -19,6 +19,7 @@ from .. import unittest from synapse.util.caches.lrucache import LruCache from synapse.util.caches.treecache import TreeCache + class LruCacheTestCase(unittest.TestCase): def test_get_set(self): @@ -72,3 +73,9 @@ class LruCacheTestCase(unittest.TestCase): self.assertEquals(cache.get(("vehicles", "car")), "vroom") self.assertEquals(cache.get(("vehicles", "train")), "chuff") # Man from del_multi say "Yes". 
+ + def test_clear(self): + cache = LruCache(1) + cache["key"] = 1 + cache.clear() + self.assertEquals(len(cache), 0) diff --git a/tests/util/test_treecache.py b/tests/util/test_treecache.py index 9946ceb3f1..1efbeb6b33 100644 --- a/tests/util/test_treecache.py +++ b/tests/util/test_treecache.py @@ -25,6 +25,7 @@ class TreeCacheTestCase(unittest.TestCase): cache[("b",)] = "B" self.assertEquals(cache.get(("a",)), "A") self.assertEquals(cache.get(("b",)), "B") + self.assertEquals(len(cache), 2) def test_pop_onelevel(self): cache = TreeCache() @@ -33,6 +34,7 @@ class TreeCacheTestCase(unittest.TestCase): self.assertEquals(cache.pop(("a",)), "A") self.assertEquals(cache.pop(("a",)), None) self.assertEquals(cache.get(("b",)), "B") + self.assertEquals(len(cache), 1) def test_get_set_twolevel(self): cache = TreeCache() @@ -42,6 +44,7 @@ class TreeCacheTestCase(unittest.TestCase): self.assertEquals(cache.get(("a", "a")), "AA") self.assertEquals(cache.get(("a", "b")), "AB") self.assertEquals(cache.get(("b", "a")), "BA") + self.assertEquals(len(cache), 3) def test_pop_twolevel(self): cache = TreeCache() @@ -53,6 +56,7 @@ class TreeCacheTestCase(unittest.TestCase): self.assertEquals(cache.get(("a", "b")), "AB") self.assertEquals(cache.pop(("b", "a")), "BA") self.assertEquals(cache.pop(("b", "a")), None) + self.assertEquals(len(cache), 1) def test_pop_mixedlevel(self): cache = TreeCache() @@ -64,3 +68,11 @@ class TreeCacheTestCase(unittest.TestCase): self.assertEquals(cache.get(("a", "a")), None) self.assertEquals(cache.get(("a", "b")), None) self.assertEquals(cache.get(("b", "a")), "BA") + self.assertEquals(len(cache), 1) + + def test_clear(self): + cache = TreeCache() + cache[("a",)] = "A" + cache[("b",)] = "B" + cache.clear() + self.assertEquals(len(cache), 0) From 5687a00e4ec24b78e41b0377bfe524d7f0b13a43 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Fri, 29 Jan 2016 13:26:15 +0000 Subject: [PATCH 059/349] Allow three_pid_creds as well as threePidCreds in /account/3pid --- synapse/rest/client/v2_alpha/account.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index d507172704..a614b79d45 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -116,9 +116,10 @@ class ThreepidRestServlet(RestServlet): body = parse_json_dict_from_request(request) - if 'threePidCreds' not in body: + threePidCreds = body.get('threePidCreds') + threePidCreds = body.get('three_pid_creds', threePidCreds) + if threePidCreds is None: raise SynapseError(400, "Missing param", Codes.MISSING_PARAM) - threePidCreds = body['threePidCreds'] requester = yield self.auth.get_user_by_req(request) user_id = requester.user.to_string() From ea320d34641f98c7aa477e2340ff265c1ac63419 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 29 Jan 2016 13:34:48 +0000 Subject: [PATCH 060/349] Don't work out unread_notifs_for_room_id unless needed --- synapse/handlers/sync.py | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 51ec4702db..075566417f 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -697,17 +697,6 @@ class SyncHandler(BaseHandler): if just_joined: state = yield self.get_state_at(room_id, now_token) - notifs = yield self.unread_notifs_for_room_id( - room_id, sync_config, all_ephemeral_by_room - ) - - unread_notifications = {} - if notifs is not None: - 
unread_notifications["notification_count"] = len(notifs) - unread_notifications["highlight_count"] = len([ - 1 for notif in notifs if _action_has_highlight(notif["actions"]) - ]) - state = { (e.type, e.state_key): e for e in sync_config.filter_collection.filter_room_state(state.values()) @@ -725,6 +714,7 @@ class SyncHandler(BaseHandler): ephemeral_by_room.get(room_id, []) ) + unread_notifications = {} room_sync = JoinedSyncResult( room_id=room_id, timeline=batch, @@ -734,6 +724,17 @@ class SyncHandler(BaseHandler): unread_notifications=unread_notifications, ) + if room_sync: + notifs = yield self.unread_notifs_for_room_id( + room_id, sync_config, all_ephemeral_by_room + ) + + if notifs is not None: + unread_notifications["notification_count"] = len(notifs) + unread_notifications["highlight_count"] = len([ + 1 for notif in notifs if _action_has_highlight(notif["actions"]) + ]) + logger.debug("Room sync: %r", room_sync) defer.returnValue(room_sync) From ebc5f00efed5c7f72601933f55032947077c50a0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 29 Jan 2016 13:37:40 +0000 Subject: [PATCH 061/349] Bump AccountDataAndTagsChangeCache size --- synapse/storage/account_data.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/account_data.py b/synapse/storage/account_data.py index 822c8bbe00..ed6587429b 100644 --- a/synapse/storage/account_data.py +++ b/synapse/storage/account_data.py @@ -30,7 +30,7 @@ class AccountDataStore(SQLBaseStore): self._account_data_stream_cache = StreamChangeCache( "AccountDataAndTagsChangeCache", self._account_data_id_gen.get_max_token(None), - max_size=1000, + max_size=10000, ) def get_account_data_for_user(self, user_id): From 96bb4bf38ab9fe204c956b3d3e88ccb19acc1728 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 29 Jan 2016 13:56:22 +0000 Subject: [PATCH 062/349] Update CHANGES --- CHANGES.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index dd4863c3e6..b9c956fd03 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,4 +1,4 @@ -Changes in synapse v0.12.1-rc1 (2016-01-xx) +Changes in synapse v0.12.1-rc1 (2016-01-29) =========================================== Features: @@ -18,8 +18,8 @@ Changes: * Change to require unbanning before other membership changes (PR #501) * Change default push rules to notify for all messages (PR #486) * Change default push rules to not notify on membership changes (PR #514) -* Change default push rules to only notify messages in one to one rooms - (PR #529) +* Change default push rules in one to one rooms to only notify for events that + are messages (PR #529) * Change ``/sync`` to reject requests with a ``from`` query param (PR #512) * Change server manhole to use SSH rather than telnet (PR #473) * Change server to require AS users to be registered before use (PR #487) @@ -39,7 +39,7 @@ Bug fixes: * Fix bug where ``/events`` would always return immediately for guest users (PR #480) * Fix bug where ``/sync`` unexpectedly returned old left rooms (PR #481) -* Fix enabling and disabling push rules (P #498) +* Fix enabling and disabling push rules (PR #498) * Fix bug where ``/register`` returned 500 when given unicode username (PR #513) From 0fcafbece8258105fa4e81be4657ecc36359d258 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Fri, 29 Jan 2016 14:12:26 +0000 Subject: [PATCH 063/349] Add config option for setting the trusted id servers, disabling checking the ID server in integration tests --- synapse/config/registration.py | 7 +++++++ 
synapse/handlers/identity.py | 25 +++++++++++++++++-------- 2 files changed, 24 insertions(+), 8 deletions(-) diff --git a/synapse/config/registration.py b/synapse/config/registration.py index d3f4b9d543..76d2d2d640 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -34,6 +34,7 @@ class RegistrationConfig(Config): self.registration_shared_secret = config.get("registration_shared_secret") self.macaroon_secret_key = config.get("macaroon_secret_key") self.bcrypt_rounds = config.get("bcrypt_rounds", 12) + self.trusted_third_party_id_servers = config["trusted_third_party_id_servers"] self.allow_guest_access = config.get("allow_guest_access", False) def default_config(self, **kwargs): @@ -60,6 +61,12 @@ class RegistrationConfig(Config): # participate in rooms hosted on this server which have been made # accessible to anonymous users. allow_guest_access: False + + # The list of identity servers trusted to verify third party + # identifiers by this server. + trusted_third_party_id_servers: + - matrix.org + - vector.im """ % locals() def add_arguments(self, parser): diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index 819ec57c4f..77f133be8f 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -36,14 +36,15 @@ class IdentityHandler(BaseHandler): self.http_client = hs.get_simple_http_client() + self.trusted_id_servers = set(hs.config.trusted_third_party_id_servers) + self.trust_any_id_server_just_for_testing_do_not_use = ( + hs.config.use_insecure_ssl_client_just_for_testing_do_not_use + ) + @defer.inlineCallbacks def threepid_from_creds(self, creds): yield run_on_reactor() - # XXX: make this configurable! - # trustedIdServers = ['matrix.org', 'localhost:8090'] - trustedIdServers = ['matrix.org', 'vector.im'] - if 'id_server' in creds: id_server = creds['id_server'] elif 'idServer' in creds: @@ -58,10 +59,18 @@ class IdentityHandler(BaseHandler): else: raise SynapseError(400, "No client_secret in creds") - if id_server not in trustedIdServers: - logger.warn('%s is not a trusted ID server: rejecting 3pid ' + - 'credentials', id_server) - defer.returnValue(None) + if id_server not in self.trusted_id_servers: + if self.trust_any_id_server_just_for_testing_do_not_use: + logger.warn( + "Trusting untrustworthy ID server %r even though it isn't" + " in the trusted id list for testing because" + " 'use_insecure_ssl_client_just_for_testing_do_not_use'" + " is set in the config" + ) + else: + logger.warn('%s is not a trusted ID server: rejecting 3pid ' + + 'credentials', id_server) + defer.returnValue(None) data = {} try: From 18579534ea67f2d98c189e2ddeccc4bfecb491eb Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 29 Jan 2016 14:37:59 +0000 Subject: [PATCH 064/349] Prefill stream change caches --- synapse/storage/__init__.py | 49 +++++++++++++++++++++- synapse/storage/account_data.py | 9 ---- synapse/storage/stream.py | 8 ---- synapse/util/caches/stream_change_cache.py | 5 ++- 4 files changed, 52 insertions(+), 19 deletions(-) diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index eb88842308..95ae97d507 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -45,9 +45,10 @@ from .search import SearchStore from .tags import TagsStore from .account_data import AccountDataStore - from util.id_generators import IdGenerator, StreamIdGenerator +from synapse.util.caches.stream_change_cache import StreamChangeCache + import logging @@ -117,8 +118,54 @@ class 
DataStore(RoomMemberStore, RoomStore, self._push_rule_id_gen = IdGenerator("push_rules", "id", self) self._push_rules_enable_id_gen = IdGenerator("push_rules_enable", "id", self) + events_max = self._stream_id_gen.get_max_token(None) + event_cache_prefill = self._get_cache_dict( + db_conn, "events", + entity_column="room_id", + stream_column="stream_ordering", + max_value=events_max, + ) + self._events_stream_cache = StreamChangeCache( + "EventsRoomStreamChangeCache", events_max, + prefilled_cache=event_cache_prefill, + ) + + account_max = self._account_data_id_gen.get_max_token(None) + account_cache_prefill = self._get_cache_dict( + db_conn, "account_data", + entity_column="user_id", + stream_column="stream_id", + max_value=account_max, + ) + self._account_data_stream_cache = StreamChangeCache( + "AccountDataAndTagsChangeCache", account_max, + prefilled_cache=account_cache_prefill, + ) + super(DataStore, self).__init__(hs) + def _get_cache_dict(self, db_conn, table, entity_column, stream_column, max_value): + sql = ( + "SELECT %(entity)s, MAX(%(stream)s) FROM %(table)s" + " WHERE %(stream)s > max(? - 100000, 0)" + " GROUP BY %(entity)s" + " ORDER BY MAX(%(stream)s) DESC" + " LIMIT 10000" + ) % { + "table": table, + "entity": entity_column, + "stream": stream_column, + } + + txn = db_conn.cursor() + txn.execute(sql, (int(max_value),)) + rows = txn.fetchall() + + return { + row[0]: row[1] + for row in rows + } + @defer.inlineCallbacks def insert_client_ip(self, user, access_token, ip, user_agent): now = int(self._clock.time_msec()) diff --git a/synapse/storage/account_data.py b/synapse/storage/account_data.py index ed6587429b..625d062eb1 100644 --- a/synapse/storage/account_data.py +++ b/synapse/storage/account_data.py @@ -14,7 +14,6 @@ # limitations under the License. from ._base import SQLBaseStore -from synapse.util.caches.stream_change_cache import StreamChangeCache from twisted.internet import defer import ujson as json @@ -24,14 +23,6 @@ logger = logging.getLogger(__name__) class AccountDataStore(SQLBaseStore): - def __init__(self, hs): - super(AccountDataStore, self).__init__(hs) - - self._account_data_stream_cache = StreamChangeCache( - "AccountDataAndTagsChangeCache", - self._account_data_id_gen.get_max_token(None), - max_size=10000, - ) def get_account_data_for_user(self, user_id): """Get all the client account_data for a user. 
diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 6e81d46c60..e245d2f914 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -37,7 +37,6 @@ from twisted.internet import defer from ._base import SQLBaseStore from synapse.util.caches.descriptors import cachedInlineCallbacks -from synapse.util.caches.stream_change_cache import StreamChangeCache from synapse.api.constants import EventTypes from synapse.types import RoomStreamToken from synapse.util.logutils import log_function @@ -78,13 +77,6 @@ def upper_bound(token): class StreamStore(SQLBaseStore): - def __init__(self, hs): - super(StreamStore, self).__init__(hs) - - self._events_stream_cache = StreamChangeCache( - "EventsRoomStreamChangeCache", self._stream_id_gen.get_max_token(None) - ) - @defer.inlineCallbacks def get_appservice_room_stream(self, service, from_key, to_key, limit=0): # NB this lives here instead of appservice.py so we can reuse the diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py index c673b1bdfc..891cb619fa 100644 --- a/synapse/util/caches/stream_change_cache.py +++ b/synapse/util/caches/stream_change_cache.py @@ -32,7 +32,7 @@ class StreamChangeCache(object): entities that may have changed since that position. If position key is too old then the cache will simply return all given entities. """ - def __init__(self, name, current_stream_pos, max_size=10000): + def __init__(self, name, current_stream_pos, max_size=10000, prefilled_cache={}): self._max_size = max_size self._entity_to_key = {} self._cache = sorteddict() @@ -40,6 +40,9 @@ class StreamChangeCache(object): self.name = name caches_by_name[self.name] = self._cache + for entity, stream_pos in prefilled_cache.items(): + self.entity_has_changed(entity, stream_pos) + def has_entity_changed(self, entity, stream_pos): """Returns True if the entity may have been updated since stream_pos """ From f67d60496a8a9b2c95fcacb6d4c539a1d4b6a105 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 29 Jan 2016 14:41:16 +0000 Subject: [PATCH 065/349] Convert param style --- synapse/storage/__init__.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 95ae97d507..2ed505cb1e 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -85,6 +85,7 @@ class DataStore(RoomMemberStore, RoomStore, def __init__(self, db_conn, hs): self.hs = hs + self.database_engine = hs.database_engine cur = db_conn.cursor() try: @@ -157,6 +158,8 @@ class DataStore(RoomMemberStore, RoomStore, "stream": stream_column, } + sql = self.database_engine.convert_param_style(sql) + txn = db_conn.cursor() txn.execute(sql, (int(max_value),)) rows = txn.fetchall() From 45488e0ffae5100c3a82568642736aff203e1602 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 29 Jan 2016 14:42:01 +0000 Subject: [PATCH 066/349] Max is not a function --- synapse/storage/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 2ed505cb1e..4d374a8b07 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -148,7 +148,7 @@ class DataStore(RoomMemberStore, RoomStore, def _get_cache_dict(self, db_conn, table, entity_column, stream_column, max_value): sql = ( "SELECT %(entity)s, MAX(%(stream)s) FROM %(table)s" - " WHERE %(stream)s > max(? - 100000, 0)" + " WHERE %(stream)s > ? 
- 100000" " GROUP BY %(entity)s" " ORDER BY MAX(%(stream)s) DESC" " LIMIT 10000" From 3d60686c0ceeb88c4f6269110e92dc0c7bf5a3b6 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 29 Jan 2016 14:49:11 +0000 Subject: [PATCH 067/349] Actually use cache --- synapse/storage/__init__.py | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 4d374a8b07..957fff3c23 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -120,26 +120,26 @@ class DataStore(RoomMemberStore, RoomStore, self._push_rules_enable_id_gen = IdGenerator("push_rules_enable", "id", self) events_max = self._stream_id_gen.get_max_token(None) - event_cache_prefill = self._get_cache_dict( + event_cache_prefill, min_event_val = self._get_cache_dict( db_conn, "events", entity_column="room_id", stream_column="stream_ordering", max_value=events_max, ) self._events_stream_cache = StreamChangeCache( - "EventsRoomStreamChangeCache", events_max, + "EventsRoomStreamChangeCache", min_event_val, prefilled_cache=event_cache_prefill, ) account_max = self._account_data_id_gen.get_max_token(None) - account_cache_prefill = self._get_cache_dict( + account_cache_prefill, min_acc_val = self._get_cache_dict( db_conn, "account_data", entity_column="user_id", stream_column="stream_id", max_value=account_max, ) self._account_data_stream_cache = StreamChangeCache( - "AccountDataAndTagsChangeCache", account_max, + "AccountDataAndTagsChangeCache", min_acc_val, prefilled_cache=account_cache_prefill, ) @@ -151,7 +151,6 @@ class DataStore(RoomMemberStore, RoomStore, " WHERE %(stream)s > ? - 100000" " GROUP BY %(entity)s" " ORDER BY MAX(%(stream)s) DESC" - " LIMIT 10000" ) % { "table": table, "entity": entity_column, @@ -164,11 +163,18 @@ class DataStore(RoomMemberStore, RoomStore, txn.execute(sql, (int(max_value),)) rows = txn.fetchall() - return { - row[0]: row[1] + cache = { + row[0]: int(row[1]) for row in rows } + if cache: + min_val = min(cache.values()) + else: + min_val = max_value + + return cache, min_val + @defer.inlineCallbacks def insert_client_ip(self, user, access_token, ip, user_agent): now = int(self._clock.time_msec()) From f2d5ff5bf2cb95eb0a6619ae7fb40603175c8a7d Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Fri, 29 Jan 2016 14:53:14 +0000 Subject: [PATCH 068/349] Fix the mock homserver used in the tests --- tests/utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/utils.py b/tests/utils.py index 43cc2b30cd..431252a6f1 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -49,6 +49,7 @@ def setup_test_homeserver(name="test", datastore=None, config=None, **kargs): config.disable_registration = False config.macaroon_secret_key = "not even a little secret" config.server_name = "server.under.test" + config.trusted_third_party_id_servers = [] if "clock" not in kargs: kargs["clock"] = MockClock() From b5dbced9389d072d4bd15002c7ddffba9e54340e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 29 Jan 2016 14:53:59 +0000 Subject: [PATCH 069/349] Don't prefill account data --- synapse/storage/__init__.py | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 957fff3c23..a6cb588563 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -132,15 +132,8 @@ class DataStore(RoomMemberStore, RoomStore, ) account_max = self._account_data_id_gen.get_max_token(None) - account_cache_prefill, min_acc_val = 
self._get_cache_dict( - db_conn, "account_data", - entity_column="user_id", - stream_column="stream_id", - max_value=account_max, - ) self._account_data_stream_cache = StreamChangeCache( - "AccountDataAndTagsChangeCache", min_acc_val, - prefilled_cache=account_cache_prefill, + "AccountDataAndTagsChangeCache", account_max, ) super(DataStore, self).__init__(hs) From 6927d0e091b4b6124bd45a627c7f299f2544c5b4 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Fri, 29 Jan 2016 15:01:26 +0000 Subject: [PATCH 070/349] Add missing param to the log line --- synapse/handlers/identity.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index 77f133be8f..656ce124f9 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -65,7 +65,8 @@ class IdentityHandler(BaseHandler): "Trusting untrustworthy ID server %r even though it isn't" " in the trusted id list for testing because" " 'use_insecure_ssl_client_just_for_testing_do_not_use'" - " is set in the config" + " is set in the config", + id_server, ) else: logger.warn('%s is not a trusted ID server: rejecting 3pid ' + From 4a6eb5eb45c8c77c02a38b9cd3448174a1cf018a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 29 Jan 2016 15:21:38 +0000 Subject: [PATCH 071/349] Make /events always return a newer token, if one exists --- synapse/notifier.py | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/synapse/notifier.py b/synapse/notifier.py index 6eaa65071e..acd4019a91 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -22,6 +22,8 @@ from synapse.util.async import run_on_reactor, ObservableDeferred from synapse.types import StreamToken import synapse.metrics +from collections import namedtuple + import logging @@ -118,6 +120,11 @@ class _NotifierUserStream(object): return _NotificationListener(self.notify_deferred.observe()) +class EventStreamResult(namedtuple("EventStreamResult", ("events", "tokens"))): + def __nonzero__(self): + return bool(self.events) + + class Notifier(object): """ This class is responsible for notifying any listeners when there are new events available for it. 
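As an aside on the EventStreamResult type added in the hunk above: defining its truthiness in terms of the events it carries is what lets callers treat an empty result as "nothing yet" while the advanced stream tokens are still returned. A minimal illustration, with made-up token strings:

from collections import namedtuple


class EventStreamResult(namedtuple("EventStreamResult", ("events", "tokens"))):
    def __nonzero__(self):      # Python 2 truthiness hook, as used in this codebase
        return bool(self.events)
    __bool__ = __nonzero__      # equivalent hook if run under Python 3


empty = EventStreamResult(events=[], tokens=("s100_0", "s105_3"))
full = EventStreamResult(events=["an event"], tokens=("s100_0", "s107_3"))

assert not empty   # falsy: the caller can keep waiting for something to return
assert full        # truthy: the caller can answer /events now
assert empty.tokens[1] == "s105_3"   # the end token still advanced past quiet streams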
@@ -356,7 +363,7 @@ class Notifier(object): @defer.inlineCallbacks def check_for_updates(before_token, after_token): if not after_token.is_after(before_token): - defer.returnValue(None) + defer.returnValue(EventStreamResult([], (before_token, before_token))) events = [] end_token = from_token @@ -372,7 +379,7 @@ class Notifier(object): new_events, new_key = yield source.get_new_events( user=user, from_key=getattr(from_token, keyname), - limit=limit, + limit=max(limit * 2, 10), is_guest=is_peeking, room_ids=room_ids, ) @@ -388,10 +395,7 @@ class Notifier(object): events.extend(new_events) end_token = end_token.copy_and_replace(keyname, new_key) - if events: - defer.returnValue((events, (from_token, end_token))) - else: - defer.returnValue(None) + defer.returnValue(EventStreamResult(events, (from_token, end_token))) user_id_for_stream = user.to_string() if is_peeking: @@ -415,9 +419,6 @@ class Notifier(object): from_token=from_token, ) - if result is None: - result = ([], (from_token, from_token)) - defer.returnValue(result) @defer.inlineCallbacks From 13724569eccbd2431f46868099afd44128af19d3 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 29 Jan 2016 15:33:44 +0000 Subject: [PATCH 072/349] Deal with None limit --- synapse/notifier.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/synapse/notifier.py b/synapse/notifier.py index acd4019a91..32bd16661d 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -376,10 +376,14 @@ class Notifier(object): continue if only_keys and name not in only_keys: continue + if limit: + new_limit = max(limit * 2, 10) + else: + new_limit = 10 new_events, new_key = yield source.get_new_events( user=user, from_key=getattr(from_token, keyname), - limit=max(limit * 2, 10), + limit=new_limit, is_guest=is_peeking, room_ids=room_ids, ) From 8da95b6f1bb1a37597f0b89c4da88b064401b0b8 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 29 Jan 2016 15:39:17 +0000 Subject: [PATCH 073/349] Comment. Remove superfluous order by --- synapse/storage/__init__.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index a6cb588563..ee2153737d 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -139,11 +139,13 @@ class DataStore(RoomMemberStore, RoomStore, super(DataStore, self).__init__(hs) def _get_cache_dict(self, db_conn, table, entity_column, stream_column, max_value): + # Fetch a mapping of room_id -> max stream position for "recent" rooms. + # It doesn't really matter how many we get, the StreamChangeCache will + # do the right thing to ensure it respects the max size of cache. sql = ( "SELECT %(entity)s, MAX(%(stream)s) FROM %(table)s" " WHERE %(stream)s > ? 
- 100000" " GROUP BY %(entity)s" - " ORDER BY MAX(%(stream)s) DESC" ) % { "table": table, "entity": entity_column, From e70165039cd718af0d4aca46df8857e805c39e28 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 29 Jan 2016 16:24:34 +0000 Subject: [PATCH 074/349] If stream pos is greater then earliest known key and entity hasn't changed, then entity hasn't changed --- synapse/util/caches/stream_change_cache.py | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py index 891cb619fa..b37f1c0725 100644 --- a/synapse/util/caches/stream_change_cache.py +++ b/synapse/util/caches/stream_change_cache.py @@ -52,15 +52,10 @@ class StreamChangeCache(object): cache_counter.inc_misses(self.name) return True - if stream_pos == self._earliest_known_stream_pos: - # If the same as the earliest key, assume nothing has changed. - cache_counter.inc_hits(self.name) - return False - latest_entity_change_pos = self._entity_to_key.get(entity, None) if latest_entity_change_pos is None: - cache_counter.inc_misses(self.name) - return True + cache_counter.inc_hits(self.name) + return False if stream_pos < latest_entity_change_pos: cache_counter.inc_misses(self.name) @@ -98,7 +93,7 @@ class StreamChangeCache(object): if stream_pos > self._earliest_known_stream_pos: old_pos = self._entity_to_key.get(entity, None) - if old_pos: + if old_pos is not None: stream_pos = max(stream_pos, old_pos) self._cache.pop(old_pos, None) self._cache[stream_pos] = entity From cc9c97e0dc0cf399d5d6013f12746063091b619e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 29 Jan 2016 16:41:51 +0000 Subject: [PATCH 075/349] Invalidate _account_data_stream_cache correctly --- synapse/storage/account_data.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/synapse/storage/account_data.py b/synapse/storage/account_data.py index 625d062eb1..b8387fc500 100644 --- a/synapse/storage/account_data.py +++ b/synapse/storage/account_data.py @@ -157,6 +157,10 @@ class AccountDataStore(SQLBaseStore): "content": content_json, } ) + txn.call_after( + self._account_data_stream_cache.entity_has_changed, + user_id, next_id, + ) self._update_max_stream_id(txn, next_id) with (yield self._account_data_id_gen.get_next(self)) as next_id: From 25c311eaf603cef8cbf9e6501aad83d53c304ebb Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 29 Jan 2016 16:52:48 +0000 Subject: [PATCH 076/349] Cache get_room_changes_for_user --- synapse/storage/__init__.py | 4 ++++ synapse/storage/roommember.py | 4 ++++ synapse/storage/stream.py | 7 +++++++ 3 files changed, 15 insertions(+) diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index ee2153737d..c91c7a3729 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -131,6 +131,10 @@ class DataStore(RoomMemberStore, RoomStore, prefilled_cache=event_cache_prefill, ) + self._membership_stream_cache = StreamChangeCache( + "MembershipStreamChangeCache", events_max, + ) + account_max = self._account_data_id_gen.get_max_token(None) self._account_data_stream_cache = StreamChangeCache( "AccountDataAndTagsChangeCache", account_max, diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index 1d3e004c90..3065b0c1a5 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -58,6 +58,10 @@ class RoomMemberStore(SQLBaseStore): txn.call_after(self.get_rooms_for_user.invalidate, (event.state_key,)) 
txn.call_after(self.get_joined_hosts_for_room.invalidate, (event.room_id,)) txn.call_after(self.get_users_in_room.invalidate, (event.room_id,)) + txn.call_after( + self._membership_stream_cache.entity_has_changed, + event.state_key, event.internal_metadata.stream_ordering + ) def get_room_member(self, user_id, room_id): """Retrieve the current state of a room member. diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index e245d2f914..cc9e623608 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -252,6 +252,13 @@ class StreamStore(SQLBaseStore): if from_key == to_key: return defer.succeed([]) + if from_id: + has_changed = self._membership_stream_cache.has_entity_changed( + user_id, int(from_id) + ) + if not has_changed: + return defer.succeed([]) + def f(txn): if from_id is not None: sql = ( From d98a9f25839ede491201714dc7a9d661cd4ff8f3 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Sun, 31 Jan 2016 13:31:15 +0000 Subject: [PATCH 077/349] Don't use before_token. Its wrong. Use actual limit. --- synapse/notifier.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/synapse/notifier.py b/synapse/notifier.py index 32bd16661d..29965a9ab5 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -363,7 +363,7 @@ class Notifier(object): @defer.inlineCallbacks def check_for_updates(before_token, after_token): if not after_token.is_after(before_token): - defer.returnValue(EventStreamResult([], (before_token, before_token))) + defer.returnValue(EventStreamResult([], (from_token, from_token))) events = [] end_token = from_token @@ -376,14 +376,11 @@ class Notifier(object): continue if only_keys and name not in only_keys: continue - if limit: - new_limit = max(limit * 2, 10) - else: - new_limit = 10 + new_events, new_key = yield source.get_new_events( user=user, from_key=getattr(from_token, keyname), - limit=new_limit, + limit=limit, is_guest=is_peeking, room_ids=room_ids, ) From ceb6b8680a8e419c3c132dcdb675517c5e9f69fd Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 1 Feb 2016 10:33:52 +0000 Subject: [PATCH 078/349] Only use room_ids if in get_room_events_stream if is_guest --- synapse/storage/stream.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index e245d2f914..a60e662f7d 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -316,11 +316,6 @@ class StreamStore(SQLBaseStore): " WHERE m.user_id = ? AND m.membership = 'join'" ) current_room_membership_args = [user_id] - if room_ids: - current_room_membership_sql += " AND m.room_id in (%s)" % ( - ",".join(map(lambda _: "?", room_ids)) - ) - current_room_membership_args = [user_id] + room_ids # We also want to get any membership events about that user, e.g. # invites or leave notifications. 
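The storage changes above repeat one pattern: a writer marks an entity as changed at a stream position once the transaction commits (via txn.call_after), and a reader asks the cache whether anything could have changed before running a query. A compact sketch of that pattern follows, using made-up store and cache classes rather than Synapse's real storage layer:

class FakeChangeCache(object):
    def __init__(self, earliest_pos):
        self._earliest = earliest_pos
        self._latest_change = {}

    def entity_has_changed(self, entity, pos):
        self._latest_change[entity] = max(pos, self._latest_change.get(entity, pos))

    def has_entity_changed(self, entity, pos):
        if pos < self._earliest:
            return True                      # too old to know: must hit the DB
        latest = self._latest_change.get(entity)
        return latest is not None and pos < latest


class FakeRoomChangesStore(object):
    def __init__(self, cache):
        self._cache = cache
        self.db_queries = 0

    def record_membership_change(self, user_id, stream_id):
        # In the real store this would run via txn.call_after(...) so the cache
        # is only updated once the transaction has committed.
        self._cache.entity_has_changed(user_id, stream_id)

    def get_room_changes_for_user(self, user_id, from_id, to_id):
        if not self._cache.has_entity_changed(user_id, from_id):
            return []                        # short-circuit: nothing to fetch
        self.db_queries += 1
        return ["...rows fetched from the database..."]


cache = FakeChangeCache(earliest_pos=50)
store = FakeRoomChangesStore(cache)
store.record_membership_change("@alice:example.com", 60)

assert store.get_room_changes_for_user("@bob:example.com", 55, 70) == []
assert store.db_queries == 0                 # the cache answered without a query
assert store.get_room_changes_for_user("@alice:example.com", 55, 70) != []
assert store.db_queries == 1                 # only changed entities reach the DB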
From fa48020a52dfb924be4f191d54631b4d05b08ba0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 1 Feb 2016 15:59:40 +0000 Subject: [PATCH 079/349] Move state calculations from rest to handler --- synapse/handlers/sync.py | 162 ++++++++++++++++----------- synapse/rest/client/v2_alpha/sync.py | 75 ------------- 2 files changed, 97 insertions(+), 140 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 075566417f..3109e30414 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -23,6 +23,7 @@ from twisted.internet import defer import collections import logging +import itertools logger = logging.getLogger(__name__) @@ -672,35 +673,10 @@ class SyncHandler(BaseHandler): account_data_by_room, all_ephemeral_by_room, batch, full_state=False): - if full_state: - state = yield self.get_state_at(room_id, now_token) - - elif batch.limited: - current_state = yield self.get_state_at(room_id, now_token) - - state_at_previous_sync = yield self.get_state_at( - room_id, stream_position=since_token - ) - - state = yield self.compute_state_delta( - since_token=since_token, - previous_state=state_at_previous_sync, - current_state=current_state, - ) - else: - state = { - (event.type, event.state_key): event - for event in batch.events if event.is_state() - } - - just_joined = yield self.check_joined_room(sync_config, state) - if just_joined: - state = yield self.get_state_at(room_id, now_token) - - state = { - (e.type, e.state_key): e - for e in sync_config.filter_collection.filter_room_state(state.values()) - } + state = yield self.compute_state_delta( + room_id, batch, sync_config, since_token, now_token, + full_state=full_state + ) account_data = self.account_data_for_room( room_id, tags_by_room, account_data_by_room @@ -766,30 +742,11 @@ class SyncHandler(BaseHandler): logger.debug("Recents %r", batch) - state_events_at_leave = yield self.store.get_state_for_event( - leave_event_id + state_events_delta = yield self.compute_state_delta( + room_id, batch, sync_config, since_token, leave_token, + full_state=full_state ) - if not full_state: - state_at_previous_sync = yield self.get_state_at( - room_id, stream_position=since_token - ) - - state_events_delta = yield self.compute_state_delta( - since_token=since_token, - previous_state=state_at_previous_sync, - current_state=state_events_at_leave, - ) - else: - state_events_delta = state_events_at_leave - - state_events_delta = { - (e.type, e.state_key): e - for e in sync_config.filter_collection.filter_room_state( - state_events_delta.values() - ) - } - account_data = self.account_data_for_room( room_id, tags_by_room, account_data_by_room ) @@ -843,15 +800,18 @@ class SyncHandler(BaseHandler): state = {} defer.returnValue(state) - def compute_state_delta(self, since_token, previous_state, current_state): - """ Works out the differnce in state between the current state and the - state the client got when it last performed a sync. + @defer.inlineCallbacks + def compute_state_delta(self, room_id, batch, sync_config, since_token, now_token, + full_state): + """ Works out the differnce in state between the start of the timeline + and the previous sync. 
- :param str since_token: the point we are comparing against - :param dict[(str,str), synapse.events.FrozenEvent] previous_state: the - state to compare to - :param dict[(str,str), synapse.events.FrozenEvent] current_state: the - new state + :param str room_id + :param TimelineBatch batch + :param sync_config + :param str since_token + :param str now_token + :param bool full_state :returns A new event dictionary """ @@ -860,12 +820,50 @@ class SyncHandler(BaseHandler): # updates even if they occured logically before the previous event. # TODO(mjark) Check for new redactions in the state events. - state_delta = {} - for key, event in current_state.iteritems(): - if (key not in previous_state or - previous_state[key].event_id != event.event_id): - state_delta[key] = event - return state_delta + if full_state: + if batch: + state = yield self.store.get_state_for_event(batch.events[0].event_id) + else: + state = yield self.get_state_at( + room_id, stream_position=now_token + ) + + timeline_state = { + (event.type, event.state_key): event + for event in batch.events if event.is_state() + } + + state = _calculate_state( + timeline_contains=timeline_state, + timeline_start=state, + previous={}, + ) + elif batch.limited: + state_at_previous_sync = yield self.get_state_at( + room_id, stream_position=since_token + ) + + state_at_timeline_start = yield self.store.get_state_for_event( + batch.events[0].event_id + ) + + timeline_state = { + (event.type, event.state_key): event + for event in batch.events if event.is_state() + } + + state = _calculate_state( + timeline_contains=timeline_state, + timeline_start=state_at_timeline_start, + previous=state_at_previous_sync, + ) + else: + state = {} + + defer.returnValue({ + (e.type, e.state_key): e + for e in sync_config.filter_collection.filter_room_state(state.values()) + }) def check_joined_room(self, sync_config, state_delta): """ @@ -912,3 +910,37 @@ def _action_has_highlight(actions): pass return False + + +def _calculate_state(timeline_contains, timeline_start, previous): + """Works out what state to include in a sync response. 
+ + Args: + timeline_contains (dict): state in the timeline + timeline_start (dict): state at the start of the timeline + previous (dict): state at the end of the previous sync (or empty dict + if there is an initial sync) + + Returns: + dict + """ + event_id_to_state = { + e.event_id: e + for e in itertools.chain( + timeline_contains.values(), + previous.values(), + timeline_start.values(), + ) + } + + tc_ids = set(e.event_id for e in timeline_contains.values()) + p_ids = set(e.event_id for e in previous.values()) + ts_ids = set(e.event_id for e in timeline_start.values()) + + state_ids = (ts_ids - p_ids) - tc_ids + + evs = (event_id_to_state[e] for e in state_ids) + return { + (e.type, e.state_key): e + for e in evs + } diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py index 07b5b5dfd5..140ce2704b 100644 --- a/synapse/rest/client/v2_alpha/sync.py +++ b/synapse/rest/client/v2_alpha/sync.py @@ -20,7 +20,6 @@ from synapse.http.servlet import ( ) from synapse.handlers.sync import SyncConfig from synapse.types import StreamToken -from synapse.events import FrozenEvent from synapse.events.utils import ( serialize_event, format_event_for_client_v2_without_room_id, ) @@ -287,9 +286,6 @@ class SyncRestServlet(RestServlet): state_dict = room.state timeline_events = room.timeline.events - state_dict = SyncRestServlet._rollback_state_for_timeline( - state_dict, timeline_events) - state_events = state_dict.values() serialized_state = [serialize(e) for e in state_events] @@ -314,77 +310,6 @@ class SyncRestServlet(RestServlet): return result - @staticmethod - def _rollback_state_for_timeline(state, timeline): - """ - Wind the state dictionary backwards, so that it represents the - state at the start of the timeline, rather than at the end. - - :param dict[(str, str), synapse.events.EventBase] state: the - state dictionary. Will be updated to the state before the timeline. - :param list[synapse.events.EventBase] timeline: the event timeline - :return: updated state dictionary - """ - - result = state.copy() - - for timeline_event in reversed(timeline): - if not timeline_event.is_state(): - continue - - event_key = (timeline_event.type, timeline_event.state_key) - - logger.debug("Considering %s for removal", event_key) - - state_event = result.get(event_key) - if (state_event is None or - state_event.event_id != timeline_event.event_id): - # the event in the timeline isn't present in the state - # dictionary. - # - # the most likely cause for this is that there was a fork in - # the event graph, and the state is no longer valid. Really, - # the event shouldn't be in the timeline. We're going to ignore - # it for now, however. - logger.debug("Found state event %r in timeline which doesn't " - "match state dictionary", timeline_event) - continue - - prev_event_id = timeline_event.unsigned.get("replaces_state", None) - - prev_content = timeline_event.unsigned.get('prev_content') - prev_sender = timeline_event.unsigned.get('prev_sender') - # Empircally it seems possible for the event to have a - # "replaces_state" key but not a prev_content or prev_sender - # markjh conjectures that it could be due to the server not - # having a copy of that event. - # If this is the case the we ignore the previous event. 
This will - # cause the displayname calculations on the client to be incorrect - if prev_event_id is None or not prev_content or not prev_sender: - logger.debug( - "Removing %r from the state dict, as it is missing" - " prev_content (prev_event_id=%r)", - timeline_event.event_id, prev_event_id - ) - del result[event_key] - else: - logger.debug( - "Replacing %r with %r in state dict", - timeline_event.event_id, prev_event_id - ) - result[event_key] = FrozenEvent({ - "type": timeline_event.type, - "state_key": timeline_event.state_key, - "content": prev_content, - "sender": prev_sender, - "event_id": prev_event_id, - "room_id": timeline_event.room_id, - }) - - logger.debug("New value: %r", result.get(event_key)) - - return result - def register_servlets(hs, http_server): SyncRestServlet(hs).register(http_server) From 4bf448be254808c83aeb5ae28e601752664bc9e2 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 1 Feb 2016 16:26:51 +0000 Subject: [PATCH 080/349] Switch over /events to use per room caches --- synapse/handlers/room.py | 25 ++++++++++++++++++++----- synapse/storage/stream.py | 4 ++-- 2 files changed, 22 insertions(+), 7 deletions(-) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 58e2d25f97..aca795e1c4 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -1008,15 +1008,30 @@ class RoomEventSource(object): limit=limit, ) else: - events, end_key = yield self.store.get_room_events_stream( - user_id=user.to_string(), + room_events = yield self.store.get_room_changes_for_user( + user.to_string(), from_key, to_key + ) + + room_to_events = yield self.store.get_room_events_stream_for_rooms( + room_ids=room_ids, from_key=from_key, to_key=to_key, - limit=limit, - room_ids=room_ids, - is_guest=is_guest, + limit=limit or 10, ) + events = list(room_events) + events.extend(e for evs, _ in room_to_events.values() for e in evs) + + events.sort(key=lambda e: e.internal_metadata.after) + + if limit: + events[:] = events[:limit] + + if events: + end_key = events[-1].internal_metadata.after + else: + end_key = to_key + defer.returnValue((events, end_key)) def get_current_key(self, direction='f'): diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 8dc8f5c640..fd84aa8996 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -39,7 +39,6 @@ from ._base import SQLBaseStore from synapse.util.caches.descriptors import cachedInlineCallbacks from synapse.api.constants import EventTypes from synapse.types import RoomStreamToken -from synapse.util.logutils import log_function import logging @@ -288,11 +287,12 @@ class StreamStore(SQLBaseStore): get_prev_content=True ) + self._set_before_and_after(ret, rows, topo_order=False) + return ret return self.runInteraction("get_room_changes_for_user", f) - @log_function def get_room_events_stream( self, user_id, From 89b40b225cda4326081f6735b2a8a9bff5ce3446 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 1 Feb 2016 16:32:46 +0000 Subject: [PATCH 081/349] Order things correctly --- synapse/handlers/room.py | 2 +- synapse/storage/stream.py | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index aca795e1c4..a71cba8ef1 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -1022,7 +1022,7 @@ class RoomEventSource(object): events = list(room_events) events.extend(e for evs, _ in room_to_events.values() for e in evs) - events.sort(key=lambda e: e.internal_metadata.after) + events.sort(key=lambda 
e: e.internal_metadata.order) if limit: events[:] = events[:limit] diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index fd84aa8996..a03458c2fc 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -598,6 +598,10 @@ class StreamStore(SQLBaseStore): internal = event.internal_metadata internal.before = str(RoomStreamToken(topo, stream - 1)) internal.after = str(RoomStreamToken(topo, stream)) + internal.order = ( + int(topo) if topo else 0, + int(stream), + ) @defer.inlineCallbacks def get_events_around(self, room_id, event_id, before_limit, after_limit): From d7ac861d3b34bbc6d0d0b66370903831ef6ee9bf Mon Sep 17 00:00:00 2001 From: David Baker Date: Mon, 1 Feb 2016 16:33:19 +0000 Subject: [PATCH 082/349] Pull guest access token out of the auth session params, otherwise it will break if you open the email on a different device. --- synapse/rest/client/v2_alpha/register.py | 1 + 1 file changed, 1 insertion(+) diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index c4d025b465..5d50dd9e3d 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -152,6 +152,7 @@ class RegisterRestServlet(RestServlet): desired_username = params.get("username", None) new_password = params.get("password", None) + guest_access_token = params.get("guest_access_token", None) (user_id, token) = yield self.registration_handler.register( localpart=desired_username, From 854ca32f10f6e1c8041160d4866470098c7f6fc1 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 1 Feb 2016 16:52:27 +0000 Subject: [PATCH 083/349] Comments --- synapse/handlers/sync.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 3109e30414..8d8d10da33 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -807,11 +807,12 @@ class SyncHandler(BaseHandler): and the previous sync. :param str room_id - :param TimelineBatch batch + :param TimelineBatch batch: The timeline batch for the room that will + be sent to the user. :param sync_config - :param str since_token - :param str now_token - :param bool full_state + :param str since_token: Token of the end of the previous batch. May be None. + :param str now_token: Token of the end of the current batch. + :param bool full_state: Whether to force returning the full state. :returns A new event dictionary """ @@ -919,7 +920,7 @@ def _calculate_state(timeline_contains, timeline_start, previous): timeline_contains (dict): state in the timeline timeline_start (dict): state at the start of the timeline previous (dict): state at the end of the previous sync (or empty dict - if there is an initial sync) + if this is an initial sync) Returns: dict From b023995538ae2ed13934638009463abcbae08f92 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 2 Feb 2016 14:11:14 +0000 Subject: [PATCH 084/349] WARN if we get a topo token instead of stream. 
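
A rough illustration of what the guard below protects against: this event
source expects a stream-ordered from_key of the form "s<stream>", but a token
carrying a topological part can occasionally leak in. The sketch assumes the
"t<topological>-<stream>" wire format handled by RoomStreamToken.parse and is
an illustration only, not the code the patch adds:

    import logging

    logger = logging.getLogger(__name__)

    def normalise_room_key(from_key):
        """Drop any topological part, warning so the source can be tracked down."""
        if from_key.startswith("t"):
            # Assumed format "t<topological>-<stream>", e.g. "t123-456".
            _topo, _, stream = from_key[1:].partition("-")
            logger.warn("Stream has topological part!!!! %r", from_key)
            return "s%s" % (stream,)
        # Already a stream-only token such as "s456".
        return from_key

The actual change does the equivalent with RoomStreamToken.parse before handing
from_key on to the storage layer.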
--- synapse/handlers/room.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index a71cba8ef1..68e2c75a48 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -18,7 +18,7 @@ from twisted.internet import defer from ._base import BaseHandler -from synapse.types import UserID, RoomAlias, RoomID +from synapse.types import UserID, RoomAlias, RoomID, RoomStreamToken from synapse.api.constants import ( EventTypes, Membership, JoinRules, RoomCreationPreset, ) @@ -997,6 +997,11 @@ class RoomEventSource(object): to_key = yield self.get_current_key() + from_token = RoomStreamToken.parse(from_key) + if from_token.topological: + logger.warn("Stream has topological part!!!! %r", from_key) + from_key = "s%s" % (from_token.stream,) + app_service = yield self.store.get_app_service_by_user_id( user.to_string() ) From 69214ea671d3e219d53698cc608c8e4d414cc3f7 Mon Sep 17 00:00:00 2001 From: David Baker Date: Tue, 2 Feb 2016 14:42:31 +0000 Subject: [PATCH 085/349] Pass make_guest whne we autogen a user ID --- synapse/handlers/register.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index c11b98d0b7..abd1a16a41 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -139,7 +139,9 @@ class RegistrationHandler(BaseHandler): yield self.store.register( user_id=user_id, token=token, - password_hash=password_hash) + password_hash=password_hash, + make_guest=make_guest + ) yield registered_user(self.distributor, user) except SynapseError: From 65e92eca4912848b03f71b7b7d29727015be31ce Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 2 Feb 2016 15:19:34 +0000 Subject: [PATCH 086/349] Change the way we do public room list fetching --- synapse/handlers/room.py | 80 ++++++++++++++----- synapse/storage/room.py | 2 +- .../schema/delta/28/public_roms_index.sql | 16 ++++ 3 files changed, 77 insertions(+), 21 deletions(-) create mode 100644 synapse/storage/schema/delta/28/public_roms_index.sql diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index a71cba8ef1..1b3f624c67 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -876,31 +876,71 @@ class RoomListHandler(BaseHandler): @defer.inlineCallbacks def get_public_room_list(self): - chunk = yield self.store.get_rooms(is_public=True) + room_ids = yield self.store.get_public_room_ids() - room_members = yield defer.gatherResults( - [ - self.store.get_users_in_room(room["room_id"]) - for room in chunk - ], - consumeErrors=True, - ).addErrback(unwrapFirstError) + @defer.inlineCallbacks + def handle_room(room_id): + aliases = yield self.store.get_aliases_for_room(room_id) + if not aliases: + defer.returnValue(None) - avatar_urls = yield defer.gatherResults( - [ - self.get_room_avatar_url(room["room_id"]) - for room in chunk - ], - consumeErrors=True, - ).addErrback(unwrapFirstError) + state = yield self.state_handler.get_current_state(room_id) - for i, room in enumerate(chunk): - room["num_joined_members"] = len(room_members[i]) - if avatar_urls[i]: - room["avatar_url"] = avatar_urls[i] + result = {"aliases": aliases, "room_id": room_id} + + name_event = state.get((EventTypes.Name, ""), None) + if name_event: + name = name_event.content.get("name", None) + if name: + result["name"] = name + + topic_event = state.get((EventTypes.Topic, ""), None) + if topic_event: + topic = topic_event.content.get("topic", None) + if topic: + result["topic"] = 
topic + + canonical_event = state.get((EventTypes.CanonicalAlias, ""), None) + if canonical_event: + canonical_alias = canonical_event.content.get("alias", None) + if canonical_alias: + result["canonical_alias"] = canonical_alias + + visibility_event = state.get((EventTypes.RoomHistoryVisibility, ""), None) + visibility = None + if visibility_event: + visibility = visibility_event.content.get("history_visibility", None) + result["world_readable"] = visibility == "world_readable" + + guest_event = state.get((EventTypes.GuestAccess, ""), None) + guest = None + if guest_event: + guest = guest_event.content.get("guest_access", None) + result["guest_can_join"] = guest == "can_join" + + avatar_event = state.get(("m.room.avatar", ""), None) + if avatar_event: + avatar_url = avatar_event.content.get("url", None) + if avatar_url: + result["avatar_url"] = avatar_url + + result["num_joined_members"] = sum( + 1 for (event_type, _), ev in state.items() + if event_type == EventTypes.Member and ev.membership == Membership.JOIN + ) + + defer.returnValue(result) + + result = [] + for chunk in (room_ids[i:i+10] for i in xrange(0, len(room_ids), 10)): + chunk_result = yield defer.gatherResults([ + handle_room(room_id) + for room_id in chunk + ], consumeErrors=True).addErrback(unwrapFirstError) + result.extend(v for v in chunk_result if v) # FIXME (erikj): START is no longer a valid value - defer.returnValue({"start": "START", "end": "END", "chunk": chunk}) + defer.returnValue({"start": "START", "end": "END", "chunk": result}) @defer.inlineCallbacks def get_room_avatar_url(self, room_id): diff --git a/synapse/storage/room.py b/synapse/storage/room.py index dc09a3aaba..1b6311f332 100644 --- a/synapse/storage/room.py +++ b/synapse/storage/room.py @@ -18,7 +18,7 @@ from twisted.internet import defer from synapse.api.errors import StoreError from ._base import SQLBaseStore -from synapse.util.caches.descriptors import cachedInlineCallbacks +from synapse.util.caches.descriptors import cachedInlineCallbacks, cached from .engines import PostgresEngine, Sqlite3Engine import collections diff --git a/synapse/storage/schema/delta/28/public_roms_index.sql b/synapse/storage/schema/delta/28/public_roms_index.sql new file mode 100644 index 0000000000..ba62a974a4 --- /dev/null +++ b/synapse/storage/schema/delta/28/public_roms_index.sql @@ -0,0 +1,16 @@ +/* Copyright 2016 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. +*/ + +CREATE INDEX public_room_index on rooms(is_public); From 477b1ed6cfd130e5a004cda0c0b84509da2aa006 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 2 Feb 2016 15:58:14 +0000 Subject: [PATCH 087/349] Fetch events in a separate transaction. This has a couple of benefits: - It reduces the time of transactions, allowing other database requests to run. - Fetching events is given a dedicated database thread, and so can't starve other database requests. 
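
In outline, the change swaps "run the query and load the full events inside one
transaction" for "return light-weight rows from the transaction, then hydrate
the events afterwards". A simplified sketch of that shape, reusing the store
helpers that appear elsewhere in these patches (runInteraction, cursor_to_dict,
_get_events); the ExampleStore class and its query are made up for
illustration, not the literal diff below:

    from twisted.internet import defer

    from synapse.storage._base import SQLBaseStore

    class ExampleStore(SQLBaseStore):
        @defer.inlineCallbacks
        def get_recent_events_for_room(self, room_id, from_id, to_id):
            def f(txn):
                # Keep the transaction short: just the index lookup.
                txn.execute(
                    "SELECT event_id, stream_ordering FROM events"
                    " WHERE room_id = ? AND stream_ordering > ?"
                    " AND stream_ordering <= ?",
                    (room_id, from_id, to_id),
                )
                return self.cursor_to_dict(txn)

            rows = yield self.runInteraction("get_recent_events_for_room", f)

            # Event bodies are fetched outside the transaction, on the
            # dedicated event-fetching path, so they cannot starve other
            # database requests.
            events = yield self._get_events(
                [r["event_id"] for r in rows],
                get_prev_content=True,
            )
            defer.returnValue(events)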
--- synapse/storage/stream.py | 55 +++++++++++++++++++++------------------ 1 file changed, 29 insertions(+), 26 deletions(-) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index a03458c2fc..bcae3d718e 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -220,27 +220,29 @@ class StreamStore(SQLBaseStore): rows = self.cursor_to_dict(txn) - ret = self._get_events_txn( - txn, - [r["event_id"] for r in rows], - get_prev_content=True - ) + return rows - self._set_before_and_after(ret, rows, topo_order=False) + rows = yield self.runInteraction("get_room_events_stream_for_room", f) - ret.reverse() + ret = yield self._get_events( + [r["event_id"] for r in rows], + get_prev_content=True + ) - if rows: - key = "s%d" % min(r["stream_ordering"] for r in rows) - else: - # Assume we didn't get anything because there was nothing to - # get. - key = from_key + self._set_before_and_after(ret, rows, topo_order=False) - return ret, key - res = yield self.runInteraction("get_room_events_stream_for_room", f) - defer.returnValue(res) + ret.reverse() + if rows: + key = "s%d" % min(r["stream_ordering"] for r in rows) + else: + # Assume we didn't get anything because there was nothing to + # get. + key = from_key + + defer.returnValue((ret, key)) + + @defer.inlineCallbacks def get_room_changes_for_user(self, user_id, from_key, to_key): if from_key is not None: from_id = RoomStreamToken.parse_stream_token(from_key).stream @@ -249,14 +251,14 @@ class StreamStore(SQLBaseStore): to_id = RoomStreamToken.parse_stream_token(to_key).stream if from_key == to_key: - return defer.succeed([]) + defer.returnValue([]) if from_id: has_changed = self._membership_stream_cache.has_entity_changed( user_id, int(from_id) ) if not has_changed: - return defer.succeed([]) + defer.returnValue([]) def f(txn): if from_id is not None: @@ -281,17 +283,18 @@ class StreamStore(SQLBaseStore): txn.execute(sql, (user_id, to_id,)) rows = self.cursor_to_dict(txn) - ret = self._get_events_txn( - txn, - [r["event_id"] for r in rows], - get_prev_content=True - ) + return rows - self._set_before_and_after(ret, rows, topo_order=False) + rows = yield self.runInteraction("get_room_changes_for_user", f) - return ret + ret = yield self._get_events( + [r["event_id"] for r in rows], + get_prev_content=True + ) - return self.runInteraction("get_room_changes_for_user", f) + self._set_before_and_after(ret, rows, topo_order=False) + + defer.returnValue(ret) def get_room_events_stream( self, From 8a391e33ae21f9a62c57cca8eea47435a14a6247 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 2 Feb 2016 16:12:10 +0000 Subject: [PATCH 088/349] s/get_room_changes_for_user/get_membership_changes_for_user/ --- synapse/handlers/room.py | 2 +- synapse/handlers/sync.py | 2 +- synapse/storage/stream.py | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 68e2c75a48..799221c198 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -1013,7 +1013,7 @@ class RoomEventSource(object): limit=limit, ) else: - room_events = yield self.store.get_room_changes_for_user( + room_events = yield self.store.get_membership_changes_for_user( user.to_string(), from_key, to_key ) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 8d8d10da33..dc686db541 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -479,7 +479,7 @@ class SyncHandler(BaseHandler): ) # Get a list of membership change events that have happened. 
- rooms_changed = yield self.store.get_room_changes_for_user( + rooms_changed = yield self.store.get_membership_changes_for_user( user_id, since_token.room_key, now_token.room_key ) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index bcae3d718e..338a9d40d5 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -243,7 +243,7 @@ class StreamStore(SQLBaseStore): defer.returnValue((ret, key)) @defer.inlineCallbacks - def get_room_changes_for_user(self, user_id, from_key, to_key): + def get_membership_changes_for_user(self, user_id, from_key, to_key): if from_key is not None: from_id = RoomStreamToken.parse_stream_token(from_key).stream else: @@ -285,7 +285,7 @@ class StreamStore(SQLBaseStore): return rows - rows = yield self.runInteraction("get_room_changes_for_user", f) + rows = yield self.runInteraction("get_membership_changes_for_user", f) ret = yield self._get_events( [r["event_id"] for r in rows], From d83d004ccdb7ace1dcb51b8acf7645bc176b10a5 Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Tue, 2 Feb 2016 17:18:50 +0000 Subject: [PATCH 089/349] Fix flake8 warnings for new flake8 --- setup.cfg | 1 + synapse/api/auth.py | 2 +- synapse/app/__init__.py | 19 +++++++++++ synapse/app/homeserver.py | 40 +++++++----------------- synapse/appservice/api.py | 2 +- synapse/federation/federation_client.py | 2 +- synapse/handlers/_base.py | 2 +- synapse/handlers/directory.py | 4 +-- synapse/handlers/events.py | 2 +- synapse/handlers/presence.py | 2 +- synapse/handlers/register.py | 2 +- synapse/handlers/room.py | 2 +- synapse/http/matrixfederationclient.py | 2 +- synapse/notifier.py | 2 +- synapse/push/push_rule_evaluator.py | 2 +- synapse/rest/client/v1/login.py | 2 +- synapse/rest/client/v1/pusher.py | 4 +-- synapse/rest/client/v1/register.py | 3 +- synapse/rest/client/v2_alpha/register.py | 3 +- synapse/rest/client/versions.py | 4 +-- synapse/server.py | 2 +- synapse/state.py | 2 +- synapse/storage/__init__.py | 2 +- synapse/storage/_base.py | 7 +++-- synapse/storage/engines/sqlite3.py | 2 +- synapse/storage/event_federation.py | 2 +- synapse/storage/events.py | 6 ++-- synapse/storage/stream.py | 2 +- synapse/util/__init__.py | 2 +- synapse/util/caches/descriptors.py | 4 +-- synapse/util/caches/expiringcache.py | 2 +- synapse/util/caches/treecache.py | 2 +- synapse/util/logutils.py | 2 +- synapse/util/ratelimitutils.py | 2 +- 34 files changed, 74 insertions(+), 67 deletions(-) diff --git a/setup.cfg b/setup.cfg index ba027c7d13..e7fc5ffe78 100644 --- a/setup.cfg +++ b/setup.cfg @@ -16,3 +16,4 @@ ignore = [flake8] max-line-length = 90 +ignore = W503 diff --git a/synapse/api/auth.py b/synapse/api/auth.py index b5536e8565..c5a2865e26 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -574,7 +574,7 @@ class Auth(object): raise AuthError( 403, "Application service has not registered this user" - ) + ) defer.returnValue(user_id) @defer.inlineCallbacks diff --git a/synapse/app/__init__.py b/synapse/app/__init__.py index bfebb0f644..1bc4279807 100644 --- a/synapse/app/__init__.py +++ b/synapse/app/__init__.py @@ -12,3 +12,22 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+ +import sys +sys.dont_write_bytecode = True + +from synapse.python_dependencies import ( + check_requirements, MissingRequirementError +) # NOQA + +try: + check_requirements() +except MissingRequirementError as e: + message = "\n".join([ + "Missing Requirement: %s" % (e.message,), + "To install run:", + " pip install --upgrade --force \"%s\"" % (e.dependency,), + "", + ]) + sys.stderr.writelines(message) + sys.exit(1) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index e5066c48ef..c3066d6a0d 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -14,27 +14,22 @@ # See the License for the specific language governing permissions and # limitations under the License. -import sys -from synapse.rest import ClientRestResource +import synapse + +import contextlib +import logging +import os +import re +import resource +import subprocess +import sys +import time -sys.dont_write_bytecode = True from synapse.python_dependencies import ( - check_requirements, DEPENDENCY_LINKS, MissingRequirementError + check_requirements, DEPENDENCY_LINKS ) -if __name__ == '__main__': - try: - check_requirements() - except MissingRequirementError as e: - message = "\n".join([ - "Missing Requirement: %s" % (e.message,), - "To install run:", - " pip install --upgrade --force \"%s\"" % (e.dependency,), - "", - ]) - sys.stderr.writelines(message) - sys.exit(1) - +from synapse.rest import ClientRestResource from synapse.storage.engines import create_engine, IncorrectDatabaseSetup from synapse.storage import are_all_users_on_domain from synapse.storage.prepare_database import UpgradeDatabaseException @@ -73,17 +68,6 @@ from synapse import events from daemonize import Daemonize -import synapse - -import contextlib -import logging -import os -import re -import resource -import subprocess -import time - - logger = logging.getLogger("synapse.app.homeserver") diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py index e1c07028e8..bc90605324 100644 --- a/synapse/appservice/api.py +++ b/synapse/appservice/api.py @@ -29,7 +29,7 @@ class ApplicationServiceApi(SimpleHttpClient): pushing. """ - def __init__(self, hs): + def __init__(self, hs): super(ApplicationServiceApi, self).__init__(hs) self.clock = hs.get_clock() diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index c6259f9dc8..e30e2da58d 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -57,7 +57,7 @@ class FederationClient(FederationBase): cache_name="get_pdu_cache", clock=self._clock, max_len=1000, - expiry_ms=120*1000, + expiry_ms=120 * 1000, reset_expiry_on_get=False, ) diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py index 744a9ee507..1423df6cf3 100644 --- a/synapse/handlers/_base.py +++ b/synapse/handlers/_base.py @@ -147,7 +147,7 @@ class BaseHandler(object): ) if not allowed: raise LimitExceededError( - retry_after_ms=int(1000*(time_allowed - time_now)), + retry_after_ms=int(1000 * (time_allowed - time_now)), ) @defer.inlineCallbacks diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 691564c651..4efecb1ffd 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -175,8 +175,8 @@ class DirectoryHandler(BaseHandler): # If this server is in the list of servers, return it first. 
if self.server_name in servers: servers = ( - [self.server_name] - + [s for s in servers if s != self.server_name] + [self.server_name] + + [s for s in servers if s != self.server_name] ) else: servers = list(servers) diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py index 254b483da6..5ad8f3779a 100644 --- a/synapse/handlers/events.py +++ b/synapse/handlers/events.py @@ -130,7 +130,7 @@ class EventStreamHandler(BaseHandler): # Add some randomness to this value to try and mitigate against # thundering herds on restart. - timeout = random.randint(int(timeout*0.9), int(timeout*1.1)) + timeout = random.randint(int(timeout * 0.9), int(timeout * 1.1)) events, tokens = yield self.notifier.get_events_for( auth_user, pagin_config, timeout, diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index d36eb3b8d7..d0c21ff5c9 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -34,7 +34,7 @@ metrics = synapse.metrics.get_metrics_for(__name__) # Don't bother bumping "last active" time if it differs by less than 60 seconds -LAST_ACTIVE_GRANULARITY = 60*1000 +LAST_ACTIVE_GRANULARITY = 60 * 1000 # Keep no more than this number of offline serial revisions MAX_OFFLINE_SERIALS = 1000 diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index abd1a16a41..b8fbcf9233 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -213,7 +213,7 @@ class RegistrationHandler(BaseHandler): 400, "User ID must only contain characters which do not" " require URL encoding." - ) + ) user = UserID(localpart, self.hs.hostname) user_id = user.to_string() diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 799221c198..088b76d237 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -927,7 +927,7 @@ class RoomContextHandler(BaseHandler): Returns: dict, or None if the event isn't found """ - before_limit = math.floor(limit/2.) + before_limit = math.floor(limit / 2.) after_limit = limit - before_limit now_token = yield self.hs.get_event_sources().get_current_token() diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index da13e32e78..c3589534f8 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -152,7 +152,7 @@ class MatrixFederationHttpClient(object): return self.clock.time_bound_deferred( request_deferred, - time_out=timeout/1000. if timeout else 60, + time_out=timeout / 1000. 
if timeout else 60, ) response = yield preserve_context_over_fn( diff --git a/synapse/notifier.py b/synapse/notifier.py index 29965a9ab5..1a90bd55cd 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -308,7 +308,7 @@ class Notifier(object): def timed_out(): if listener: listener.deferred.cancel() - timer = self.clock.call_later(timeout/1000., timed_out) + timer = self.clock.call_later(timeout / 1000., timed_out) prev_token = from_token while not result: diff --git a/synapse/push/push_rule_evaluator.py b/synapse/push/push_rule_evaluator.py index dca018af95..2a2b4437dc 100644 --- a/synapse/push/push_rule_evaluator.py +++ b/synapse/push/push_rule_evaluator.py @@ -304,7 +304,7 @@ def _flatten_dict(d, prefix=[], result={}): if isinstance(value, basestring): result[".".join(prefix + [key])] = value.lower() elif hasattr(value, "items"): - _flatten_dict(value, prefix=(prefix+[key]), result=result) + _flatten_dict(value, prefix=(prefix + [key]), result=result) return result diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index 07836709fb..7199113dac 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -89,7 +89,7 @@ class LoginRestServlet(ClientV1RestServlet): LoginRestServlet.SAML2_TYPE): relay_state = "" if "relay_state" in login_submission: - relay_state = "&RelayState="+urllib.quote( + relay_state = "&RelayState=" + urllib.quote( login_submission["relay_state"]) result = { "uri": "%s%s" % (self.idp_redirect_url, relay_state) diff --git a/synapse/rest/client/v1/pusher.py b/synapse/rest/client/v1/pusher.py index e218ed215c..5547f1b112 100644 --- a/synapse/rest/client/v1/pusher.py +++ b/synapse/rest/client/v1/pusher.py @@ -52,7 +52,7 @@ class PusherRestServlet(ClientV1RestServlet): if i not in content: missing.append(i) if len(missing): - raise SynapseError(400, "Missing parameters: "+','.join(missing), + raise SynapseError(400, "Missing parameters: " + ','.join(missing), errcode=Codes.MISSING_PARAM) logger.debug("set pushkey %s to kind %s", content['pushkey'], content['kind']) @@ -83,7 +83,7 @@ class PusherRestServlet(ClientV1RestServlet): data=content['data'] ) except PusherConfigException as pce: - raise SynapseError(400, "Config Error: "+pce.message, + raise SynapseError(400, "Config Error: " + pce.message, errcode=Codes.MISSING_PARAM) defer.returnValue((200, {})) diff --git a/synapse/rest/client/v1/register.py b/synapse/rest/client/v1/register.py index 5378a9a938..2bfd4d96bf 100644 --- a/synapse/rest/client/v1/register.py +++ b/synapse/rest/client/v1/register.py @@ -38,7 +38,8 @@ logger = logging.getLogger(__name__) if hasattr(hmac, "compare_digest"): compare_digest = hmac.compare_digest else: - compare_digest = lambda a, b: a == b + def compare_digest(a, b): + return a == b class RegisterRestServlet(ClientV1RestServlet): diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 5d50dd9e3d..56a5bbec30 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -34,7 +34,8 @@ from synapse.util.async import run_on_reactor if hasattr(hmac, "compare_digest"): compare_digest = hmac.compare_digest else: - compare_digest = lambda a, b: a == b + def compare_digest(a, b): + return a == b logger = logging.getLogger(__name__) diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index 349ef6b396..ca5468c402 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -26,9 +26,7 @@ class 
VersionsRestServlet(RestServlet): def on_GET(self, request): return (200, { - "versions": [ - "r0.0.1", - ] + "versions": ["r0.0.1"] }) diff --git a/synapse/server.py b/synapse/server.py index 5fee7fe130..368d615576 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -23,7 +23,7 @@ from twisted.web.client import BrowserLikePolicyForHTTPS from twisted.enterprise import adbapi from synapse.federation import initialize_http_replication -from synapse.http.client import SimpleHttpClient, InsecureInterceptableContextFactory +from synapse.http.client import SimpleHttpClient, InsecureInterceptableContextFactory from synapse.notifier import Notifier from synapse.api.auth import Auth from synapse.handlers import Handlers diff --git a/synapse/state.py b/synapse/state.py index 0acf309fe0..b9a1387520 100644 --- a/synapse/state.py +++ b/synapse/state.py @@ -63,7 +63,7 @@ class StateHandler(object): cache_name="state_cache", clock=self.clock, max_len=SIZE_OF_CACHE, - expiry_ms=EVICTION_TIMEOUT_SECONDS*1000, + expiry_ms=EVICTION_TIMEOUT_SECONDS * 1000, reset_expiry_on_get=True, ) diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index c91c7a3729..5a9e7720d9 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -59,7 +59,7 @@ logger = logging.getLogger(__name__) # Number of msec of granularity to store the user IP 'last seen' time. Smaller # times give more inserts into the database even for readonly API hits # 120 seconds == 2 minutes -LAST_SEEN_GRANULARITY = 120*1000 +LAST_SEEN_GRANULARITY = 120 * 1000 class DataStore(RoomMemberStore, RoomStore, diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 5e77320540..cfb87d9328 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -185,7 +185,7 @@ class SQLBaseStore(object): time_then = self._previous_loop_ts self._previous_loop_ts = time_now - ratio = (curr - prev)/(time_now - time_then) + ratio = (curr - prev) / (time_now - time_then) top_three_counters = self._txn_perf_counters.interval( time_now - time_then, limit=3 @@ -643,7 +643,10 @@ class SQLBaseStore(object): if not iterable: defer.returnValue(results) - chunks = [iterable[i:i+batch_size] for i in xrange(0, len(iterable), batch_size)] + chunks = [ + iterable[i:i + batch_size] + for i in xrange(0, len(iterable), batch_size) + ] for chunk in chunks: rows = yield self.runInteraction( desc, diff --git a/synapse/storage/engines/sqlite3.py b/synapse/storage/engines/sqlite3.py index 400c10103c..91fac33b8b 100644 --- a/synapse/storage/engines/sqlite3.py +++ b/synapse/storage/engines/sqlite3.py @@ -54,7 +54,7 @@ class Sqlite3Engine(object): def _parse_match_info(buf): bufsize = len(buf) - return [struct.unpack('@I', buf[i:i+4])[0] for i in range(0, bufsize, 4)] + return [struct.unpack('@I', buf[i:i + 4])[0] for i in range(0, bufsize, 4)] def _rank(raw_match_info): diff --git a/synapse/storage/event_federation.py b/synapse/storage/event_federation.py index 5f32eec6f8..ce2c794025 100644 --- a/synapse/storage/event_federation.py +++ b/synapse/storage/event_federation.py @@ -58,7 +58,7 @@ class EventFederationStore(SQLBaseStore): new_front = set() front_list = list(front) chunks = [ - front_list[x:x+100] + front_list[x:x + 100] for x in xrange(0, len(front), 100) ] for chunk in chunks: diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 5e85552029..4d7cdd00d0 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -84,7 +84,7 @@ class EventsStore(SQLBaseStore): 
event.internal_metadata.stream_ordering = stream chunks = [ - events_and_contexts[x:x+100] + events_and_contexts[x:x + 100] for x in xrange(0, len(events_and_contexts), 100) ] @@ -740,7 +740,7 @@ class EventsStore(SQLBaseStore): rows = [] N = 200 for i in range(1 + len(events) / N): - evs = events[i*N:(i + 1)*N] + evs = events[i * N:(i + 1) * N] if not evs: break @@ -755,7 +755,7 @@ class EventsStore(SQLBaseStore): " LEFT JOIN rejections as rej USING (event_id)" " LEFT JOIN redactions as r ON e.event_id = r.redacts" " WHERE e.event_id IN (%s)" - ) % (",".join(["?"]*len(evs)),) + ) % (",".join(["?"] * len(evs)),) txn.execute(sql, evs) rows.extend(self.cursor_to_dict(txn)) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 338a9d40d5..2c49a5e499 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -168,7 +168,7 @@ class StreamStore(SQLBaseStore): results = {} room_ids = list(room_ids) - for rm_ids in (room_ids[i:i+20] for i in xrange(0, len(room_ids), 20)): + for rm_ids in (room_ids[i:i + 20] for i in xrange(0, len(room_ids), 20)): res = yield defer.gatherResults([ self.get_room_events_stream_for_room( room_id, from_key, to_key, limit diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index f1fe963adf..7566d9eb33 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -46,7 +46,7 @@ class Clock(object): def looping_call(self, f, msec): l = task.LoopingCall(f) - l.start(msec/1000.0, now=False) + l.start(msec / 1000.0, now=False) return l def stop_looping_call(self, loop): diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 88e56e3302..e27917c63a 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -149,7 +149,7 @@ class CacheDescriptor(object): self.lru = lru self.tree = tree - self.arg_names = inspect.getargspec(orig).args[1:num_args+1] + self.arg_names = inspect.getargspec(orig).args[1:num_args + 1] if len(self.arg_names) < self.num_args: raise Exception( @@ -250,7 +250,7 @@ class CacheListDescriptor(object): self.num_args = num_args self.list_name = list_name - self.arg_names = inspect.getargspec(orig).args[1:num_args+1] + self.arg_names = inspect.getargspec(orig).args[1:num_args + 1] self.list_pos = self.arg_names.index(self.list_name) self.cache = cache diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py index 494226f5ea..62cae99649 100644 --- a/synapse/util/caches/expiringcache.py +++ b/synapse/util/caches/expiringcache.py @@ -55,7 +55,7 @@ class ExpiringCache(object): def f(): self._prune_cache() - self._clock.looping_call(f, self._expiry_ms/2) + self._clock.looping_call(f, self._expiry_ms / 2) def __setitem__(self, key, value): now = self._clock.time_msec() diff --git a/synapse/util/caches/treecache.py b/synapse/util/caches/treecache.py index 29d02f7e95..03bc1401b7 100644 --- a/synapse/util/caches/treecache.py +++ b/synapse/util/caches/treecache.py @@ -58,7 +58,7 @@ class TreeCache(object): if n: break - node_and_keys[i+1][0].pop(k) + node_and_keys[i + 1][0].pop(k) popped, cnt = _strip_and_count_entires(popped) self.size -= cnt diff --git a/synapse/util/logutils.py b/synapse/util/logutils.py index d5b1a37eff..c37a157787 100644 --- a/synapse/util/logutils.py +++ b/synapse/util/logutils.py @@ -111,7 +111,7 @@ def time_function(f): _log_debug_as_f( f, "[FUNC END] {%s-%d} %f", - (func_name, id, end-start,), + (func_name, id, end - start,), ) return r diff --git a/synapse/util/ratelimitutils.py 
b/synapse/util/ratelimitutils.py index c37d6f12e3..ea321bc6a9 100644 --- a/synapse/util/ratelimitutils.py +++ b/synapse/util/ratelimitutils.py @@ -163,7 +163,7 @@ class _PerHostRatelimiter(object): "Ratelimit [%s]: sleeping req", id(request_id), ) - ret_defer = sleep(self.sleep_msec/1000.0) + ret_defer = sleep(self.sleep_msec / 1000.0) self.sleeping_requests.add(request_id) From a644ac6d2c710d8dbe59d8c2fbf8ee95e1e32bad Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Tue, 2 Feb 2016 17:23:12 +0000 Subject: [PATCH 090/349] Explain what W503 is --- setup.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index e7fc5ffe78..f8cc13c840 100644 --- a/setup.cfg +++ b/setup.cfg @@ -16,4 +16,4 @@ ignore = [flake8] max-line-length = 90 -ignore = W503 +ignore = W503 ; W503 requires that binary operators be at the end, not start, of lines. Erik doesn't like it. From 2df6114bc449194fa99aae3f7c41b37e1ea0dbcf Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Tue, 2 Feb 2016 19:21:49 +0000 Subject: [PATCH 091/349] Log more diagnostics for unrecognised access tokens --- synapse/api/auth.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/synapse/api/auth.py b/synapse/api/auth.py index c5a2865e26..5bba9343f6 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -696,6 +696,7 @@ class Auth(object): def _look_up_user_by_access_token(self, token): ret = yield self.store.get_user_by_access_token(token) if not ret: + logger.warn("Unrecognised access token - not in store: %s" % (token,)) raise AuthError( self.TOKEN_NOT_FOUND_HTTP_STATUS, "Unrecognised access token.", errcode=Codes.UNKNOWN_TOKEN @@ -713,6 +714,7 @@ class Auth(object): token = request.args["access_token"][0] service = yield self.store.get_app_service_by_token(token) if not service: + logger.warn("Unrecognised appservice access token: %s" % (token,)) raise AuthError( self.TOKEN_NOT_FOUND_HTTP_STATUS, "Unrecognised access token.", From b32121a5d1c833ab3e2c164e642ef982364069e2 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 3 Feb 2016 10:30:56 +0000 Subject: [PATCH 092/349] Unused import --- synapse/storage/room.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/room.py b/synapse/storage/room.py index 1b6311f332..dc09a3aaba 100644 --- a/synapse/storage/room.py +++ b/synapse/storage/room.py @@ -18,7 +18,7 @@ from twisted.internet import defer from synapse.api.errors import StoreError from ._base import SQLBaseStore -from synapse.util.caches.descriptors import cachedInlineCallbacks, cached +from synapse.util.caches.descriptors import cachedInlineCallbacks from .engines import PostgresEngine, Sqlite3Engine import collections From 771528ab1323715271b9e968d2d337b88910fb2f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 3 Feb 2016 10:50:49 +0000 Subject: [PATCH 093/349] Change event_push_actions_rm_tokens schema --- synapse/handlers/sync.py | 6 +-- synapse/push/__init__.py | 2 +- synapse/storage/event_push_actions.py | 47 +++++++++++++------ synapse/storage/prepare_database.py | 2 +- .../storage/schema/delta/29/push_actions.sql | 31 ++++++++++++ 5 files changed, 67 insertions(+), 21 deletions(-) create mode 100644 synapse/storage/schema/delta/29/push_actions.sql diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index dc686db541..0292e06733 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -706,10 +706,8 @@ class SyncHandler(BaseHandler): ) if notifs is not None: - unread_notifications["notification_count"] = 
len(notifs) - unread_notifications["highlight_count"] = len([ - 1 for notif in notifs if _action_has_highlight(notif["actions"]) - ]) + unread_notifications["notification_count"] = notifs["notify_count"] + unread_notifications["highlight_count"] = notifs["highlight_count"] logger.debug("Room sync: %r", room_sync) diff --git a/synapse/push/__init__.py b/synapse/push/__init__.py index 9bc0b356f4..8b9d0f03e5 100644 --- a/synapse/push/__init__.py +++ b/synapse/push/__init__.py @@ -316,7 +316,7 @@ class Pusher(object): r.room_id, self.user_id, last_unread_event_id ) ) - badge += len(notifs) + badge += notifs["notify_count"] defer.returnValue(badge) diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py index a05c4f84cf..aca3219206 100644 --- a/synapse/storage/event_push_actions.py +++ b/synapse/storage/event_push_actions.py @@ -37,7 +37,11 @@ class EventPushActionsStore(SQLBaseStore): 'event_id': event.event_id, 'user_id': uid, 'profile_tag': profile_tag, - 'actions': json.dumps(actions) + 'actions': json.dumps(actions), + 'stream_ordering': event.internal_metadata.stream_ordering, + 'topological_ordering': event.depth, + 'notif': 1, + 'highlight': 1 if _action_has_highlight(actions) else 0, }) def f(txn): @@ -74,26 +78,28 @@ class EventPushActionsStore(SQLBaseStore): topological_ordering = results[0][1] sql = ( - "SELECT ea.event_id, ea.actions" - " FROM event_push_actions ea, events e" - " WHERE ea.room_id = e.room_id" - " AND ea.event_id = e.event_id" - " AND ea.user_id = ?" - " AND ea.room_id = ?" + "SELECT sum(notif), sum(highlight)" + " FROM event_push_actions ea" + " WHERE" + " user_id = ?" + " AND room_id = ?" " AND (" - " e.topological_ordering > ?" - " OR (e.topological_ordering = ? AND e.stream_ordering > ?)" + " topological_ordering > ?" + " OR (topological_ordering = ? AND stream_ordering > ?)" ")" ) txn.execute(sql, ( user_id, room_id, topological_ordering, topological_ordering, stream_ordering - ) - ) - return [ - {"event_id": row[0], "actions": json.loads(row[1])} - for row in txn.fetchall() - ] + )) + row = txn.fetchone() + if row: + return { + "notify_count": row[0] or 0, + "highlight_count": row[1] or 0, + } + else: + return {"notify_count": 0, "highlight_count": 0} ret = yield self.runInteraction( "get_unread_event_push_actions_by_room", @@ -117,3 +123,14 @@ class EventPushActionsStore(SQLBaseStore): "remove_push_actions_for_event_id", f ) + + +def _action_has_highlight(actions): + for action in actions: + try: + if action.get("set_tweak", None) == "highlight": + return action.get("value", True) + except AttributeError: + pass + + return False diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index c1f5f99789..d782b8e25b 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -25,7 +25,7 @@ logger = logging.getLogger(__name__) # Remember to update this number every time a change is made to database # schema files, so the users will be informed on server restarts. 
-SCHEMA_VERSION = 28 +SCHEMA_VERSION = 29 dir_path = os.path.abspath(os.path.dirname(__file__)) diff --git a/synapse/storage/schema/delta/29/push_actions.sql b/synapse/storage/schema/delta/29/push_actions.sql new file mode 100644 index 0000000000..7e7b09820a --- /dev/null +++ b/synapse/storage/schema/delta/29/push_actions.sql @@ -0,0 +1,31 @@ +/* Copyright 2016 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +ALTER TABLE event_push_actions ADD COLUMN topological_ordering BIGINT; +ALTER TABLE event_push_actions ADD COLUMN stream_ordering BIGINT; +ALTER TABLE event_push_actions ADD COLUMN notif SMALLINT; +ALTER TABLE event_push_actions ADD COLUMN highlight SMALLINT; + +UPDATE event_push_actions SET stream_ordering = ( + SELECT stream_ordering FROM events WHERE event_id = event_push_actions.event_id +), topological_ordering = ( + SELECT topological_ordering FROM events WHERE event_id = event_push_actions.event_id +); + +UPDATE event_push_actions SET notif = 1, highlight = 0; + +CREATE INDEX event_push_actions_rm_tokens on event_push_actions( + user_id, room_id, topological_ordering, stream_ordering +); From 5f280837a6757c3e9fe226c3fac35409c23d3967 Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Wed, 3 Feb 2016 11:27:39 +0000 Subject: [PATCH 094/349] Add macaroon inspection script --- scripts-dev/dump_macaroon.py | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100755 scripts-dev/dump_macaroon.py diff --git a/scripts-dev/dump_macaroon.py b/scripts-dev/dump_macaroon.py new file mode 100755 index 0000000000..6e45be75d6 --- /dev/null +++ b/scripts-dev/dump_macaroon.py @@ -0,0 +1,24 @@ +#!/usr/bin/env python2 + +import pymacaroons +import sys + +if len(sys.argv) == 1: + sys.stderr.write("usage: %s macaroon [key]\n" % (sys.argv[0],)) + sys.exit(1) + +macaroon_string = sys.argv[1] +key = sys.argv[2] if len(sys.argv) > 2 else None + +macaroon = pymacaroons.Macaroon.deserialize(macaroon_string) +print macaroon.inspect() + +print "" + +verifier = pymacaroons.Verifier() +verifier.satisfy_general(lambda c: True) +try: + verifier.verify(macaroon, key) + print "Signature is correct" +except Exception as e: + print e.message From 772b45c745688745d4c6d6b60047eeafaef773ee Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 3 Feb 2016 11:43:26 +0000 Subject: [PATCH 095/349] Remove unused method --- synapse/handlers/room.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 0daf2ce28e..42581289ef 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -942,14 +942,6 @@ class RoomListHandler(BaseHandler): # FIXME (erikj): START is no longer a valid value defer.returnValue({"start": "START", "end": "END", "chunk": result}) - @defer.inlineCallbacks - def get_room_avatar_url(self, room_id): - event = yield self.hs.get_state_handler().get_current_state( - room_id, "m.room.avatar" - ) - if event and "url" in event.content: - defer.returnValue(event.content["url"]) - class 
RoomContextHandler(BaseHandler): @defer.inlineCallbacks From 9cd80a7b5c62d2687fd4a2e6481a7701b6d782c5 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 3 Feb 2016 11:52:57 +0000 Subject: [PATCH 096/349] PEP8 --- synapse/handlers/room.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 42581289ef..bfd7e44e9f 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -932,7 +932,7 @@ class RoomListHandler(BaseHandler): defer.returnValue(result) result = [] - for chunk in (room_ids[i:i+10] for i in xrange(0, len(room_ids), 10)): + for chunk in (room_ids[i:i + 10] for i in xrange(0, len(room_ids), 10)): chunk_result = yield defer.gatherResults([ handle_room(room_id) for room_id in chunk From f8aae79a72e462f4af65a22d0665192867522174 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 3 Feb 2016 13:23:32 +0000 Subject: [PATCH 097/349] Simplify get_rooms --- synapse/app/homeserver.py | 4 +- synapse/storage/room.py | 84 ++++---------------------------------- tests/storage/test_room.py | 26 ------------ 3 files changed, 9 insertions(+), 105 deletions(-) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index c3066d6a0d..0a6a19033d 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -674,8 +674,8 @@ def run(hs): stats["uptime_seconds"] = uptime stats["total_users"] = yield hs.get_datastore().count_all_users() - all_rooms = yield hs.get_datastore().get_rooms(False) - stats["total_room_count"] = len(all_rooms) + room_count = yield hs.get_datastore().get_room_count() + stats["total_room_count"] = room_count stats["daily_active_users"] = yield hs.get_datastore().count_daily_users() daily_messages = yield hs.get_datastore().count_daily_messages() diff --git a/synapse/storage/room.py b/synapse/storage/room.py index dc09a3aaba..46ab38a313 100644 --- a/synapse/storage/room.py +++ b/synapse/storage/room.py @@ -87,90 +87,20 @@ class RoomStore(SQLBaseStore): desc="get_public_room_ids", ) - @defer.inlineCallbacks - def get_rooms(self, is_public): - """Retrieve a list of all public rooms. - - Args: - is_public (bool): True if the rooms returned should be public. - Returns: - A list of room dicts containing at least a "room_id" key, a - "topic" key if one is set, and a "name" key if one is set + def get_room_count(self): + """Retrieve a list of all rooms """ def f(txn): - def subquery(table_name, column_name=None): - column_name = column_name or table_name - return ( - "SELECT %(table_name)s.event_id as event_id, " - "%(table_name)s.room_id as room_id, %(column_name)s " - "FROM %(table_name)s " - "INNER JOIN current_state_events as c " - "ON c.event_id = %(table_name)s.event_id " % { - "column_name": column_name, - "table_name": table_name, - } - ) + sql = "SELECT count(*) FROM rooms" + txn.execute(sql) + row = txn.fetchone() + return row[0] or 0 - sql = ( - "SELECT" - " r.room_id," - " max(n.name)," - " max(t.topic)," - " max(v.history_visibility)," - " max(g.guest_access)" - " FROM rooms AS r" - " LEFT JOIN (%(topic)s) AS t ON t.room_id = r.room_id" - " LEFT JOIN (%(name)s) AS n ON n.room_id = r.room_id" - " LEFT JOIN (%(history_visibility)s) AS v ON v.room_id = r.room_id" - " LEFT JOIN (%(guest_access)s) AS g ON g.room_id = r.room_id" - " WHERE r.is_public = ?" 
- " GROUP BY r.room_id" % { - "topic": subquery("topics", "topic"), - "name": subquery("room_names", "name"), - "history_visibility": subquery("history_visibility"), - "guest_access": subquery("guest_access"), - } - ) - - txn.execute(sql, (is_public,)) - - rows = txn.fetchall() - - for i, row in enumerate(rows): - room_id = row[0] - aliases = self._simple_select_onecol_txn( - txn, - table="room_aliases", - keyvalues={ - "room_id": room_id - }, - retcol="room_alias", - ) - - rows[i] = list(row) + [aliases] - - return rows - - rows = yield self.runInteraction( + return self.runInteraction( "get_rooms", f ) - ret = [ - { - "room_id": r[0], - "name": r[1], - "topic": r[2], - "world_readable": r[3] == "world_readable", - "guest_can_join": r[4] == "can_join", - "aliases": r[5], - } - for r in rows - if r[5] # We only return rooms that have at least one alias. - ] - - defer.returnValue(ret) - def _store_room_topic_txn(self, txn, event): if hasattr(event, "content") and "topic" in event.content: self._simple_insert_txn( diff --git a/tests/storage/test_room.py b/tests/storage/test_room.py index 7fdbfc60f1..0baaf3df21 100644 --- a/tests/storage/test_room.py +++ b/tests/storage/test_room.py @@ -51,32 +51,6 @@ class RoomStoreTestCase(unittest.TestCase): (yield self.store.get_room(self.room.to_string())) ) - @defer.inlineCallbacks - def test_get_rooms(self): - # get_rooms does an INNER JOIN on the room_aliases table :( - - rooms = yield self.store.get_rooms(is_public=True) - # Should be empty before we add the alias - self.assertEquals([], rooms) - - yield self.store.create_room_alias_association( - room_alias=self.alias, - room_id=self.room.to_string(), - servers=["test"] - ) - - rooms = yield self.store.get_rooms(is_public=True) - - self.assertEquals(1, len(rooms)) - self.assertEquals({ - "name": None, - "room_id": self.room.to_string(), - "topic": None, - "aliases": [self.alias.to_string()], - "world_readable": False, - "guest_can_join": False, - }, rooms[0]) - class RoomEventsStoreTestCase(unittest.TestCase): From d4f72a5bfb95d07d5af3f49c736823840659101a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 3 Feb 2016 13:51:25 +0000 Subject: [PATCH 098/349] Allowing tagging log contexts --- synapse/handlers/sync.py | 10 ++++++++++ synapse/http/server.py | 41 ++++++++++++++++++++++++-------------- synapse/util/logcontext.py | 7 ++++++- 3 files changed, 42 insertions(+), 16 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index dc686db541..72ccaf1e3f 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -18,6 +18,7 @@ from ._base import BaseHandler from synapse.streams.config import PaginationConfig from synapse.api.constants import Membership, EventTypes from synapse.util import unwrapFirstError +from synapse.util.logcontext import LoggingContext from twisted.internet import defer @@ -140,6 +141,15 @@ class SyncHandler(BaseHandler): A Deferred SyncResult. """ + context = LoggingContext.current_context() + if context: + if since_token is None: + context.tag = "initial_sync" + elif full_state: + context.tag = "full_state_sync" + else: + context.tag = "incremental_sync" + if timeout == 0 or since_token is None or full_state: # we are going to return immediately, so don't bother calling # notifier.wait_for_events. 
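
The counterpart to the tags set above is reading them back when the request
finishes, which is what the http/server.py and logcontext.py changes below wire
up: the tag becomes an extra label ("initial_sync", "full_state_sync" or
"incremental_sync") on the per-servlet request metrics. Roughly, in sketch form
rather than the patch itself:

    from synapse.util.logcontext import LoggingContext

    def current_request_tag():
        context = LoggingContext.current_context()
        # The sentinel context is made falsey below (__nonzero__), so
        # untagged or background work falls back to an empty label.
        return context.tag if context else ""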
diff --git a/synapse/http/server.py b/synapse/http/server.py index 10d1fcd3f6..c250a4604f 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -41,7 +41,7 @@ metrics = synapse.metrics.get_metrics_for(__name__) incoming_requests_counter = metrics.register_counter( "requests", - labels=["method", "servlet"], + labels=["method", "servlet", "tag"], ) outgoing_responses_counter = metrics.register_counter( "responses", @@ -50,23 +50,23 @@ outgoing_responses_counter = metrics.register_counter( response_timer = metrics.register_distribution( "response_time", - labels=["method", "servlet"] + labels=["method", "servlet", "tag"] ) response_ru_utime = metrics.register_distribution( - "response_ru_utime", labels=["method", "servlet"] + "response_ru_utime", labels=["method", "servlet", "tag"] ) response_ru_stime = metrics.register_distribution( - "response_ru_stime", labels=["method", "servlet"] + "response_ru_stime", labels=["method", "servlet", "tag"] ) response_db_txn_count = metrics.register_distribution( - "response_db_txn_count", labels=["method", "servlet"] + "response_db_txn_count", labels=["method", "servlet", "tag"] ) response_db_txn_duration = metrics.register_distribution( - "response_db_txn_duration", labels=["method", "servlet"] + "response_db_txn_duration", labels=["method", "servlet", "tag"] ) @@ -226,7 +226,6 @@ class JsonResource(HttpServer, resource.Resource): servlet_classname = servlet_instance.__class__.__name__ else: servlet_classname = "%r" % callback - incoming_requests_counter.inc(request.method, servlet_classname) args = [ urllib.unquote(u).decode("UTF-8") if u else u for u in m.groups() @@ -237,21 +236,33 @@ class JsonResource(HttpServer, resource.Resource): code, response = callback_return self._send_response(request, code, response) - response_timer.inc_by( - self.clock.time_msec() - start, request.method, servlet_classname - ) - try: context = LoggingContext.current_context() + + tag = "" + if context: + tag = context.tag + + incoming_requests_counter.inc(request.method, servlet_classname, tag) + + response_timer.inc_by( + self.clock.time_msec() - start, request.method, + servlet_classname, tag + ) + ru_utime, ru_stime = context.get_resource_usage() - response_ru_utime.inc_by(ru_utime, request.method, servlet_classname) - response_ru_stime.inc_by(ru_stime, request.method, servlet_classname) + response_ru_utime.inc_by( + ru_utime, request.method, servlet_classname, tag + ) + response_ru_stime.inc_by( + ru_stime, request.method, servlet_classname, tag + ) response_db_txn_count.inc_by( - context.db_txn_count, request.method, servlet_classname + context.db_txn_count, request.method, servlet_classname, tag ) response_db_txn_duration.inc_by( - context.db_txn_duration, request.method, servlet_classname + context.db_txn_duration, request.method, servlet_classname, tag ) except: pass diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py index 0595c0fa4f..e701092cd8 100644 --- a/synapse/util/logcontext.py +++ b/synapse/util/logcontext.py @@ -47,7 +47,8 @@ class LoggingContext(object): """ __slots__ = [ - "parent_context", "name", "usage_start", "usage_end", "main_thread", "__dict__" + "parent_context", "name", "usage_start", "usage_end", "main_thread", + "__dict__", "tag", ] thread_local = threading.local() @@ -72,6 +73,9 @@ class LoggingContext(object): def add_database_transaction(self, duration_ms): pass + def __nonzero__(self): + return False + sentinel = Sentinel() def __init__(self, name=None): @@ -83,6 +87,7 @@ class LoggingContext(object): 
self.db_txn_duration = 0. self.usage_start = None self.main_thread = threading.current_thread() + self.tag = "" def __str__(self): return "%s@%x" % (self.name, id(self)) From 5054806ec1f64fd784d9e74d73a678643d539c3f Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Wed, 3 Feb 2016 14:42:01 +0000 Subject: [PATCH 099/349] Rename config field to reflect yaml name --- synapse/config/registration.py | 6 +++--- synapse/rest/client/v1/register.py | 4 ++-- synapse/rest/client/v2_alpha/register.py | 2 +- tests/rest/client/v1/test_events.py | 2 +- tests/rest/client/v2_alpha/test_register.py | 4 ++-- tests/utils.py | 2 +- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/synapse/config/registration.py b/synapse/config/registration.py index 76d2d2d640..90ea19bd4b 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -23,11 +23,11 @@ from distutils.util import strtobool class RegistrationConfig(Config): def read_config(self, config): - self.disable_registration = not bool( + self.enable_registration = bool( strtobool(str(config["enable_registration"])) ) if "disable_registration" in config: - self.disable_registration = bool( + self.enable_registration = not bool( strtobool(str(config["disable_registration"])) ) @@ -78,6 +78,6 @@ class RegistrationConfig(Config): def read_arguments(self, args): if args.enable_registration is not None: - self.disable_registration = not bool( + self.enable_registration = bool( strtobool(str(args.enable_registration)) ) diff --git a/synapse/rest/client/v1/register.py b/synapse/rest/client/v1/register.py index 2bfd4d96bf..6d6d03c34c 100644 --- a/synapse/rest/client/v1/register.py +++ b/synapse/rest/client/v1/register.py @@ -59,7 +59,7 @@ class RegisterRestServlet(ClientV1RestServlet): # } # TODO: persistent storage self.sessions = {} - self.disable_registration = hs.config.disable_registration + self.enable_registration = hs.config.enable_registration def on_GET(self, request): if self.hs.config.enable_registration_captcha: @@ -113,7 +113,7 @@ class RegisterRestServlet(ClientV1RestServlet): is_using_shared_secret = login_type == LoginType.SHARED_SECRET can_register = ( - not self.disable_registration + self.enable_registration or is_application_server or is_using_shared_secret ) diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 56a5bbec30..ec5c21fa1f 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -117,7 +117,7 @@ class RegisterRestServlet(RestServlet): return # == Normal User Registration == (everyone else) - if self.hs.config.disable_registration: + if not self.hs.config.enable_registration: raise SynapseError(403, "Registration has been disabled") guest_access_token = body.get("guest_access_token", None) diff --git a/tests/rest/client/v1/test_events.py b/tests/rest/client/v1/test_events.py index b260e269ac..e9698bfdc9 100644 --- a/tests/rest/client/v1/test_events.py +++ b/tests/rest/client/v1/test_events.py @@ -122,7 +122,7 @@ class EventStreamPermissionsTestCase(RestTestCase): self.ratelimiter = hs.get_ratelimiter() self.ratelimiter.send_message.return_value = (True, 0) hs.config.enable_registration_captcha = False - hs.config.disable_registration = False + hs.config.enable_registration = True hs.get_handlers().federation_handler = Mock() diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index f9a2b22485..df0841b0b1 100644 --- 
a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -41,7 +41,7 @@ class RegisterRestServletTestCase(unittest.TestCase): self.hs.hostname = "superbig~testing~thing.com" self.hs.get_auth = Mock(return_value=self.auth) self.hs.get_handlers = Mock(return_value=self.handlers) - self.hs.config.disable_registration = False + self.hs.config.enable_registration = True # init the thing we're testing self.servlet = RegisterRestServlet(self.hs) @@ -120,7 +120,7 @@ class RegisterRestServletTestCase(unittest.TestCase): })) def test_POST_disabled_registration(self): - self.hs.config.disable_registration = True + self.hs.config.enable_registration = False self.request_data = json.dumps({ "username": "kermit", "password": "monkey" diff --git a/tests/utils.py b/tests/utils.py index 431252a6f1..3b1eb50d8d 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -46,7 +46,7 @@ def setup_test_homeserver(name="test", datastore=None, config=None, **kargs): config = Mock() config.signing_key = [MockKey()] config.event_cache_size = 1 - config.disable_registration = False + config.enable_registration = True config.macaroon_secret_key = "not even a little secret" config.server_name = "server.under.test" config.trusted_third_party_id_servers = [] From 24277fbb97540499daf4984b3ac1a1b81a0a19de Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 3 Feb 2016 14:59:19 +0000 Subject: [PATCH 100/349] Don't return null if profile display name / avatar url isn't set: omit them instead --- synapse/rest/client/v1/profile.py | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/synapse/rest/client/v1/profile.py b/synapse/rest/client/v1/profile.py index b15defdd07..f294f3d7ee 100644 --- a/synapse/rest/client/v1/profile.py +++ b/synapse/rest/client/v1/profile.py @@ -33,7 +33,11 @@ class ProfileDisplaynameRestServlet(ClientV1RestServlet): user, ) - defer.returnValue((200, {"displayname": displayname})) + ret = {} + if displayname is not None: + ret["displayname"] = displayname + + defer.returnValue((200, {ret})) @defer.inlineCallbacks def on_PUT(self, request, user_id): @@ -66,7 +70,11 @@ class ProfileAvatarURLRestServlet(ClientV1RestServlet): user, ) - defer.returnValue((200, {"avatar_url": avatar_url})) + ret = {} + if avatar_url is not None: + ret["avatar_url"] = avatar_url + + defer.returnValue((200, ret)) @defer.inlineCallbacks def on_PUT(self, request, user_id): @@ -102,10 +110,13 @@ class ProfileRestServlet(ClientV1RestServlet): user, ) - defer.returnValue((200, { - "displayname": displayname, - "avatar_url": avatar_url - })) + ret = {} + if displayname is not None: + ret["displayname"] = displayname + if avatar_url is not None: + ret["avatar_url"] = avatar_url + + defer.returnValue((200, ret)) def register_servlets(hs, http_server): From 156cea5b45acac88a030f5b469c1921a2f3573cc Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 3 Feb 2016 15:04:51 +0000 Subject: [PATCH 101/349] No braces here --- synapse/rest/client/v1/profile.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/rest/client/v1/profile.py b/synapse/rest/client/v1/profile.py index f294f3d7ee..3c5a212920 100644 --- a/synapse/rest/client/v1/profile.py +++ b/synapse/rest/client/v1/profile.py @@ -37,7 +37,7 @@ class ProfileDisplaynameRestServlet(ClientV1RestServlet): if displayname is not None: ret["displayname"] = displayname - defer.returnValue((200, {ret})) + defer.returnValue((200, ret)) @defer.inlineCallbacks def on_PUT(self, request, user_id): From 
33c71c3a4b06852285d8e446cbe2f57293ae0975 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 3 Feb 2016 16:17:18 +0000 Subject: [PATCH 102/349] Preserve log context over when deferring to thread pool in media repo --- synapse/rest/media/v1/base_resource.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/synapse/rest/media/v1/base_resource.py b/synapse/rest/media/v1/base_resource.py index bdc65f0198..58d56ec7a4 100644 --- a/synapse/rest/media/v1/base_resource.py +++ b/synapse/rest/media/v1/base_resource.py @@ -28,6 +28,7 @@ from twisted.protocols.basic import FileSender from synapse.util.async import ObservableDeferred from synapse.util.stringutils import is_ascii +from synapse.util.logcontext import preserve_context_over_fn import os @@ -276,7 +277,8 @@ class BaseMediaResource(Resource): ) self._makedirs(t_path) - t_len = yield threads.deferToThread( + t_len = yield preserve_context_over_fn( + threads.deferToThread, self._generate_thumbnail, input_path, t_path, t_width, t_height, t_method, t_type ) @@ -298,7 +300,8 @@ class BaseMediaResource(Resource): ) self._makedirs(t_path) - t_len = yield threads.deferToThread( + t_len = yield preserve_context_over_fn( + threads.deferToThread, self._generate_thumbnail, input_path, t_path, t_width, t_height, t_method, t_type ) @@ -372,7 +375,7 @@ class BaseMediaResource(Resource): media_id, t_width, t_height, t_type, t_method, t_len )) - yield threads.deferToThread(generate_thumbnails) + yield preserve_context_over_fn(threads.deferToThread, generate_thumbnails) for l in local_thumbnails: yield self.store.store_local_thumbnail(*l) @@ -445,7 +448,7 @@ class BaseMediaResource(Resource): t_width, t_height, t_type, t_method, t_len ]) - yield threads.deferToThread(generate_thumbnails) + yield preserve_context_over_fn(threads.deferToThread, generate_thumbnails) for r in remote_thumbnails: yield self.store.store_remote_media_thumbnail(*r) From b84d59c5f01914fe53d2673c5c7e372f5c61d088 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 3 Feb 2016 16:22:35 +0000 Subject: [PATCH 103/349] Add descriptions --- synapse/storage/appservice.py | 3 ++- synapse/storage/keys.py | 1 + synapse/storage/registration.py | 1 + synapse/storage/stream.py | 1 + 4 files changed, 5 insertions(+), 1 deletion(-) diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py index b5aa55c0a3..1100c67714 100644 --- a/synapse/storage/appservice.py +++ b/synapse/storage/appservice.py @@ -276,7 +276,8 @@ class ApplicationServiceTransactionStore(SQLBaseStore): "application_services_state", dict(as_id=service.id), ["state"], - allow_none=True + allow_none=True, + desc="get_appservice_state", ) if result: defer.returnValue(result.get("state")) diff --git a/synapse/storage/keys.py b/synapse/storage/keys.py index 8022b8cfc6..fd05bfe54e 100644 --- a/synapse/storage/keys.py +++ b/synapse/storage/keys.py @@ -39,6 +39,7 @@ class KeyStore(SQLBaseStore): table="server_tls_certificates", keyvalues={"server_name": server_name}, retcols=("tls_certificate",), + desc="get_server_certificate", ) tls_certificate = OpenSSL.crypto.load_certificate( OpenSSL.crypto.FILETYPE_ASN1, tls_certificate_bytes, diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 70cde0d04d..bd35e19be6 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -134,6 +134,7 @@ class RegistrationStore(SQLBaseStore): }, retcols=["name", "password_hash", "is_guest"], allow_none=True, + desc="get_user_by_id", ) def 
get_users_by_id_case_insensitive(self, user_id): diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 2c49a5e499..50436cb2d2 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -564,6 +564,7 @@ class StreamStore(SQLBaseStore): table="events", keyvalues={"event_id": event_id}, retcols=("stream_ordering", "topological_ordering"), + desc="get_topological_token_for_event", ).addCallback(lambda row: "t%d-%d" % ( row["topological_ordering"], row["stream_ordering"],) ) From aa4af94c69b8b1c263dacfce0358aaef97d3e323 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 3 Feb 2016 16:29:32 +0000 Subject: [PATCH 104/349] We return dicts now. --- synapse/storage/event_push_actions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py index aca3219206..2742e0c008 100644 --- a/synapse/storage/event_push_actions.py +++ b/synapse/storage/event_push_actions.py @@ -72,7 +72,7 @@ class EventPushActionsStore(SQLBaseStore): ) results = txn.fetchall() if len(results) == 0: - return [] + return {} stream_ordering = results[0][0] topological_ordering = results[0][1] From 709e09e1c3dfc85f585dbf782de9701c5db0a02d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 3 Feb 2016 16:32:20 +0000 Subject: [PATCH 105/349] Remove old log line --- synapse/rest/client/v1/room.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index c7ea15c624..81bfe377bd 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -429,8 +429,6 @@ class RoomEventContext(ClientV1RestServlet): serialize_event(event, time_now) for event in results["state"] ] - logger.info("Responding with %r", results) - defer.returnValue((200, results)) From 4d36e732307ad35eb070af384058f227d7d85dd0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 3 Feb 2016 16:35:00 +0000 Subject: [PATCH 106/349] Actually return something sensible --- synapse/storage/event_push_actions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py index 2742e0c008..d0a969f50b 100644 --- a/synapse/storage/event_push_actions.py +++ b/synapse/storage/event_push_actions.py @@ -72,7 +72,7 @@ class EventPushActionsStore(SQLBaseStore): ) results = txn.fetchall() if len(results) == 0: - return {} + return {"notify_count": 0, "highlight_count": 0} stream_ordering = results[0][0] topological_ordering = results[0][1] From 6a9f1209dfe5b3c43726aff24000129856bdc084 Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Fri, 5 Feb 2016 01:58:23 +0000 Subject: [PATCH 107/349] Error if macaroon key is missing from config Currently we store all access tokens in the DB, and fall back to that check if we can't validate the macaroon, so our fallback works here, but for guests, their macaroons don't get persisted, so we don't get to find them in the database. Each restart, we generate a new ephemeral key, so guests lose access after each server restart. I tried to fix up the config stuff to be less insane, but gave up, so instead I bolt on yet another piece of custom one-off insanity. Also, add some basic tests for config generation and loading. 
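
To make the failure mode described above concrete, here is a minimal, self-contained sketch rather than Synapse's real macaroon handling; every name in it is hypothetical. A guest token signed with an ephemeral per-restart key stops validating once the key is regenerated, and because guest tokens are never persisted, the database fallback finds nothing either.

import hashlib
import hmac
import os

def sign_token(user_id, secret_key):
    # stand-in for a macaroon: payload plus an HMAC under the server key
    digest = hmac.new(secret_key, user_id.encode("utf-8"), hashlib.sha256)
    return user_id + ":" + digest.hexdigest()

def validate_token(token, secret_key, persisted_tokens):
    user_id, _, digest = token.partition(":")
    expected = hmac.new(
        secret_key, user_id.encode("utf-8"), hashlib.sha256
    ).hexdigest()
    if hmac.compare_digest(digest, expected):
        return user_id                        # signature still valid
    return persisted_tokens.get(token)        # DB fallback (empty for guests)

old_key = os.urandom(32)                      # ephemeral key before restart
guest_token = sign_token("@1:example.com", old_key)

new_key = os.urandom(32)                      # regenerated on restart
print(validate_token(guest_token, new_key, persisted_tokens={}))  # -> None

Requiring macaroon_secret_key in the config, as this patch does, is what keeps the signing key identical across restarts so guest tokens keep validating.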
--- synapse/app/homeserver.py | 20 ++++++--- synapse/config/__main__.py | 7 +++- synapse/config/_base.py | 35 ++++++++++------ synapse/config/registration.py | 18 ++++++-- tests/config/__init__.py | 14 +++++++ tests/config/test_generate.py | 50 ++++++++++++++++++++++ tests/config/test_load.py | 77 ++++++++++++++++++++++++++++++++++ 7 files changed, 198 insertions(+), 23 deletions(-) create mode 100644 tests/config/__init__.py create mode 100644 tests/config/test_generate.py create mode 100644 tests/config/test_load.py diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 0a6a19033d..89238cb7e3 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -24,6 +24,7 @@ import resource import subprocess import sys import time +from synapse.config._base import ConfigError from synapse.python_dependencies import ( check_requirements, DEPENDENCY_LINKS @@ -350,11 +351,20 @@ def setup(config_options): Returns: HomeServer """ - config = HomeServerConfig.load_config( - "Synapse Homeserver", - config_options, - generate_section="Homeserver" - ) + try: + config = HomeServerConfig.load_config( + "Synapse Homeserver", + config_options, + generate_section="Homeserver" + ) + except ConfigError as e: + sys.stderr.write("\n" + e.message + "\n") + sys.exit(1) + + if not config: + # If a config isn't returned, and an exception isn't raised, we're just + # generating config files and shouldn't try to continue. + sys.exit(0) config.setup_logging() diff --git a/synapse/config/__main__.py b/synapse/config/__main__.py index ea9e7907a6..0a3b70e11f 100644 --- a/synapse/config/__main__.py +++ b/synapse/config/__main__.py @@ -12,6 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from synapse.config._base import ConfigError if __name__ == "__main__": import sys @@ -21,7 +22,11 @@ if __name__ == "__main__": if action == "read": key = sys.argv[2] - config = HomeServerConfig.load_config("", sys.argv[3:]) + try: + config = HomeServerConfig.load_config("", sys.argv[3:]) + except ConfigError as e: + sys.stderr.write("\n" + e.message + "\n") + sys.exit(1) print getattr(config, key) sys.exit(0) diff --git a/synapse/config/_base.py b/synapse/config/_base.py index a9304a11ba..15d78ff33a 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py @@ -17,7 +17,6 @@ import argparse import errno import os import yaml -import sys from textwrap import dedent @@ -136,13 +135,20 @@ class Config(object): results.append(getattr(cls, name)(self, *args, **kargs)) return results - def generate_config(self, config_dir_path, server_name, report_stats=None): + def generate_config( + self, + config_dir_path, + server_name, + is_generating_file, + report_stats=None, + ): default_config = "# vim:ft=yaml\n" default_config += "\n\n".join(dedent(conf) for conf in self.invoke_all( "default_config", config_dir_path=config_dir_path, server_name=server_name, + is_generating_file=is_generating_file, report_stats=report_stats, )) @@ -244,8 +250,10 @@ class Config(object): server_name = config_args.server_name if not server_name: - print "Must specify a server_name to a generate config for." - sys.exit(1) + raise ConfigError( + "Must specify a server_name to a generate config for." + " Pass -H server.name." 
+ ) if not os.path.exists(config_dir_path): os.makedirs(config_dir_path) with open(config_path, "wb") as config_file: @@ -253,6 +261,7 @@ class Config(object): config_dir_path=config_dir_path, server_name=server_name, report_stats=(config_args.report_stats == "yes"), + is_generating_file=True ) obj.invoke_all("generate_files", config) config_file.write(config_bytes) @@ -266,7 +275,7 @@ class Config(object): "If this server name is incorrect, you will need to" " regenerate the SSL certificates" ) - sys.exit(0) + return else: print ( "Config file %r already exists. Generating any missing key" @@ -302,25 +311,25 @@ class Config(object): specified_config.update(yaml_config) if "server_name" not in specified_config: - sys.stderr.write("\n" + MISSING_SERVER_NAME + "\n") - sys.exit(1) + raise ConfigError(MISSING_SERVER_NAME) server_name = specified_config["server_name"] _, config = obj.generate_config( config_dir_path=config_dir_path, - server_name=server_name + server_name=server_name, + is_generating_file=False, ) config.pop("log_config") config.update(specified_config) if "report_stats" not in config: - sys.stderr.write( - "\n" + MISSING_REPORT_STATS_CONFIG_INSTRUCTIONS + "\n" + - MISSING_REPORT_STATS_SPIEL + "\n") - sys.exit(1) + raise ConfigError( + MISSING_REPORT_STATS_CONFIG_INSTRUCTIONS + "\n" + + MISSING_REPORT_STATS_SPIEL + ) if generate_keys: obj.invoke_all("generate_files", config) - sys.exit(0) + return obj.invoke_all("read_config", config) diff --git a/synapse/config/registration.py b/synapse/config/registration.py index 90ea19bd4b..9b6dacc5b8 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -33,12 +33,24 @@ class RegistrationConfig(Config): self.registration_shared_secret = config.get("registration_shared_secret") self.macaroon_secret_key = config.get("macaroon_secret_key") + if self.macaroon_secret_key is None: + raise Exception( + "Config is missing missing macaroon_secret_key - please set it" + " in your config file." + ) self.bcrypt_rounds = config.get("bcrypt_rounds", 12) self.trusted_third_party_id_servers = config["trusted_third_party_id_servers"] self.allow_guest_access = config.get("allow_guest_access", False) - def default_config(self, **kwargs): + def default_config(self, is_generating_file=False, **kwargs): registration_shared_secret = random_string_with_symbols(50) + + macaroon_line = "" + if is_generating_file: + macaroon_line += '\n macaroon_secret_key: "%s"\n' % ( + random_string_with_symbols(50), + ) + macaroon_secret_key = random_string_with_symbols(50) return """\ ## Registration ## @@ -49,9 +61,7 @@ class RegistrationConfig(Config): # If set, allows registration by anyone who also has the shared # secret, even if registration is otherwise disabled. registration_shared_secret: "%(registration_shared_secret)s" - - macaroon_secret_key: "%(macaroon_secret_key)s" - +%(macaroon_line)s # Set the number of bcrypt rounds used to generate password hash. # Larger numbers increase the work factor needed to generate the hash. # The default number of rounds is 12. diff --git a/tests/config/__init__.py b/tests/config/__init__.py new file mode 100644 index 0000000000..b7df13c9ee --- /dev/null +++ b/tests/config/__init__.py @@ -0,0 +1,14 @@ +# -*- coding: utf-8 -*- +# Copyright 2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/config/test_generate.py b/tests/config/test_generate.py new file mode 100644 index 0000000000..4329d73974 --- /dev/null +++ b/tests/config/test_generate.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import os.path +import shutil +import tempfile +from synapse.config.homeserver import HomeServerConfig +from tests import unittest + + +class ConfigGenerationTestCase(unittest.TestCase): + + def setUp(self): + self.dir = tempfile.mkdtemp() + print self.dir + self.file = os.path.join(self.dir, "homeserver.yaml") + + def tearDown(self): + shutil.rmtree(self.dir) + + def test_generate_config_generates_files(self): + HomeServerConfig.load_config("", [ + "--generate-config", + "-c", self.file, + "--report-stats=yes", + "-H", "lemurs.win" + ]) + + self.assertSetEqual( + set([ + "homeserver.yaml", + "lemurs.win.log.config", + "lemurs.win.signing.key", + "lemurs.win.tls.crt", + "lemurs.win.tls.dh", + "lemurs.win.tls.key", + ]), + set(os.listdir(self.dir)) + ) diff --git a/tests/config/test_load.py b/tests/config/test_load.py new file mode 100644 index 0000000000..7f41279715 --- /dev/null +++ b/tests/config/test_load.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- +# Copyright 2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import os.path +import shutil +import tempfile +import yaml +from synapse.config.homeserver import HomeServerConfig +from tests import unittest + + +class ConfigLoadingTestCase(unittest.TestCase): + + def setUp(self): + self.dir = tempfile.mkdtemp() + print self.dir + self.file = os.path.join(self.dir, "homeserver.yaml") + + def tearDown(self): + shutil.rmtree(self.dir) + + def test_load_fails_if_server_name_missing(self): + self.generate_config_and_remove_lines_containing("server_name") + with self.assertRaises(Exception): + HomeServerConfig.load_config("", ["-c", self.file]) + + def test_generates_and_loads_macaroon_secret_key(self): + self.generate_config() + + with open(self.file, + "r") as f: + raw = yaml.load(f) + self.assertIn("macaroon_secret_key", raw) + + config = HomeServerConfig.load_config("", ["-c", self.file]) + self.assertTrue( + hasattr(config, "macaroon_secret_key"), + "Want config to have attr macaroon_secret_key" + ) + if len(config.macaroon_secret_key) < 5: + self.fail( + "Want macaroon secret key to be string of at least length 5," + "was: %r" % (config.macaroon_secret_key,) + ) + + def test_load_fails_if_macaroon_secret_key_missing(self): + self.generate_config_and_remove_lines_containing("macaroon") + with self.assertRaises(Exception): + HomeServerConfig.load_config("", ["-c", self.file]) + + def generate_config(self): + HomeServerConfig.load_config("", [ + "--generate-config", + "-c", self.file, + "--report-stats=yes", + "-H", "lemurs.win" + ]) + + def generate_config_and_remove_lines_containing(self, needle): + self.generate_config() + + with open(self.file, "r") as f: + contents = f.readlines() + contents = [l for l in contents if needle not in l] + with open(self.file, "w") as f: + f.write("".join(contents)) From 737c4223ef25fd2856d0ff6cc111d14b19f1adec Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Fri, 5 Feb 2016 10:47:46 +0000 Subject: [PATCH 108/349] Host /media/r0 as well as /media/v1 --- synapse/api/urls.py | 3 ++- synapse/app/homeserver.py | 6 ++++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/synapse/api/urls.py b/synapse/api/urls.py index 19824f9a02..0fd9b7f244 100644 --- a/synapse/api/urls.py +++ b/synapse/api/urls.py @@ -23,5 +23,6 @@ WEB_CLIENT_PREFIX = "/_matrix/client" CONTENT_REPO_PREFIX = "/_matrix/content" SERVER_KEY_PREFIX = "/_matrix/key/v1" SERVER_KEY_V2_PREFIX = "/_matrix/key/v2" -MEDIA_PREFIX = "/_matrix/media/v1" +MEDIA_PREFIX = "/_matrix/media/r0" +LEGACY_MEDIA_PREFIX = "/_matrix/media/v1" APP_SERVICE_PREFIX = "/_matrix/appservice/v1" diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 89238cb7e3..e5c7e39cf9 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -56,7 +56,7 @@ from synapse.rest.key.v1.server_key_resource import LocalKey from synapse.rest.key.v2 import KeyApiV2Resource from synapse.api.urls import ( FEDERATION_PREFIX, WEB_CLIENT_PREFIX, CONTENT_REPO_PREFIX, - SERVER_KEY_PREFIX, MEDIA_PREFIX, STATIC_PREFIX, + SERVER_KEY_PREFIX, LEGACY_MEDIA_PREFIX, MEDIA_PREFIX, STATIC_PREFIX, SERVER_KEY_V2_PREFIX, ) from synapse.config.homeserver import HomeServerConfig @@ -148,8 +148,10 @@ class SynapseHomeServer(HomeServer): }) if name in ["media", "federation", "client"]: + media_repo = MediaRepositoryResource(self) resources.update({ - MEDIA_PREFIX: MediaRepositoryResource(self), + MEDIA_PREFIX: media_repo, + LEGACY_MEDIA_PREFIX: media_repo, CONTENT_REPO_PREFIX: ContentRepoResource( self, self.config.uploads_path, self.auth, self.content_addr ), From 
79a1c0574b33955d28bfb12697ccd5a7be779b36 Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Fri, 5 Feb 2016 11:22:30 +0000 Subject: [PATCH 109/349] Allocate guest user IDs numericcally The current random IDs are ugly and confusing when presented in UIs. This makes them prettier and easier to read. Also, disable non-automated registration of numeric IDs so that we don't need to worry so much about people carving out our automated address space and us needing to keep retrying ID registration. --- synapse/handlers/register.py | 55 +++++++++++++++++++++------------ synapse/storage/registration.py | 36 +++++++++++++++++++++ 2 files changed, 72 insertions(+), 19 deletions(-) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index b8fbcf9233..2660fd21a2 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -21,7 +21,6 @@ from synapse.api.errors import ( AuthError, Codes, SynapseError, RegistrationError, InvalidCaptchaError ) from ._base import BaseHandler -import synapse.util.stringutils as stringutils from synapse.util.async import run_on_reactor from synapse.http.client import CaptchaServerHttpClient @@ -45,6 +44,8 @@ class RegistrationHandler(BaseHandler): self.distributor.declare("registered_user") self.captcha_client = CaptchaServerHttpClient(hs) + self._next_generated_user_id = None + @defer.inlineCallbacks def check_username(self, localpart, guest_access_token=None): yield run_on_reactor() @@ -91,7 +92,7 @@ class RegistrationHandler(BaseHandler): Args: localpart : The local part of the user ID to register. If None, - one will be randomly generated. + one will be generated. password (str) : The password to assign to this user so they can login again. This can be None which means they cannot login again via a password (e.g. the user is an application service user). @@ -108,6 +109,18 @@ class RegistrationHandler(BaseHandler): if localpart: yield self.check_username(localpart, guest_access_token=guest_access_token) + was_guest = guest_access_token is not None + + if not was_guest: + try: + int(localpart) + raise RegistrationError( + 400, + "Numeric user IDs are reserved for guest users." 
+ ) + except ValueError: + pass + user = UserID(localpart, self.hs.hostname) user_id = user.to_string() @@ -118,40 +131,36 @@ class RegistrationHandler(BaseHandler): user_id=user_id, token=token, password_hash=password_hash, - was_guest=guest_access_token is not None, + was_guest=was_guest, make_guest=make_guest, ) yield registered_user(self.distributor, user) else: - # autogen a random user ID + # autogen a sequential user ID attempts = 0 - user_id = None token = None - while not user_id: + user = None + while not user: + localpart = yield self._generate_user_id(attempts > 0) + user = UserID(localpart, self.hs.hostname) + user_id = user.to_string() + yield self.check_user_id_is_valid(user_id) + if generate_token: + token = self.auth_handler().generate_access_token(user_id) try: - localpart = self._generate_user_id() - user = UserID(localpart, self.hs.hostname) - user_id = user.to_string() - yield self.check_user_id_is_valid(user_id) - if generate_token: - token = self.auth_handler().generate_access_token(user_id) yield self.store.register( user_id=user_id, token=token, password_hash=password_hash, make_guest=make_guest ) - - yield registered_user(self.distributor, user) except SynapseError: # if user id is taken, just generate another user_id = None token = None attempts += 1 - if attempts > 5: - raise RegistrationError( - 500, "Cannot generate user ID.") + yield registered_user(self.distributor, user) # We used to generate default identicons here, but nowadays # we want clients to generate their own as part of their branding @@ -283,8 +292,16 @@ class RegistrationHandler(BaseHandler): errcode=Codes.EXCLUSIVE ) - def _generate_user_id(self): - return "-" + stringutils.random_string(18) + @defer.inlineCallbacks + def _generate_user_id(self, reseed=False): + if reseed or self._next_generated_user_id is None: + self._next_generated_user_id = ( + yield self.store.find_next_generated_user_id_localpart() + ) + + id = self._next_generated_user_id + self._next_generated_user_id += 1 + defer.returnValue(str(id)) @defer.inlineCallbacks def _validate_captcha(self, ip_addr, private_key, challenge, response): diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index bd35e19be6..967c732bda 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +import re + from twisted.internet import defer from synapse.api.errors import StoreError, Codes @@ -351,3 +353,37 @@ class RegistrationStore(SQLBaseStore): ret = yield self.runInteraction("count_users", _count_users) defer.returnValue(ret) + + @defer.inlineCallbacks + def find_next_generated_user_id_localpart(self): + """ + Gets the localpart of the next generated user ID. + + Generated user IDs are integers, and we aim for them to be as small as + we can. Unfortunately, it's possible some of them are already taken by + existing users, and there may be gaps in the already taken range. This + function returns the start of the first allocatable gap. This is to + avoid the case of ID 10000000 being pre-allocated, so us wasting the + first (and shortest) many generated user IDs. 
+ """ + def _find_next_generated_user_id(txn): + txn.execute("SELECT name FROM users") + rows = self.cursor_to_dict(txn) + + regex = re.compile("^@(\d+):") + + found = set() + + for r in rows: + user_id = r["name"] + match = regex.search(user_id) + if match: + found.add(int(match.group(1))) + for i in xrange(len(found) + 1): + if i not in found: + return i + + defer.returnValue((yield self.runInteraction( + "find_next_generated_user_id", + _find_next_generated_user_id + ))) From b052621f679103052426baf086fd48074199e81b Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Fri, 5 Feb 2016 15:17:47 +0000 Subject: [PATCH 110/349] List the URL patterns in synapse --- scripts-dev/list_url_patterns.py | 54 ++++++++++++++++++++++++++++++++ 1 file changed, 54 insertions(+) create mode 100755 scripts-dev/list_url_patterns.py diff --git a/scripts-dev/list_url_patterns.py b/scripts-dev/list_url_patterns.py new file mode 100755 index 0000000000..23bcc6b034 --- /dev/null +++ b/scripts-dev/list_url_patterns.py @@ -0,0 +1,54 @@ +#! /usr/bin/python + +import ast +import argparse +import os +import sys +import yaml + +PATTERNS = [] + + +class CallVisitor(ast.NodeVisitor): + def visit_Call(self, node): + if isinstance(node.func, ast.Name): + name = node.func.id + else: + return + + if name == "client_path_patterns": + PATTERNS.append(node.args[0].s) + elif name == "client_v2_patterns": + PATTERNS.append(node.args[0].s) + + +def find_patterns_in_code(input_code): + input_ast = ast.parse(input_code) + visitor = CallVisitor() + visitor.visit(input_ast) + + +def find_patterns_in_file(filepath): + with open(filepath) as f: + find_patterns_in_code(f.read()) + + +parser = argparse.ArgumentParser(description='Find url patterns.') + +parser.add_argument( + "directories", nargs='+', metavar="DIR", + help="Directories to search for definitions" +) + +args = parser.parse_args() + + +for directory in args.directories: + for root, dirs, files in os.walk(directory): + for filename in files: + if filename.endswith(".py"): + filepath = os.path.join(root, filename) + find_patterns_in_file(filepath) + + +yaml.dump(sorted(PATTERNS), sys.stdout, default_flow_style=False) From 77c7ed0e93bff2ef7bb1981bf1b9a996aaca6f19 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Fri, 5 Feb 2016 15:43:27 +0000 Subject: [PATCH 111/349] Report the v1 and v2 patterns separately --- scripts-dev/list_url_patterns.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/scripts-dev/list_url_patterns.py b/scripts-dev/list_url_patterns.py index 23bcc6b034..58d40c4ff4 100755 --- a/scripts-dev/list_url_patterns.py +++ b/scripts-dev/list_url_patterns.py @@ -6,8 +6,13 @@ import os import sys import yaml -PATTERNS = [] +PATTERNS_V1 = [] +PATTERNS_V2 = [] +RESULT = { + "v1": PATTERNS_V1, + "v2": PATTERNS_V2, +} class CallVisitor(ast.NodeVisitor): def visit_Call(self, node): @@ -16,10 +21,11 @@ class CallVisitor(ast.NodeVisitor): else: return + if name == "client_path_patterns": - PATTERNS.append(node.args[0].s) + PATTERNS_V1.append(node.args[0].s) elif name == "client_v2_patterns": - PATTERNS.append(node.args[0].s) + PATTERNS_V2.append(node.args[0].s) def find_patterns_in_code(input_code): @@ -50,5 +56,7 @@ for directory in args.directories: filepath = os.path.join(root, filename) find_patterns_in_file(filepath) +PATTERNS_V1.sort() +PATTERNS_V2.sort() -yaml.dump(sorted(PATTERNS), sys.stdout, default_flow_style=False) +yaml.dump(RESULT, sys.stdout, default_flow_style=False) From 1d19a5ec0fff73af9cee8c21118020b31be47379 Mon Sep 17 
00:00:00 2001 From: Daniel Wagner-Hall Date: Mon, 8 Feb 2016 10:50:55 +0000 Subject: [PATCH 112/349] Reject additional path segments --- synapse/rest/client/v1/admin.py | 2 +- synapse/rest/client/v1/presence.py | 4 ++-- synapse/rest/client/v1/profile.py | 6 +++--- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/synapse/rest/client/v1/admin.py b/synapse/rest/client/v1/admin.py index e2f5eb7b29..5ec52707e7 100644 --- a/synapse/rest/client/v1/admin.py +++ b/synapse/rest/client/v1/admin.py @@ -26,7 +26,7 @@ logger = logging.getLogger(__name__) class WhoisRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/admin/whois/(?P[^/]*)") + PATTERNS = client_path_patterns("/admin/whois/(?P[^/]*)$") @defer.inlineCallbacks def on_GET(self, request, user_id): diff --git a/synapse/rest/client/v1/presence.py b/synapse/rest/client/v1/presence.py index a6f8754e32..9410ac527e 100644 --- a/synapse/rest/client/v1/presence.py +++ b/synapse/rest/client/v1/presence.py @@ -28,7 +28,7 @@ logger = logging.getLogger(__name__) class PresenceStatusRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/presence/(?P[^/]*)/status") + PATTERNS = client_path_patterns("/presence/(?P[^/]*)/status$") @defer.inlineCallbacks def on_GET(self, request, user_id): @@ -73,7 +73,7 @@ class PresenceStatusRestServlet(ClientV1RestServlet): class PresenceListRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/presence/list/(?P[^/]*)") + PATTERNS = client_path_patterns("/presence/list/(?P[^/]*)$") @defer.inlineCallbacks def on_GET(self, request, user_id): diff --git a/synapse/rest/client/v1/profile.py b/synapse/rest/client/v1/profile.py index 3c5a212920..aeda7bfa39 100644 --- a/synapse/rest/client/v1/profile.py +++ b/synapse/rest/client/v1/profile.py @@ -23,7 +23,7 @@ import simplejson as json class ProfileDisplaynameRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/profile/(?P[^/]*)/displayname") + PATTERNS = client_path_patterns("/profile/(?P[^/]*)/displayname$") @defer.inlineCallbacks def on_GET(self, request, user_id): @@ -60,7 +60,7 @@ class ProfileDisplaynameRestServlet(ClientV1RestServlet): class ProfileAvatarURLRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/profile/(?P[^/]*)/avatar_url") + PATTERNS = client_path_patterns("/profile/(?P[^/]*)/avatar_url$") @defer.inlineCallbacks def on_GET(self, request, user_id): @@ -97,7 +97,7 @@ class ProfileAvatarURLRestServlet(ClientV1RestServlet): class ProfileRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/profile/(?P[^/]*)") + PATTERNS = client_path_patterns("/profile/(?P[^/]*)$") @defer.inlineCallbacks def on_GET(self, request, user_id): From 13e6262659fd544155875e18c0a3351c12d7651d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 4 Feb 2016 10:15:56 +0000 Subject: [PATCH 113/349] Add metrics to pushers --- synapse/http/server.py | 10 +++++ synapse/push/__init__.py | 84 ++++++++++++++++++++++++--------------- synapse/util/metrics.py | 86 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 149 insertions(+), 31 deletions(-) create mode 100644 synapse/util/metrics.py diff --git a/synapse/http/server.py b/synapse/http/server.py index c250a4604f..06935783ca 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -208,6 +208,9 @@ class JsonResource(HttpServer, resource.Resource): if request.method == "OPTIONS": self._send_response(request, 200, {}) return + + start_context = LoggingContext.current_context() + # Loop through all the registered 
callbacks to check if the method # and path regex match for path_entry in self.path_regexs.get(request.method, []): @@ -243,6 +246,13 @@ class JsonResource(HttpServer, resource.Resource): if context: tag = context.tag + if context != start_context: + logger.warn( + "Context have unexpectedly changed %r, %r", + context, self.start_context + ) + return + incoming_requests_counter.inc(request.method, servlet_classname, tag) response_timer.inc_by( diff --git a/synapse/push/__init__.py b/synapse/push/__init__.py index 8b9d0f03e5..64e581b8ba 100644 --- a/synapse/push/__init__.py +++ b/synapse/push/__init__.py @@ -17,6 +17,8 @@ from twisted.internet import defer from synapse.streams.config import PaginationConfig from synapse.types import StreamToken +from synapse.util.logcontext import LoggingContext +from synapse.util.metrics import Measure import synapse.util.async import push_rule_evaluator as push_rule_evaluator @@ -27,6 +29,16 @@ import random logger = logging.getLogger(__name__) +_NEXT_ID = 1 + + +def _get_next_id(): + global _NEXT_ID + _id = _NEXT_ID + _NEXT_ID += 1 + return _id + + # Pushers could now be moved to pull out of the event_push_actions table instead # of listening on the event stream: this would avoid them having to run the # rules again. @@ -57,6 +69,8 @@ class Pusher(object): self.alive = True self.badge = None + self.name = "Pusher-%d" % (_get_next_id(),) + # The last value of last_active_time that we saw self.last_last_active_time = 0 self.has_unread = True @@ -86,38 +100,46 @@ class Pusher(object): @defer.inlineCallbacks def start(self): - if not self.last_token: - # First-time setup: get a token to start from (we can't - # just start from no token, ie. 'now' - # because we need the result to be reproduceable in case - # we fail to dispatch the push) - config = PaginationConfig(from_token=None, limit='1') - chunk = yield self.evStreamHandler.get_stream( - self.user_id, config, timeout=0, affect_presence=False - ) - self.last_token = chunk['end'] - self.store.update_pusher_last_token( - self.app_id, self.pushkey, self.user_id, self.last_token - ) - logger.info("Pusher %s for user %s starting from token %s", - self.pushkey, self.user_id, self.last_token) - - wait = 0 - while self.alive: - try: - if wait > 0: - yield synapse.util.async.sleep(wait) - yield self.get_and_dispatch() - wait = 0 - except: - if wait == 0: - wait = 1 - else: - wait = min(wait * 2, 1800) - logger.exception( - "Exception in pusher loop for pushkey %s. Pausing for %ds", - self.pushkey, wait + with LoggingContext(self.name): + if not self.last_token: + # First-time setup: get a token to start from (we can't + # just start from no token, ie. 
'now' + # because we need the result to be reproduceable in case + # we fail to dispatch the push) + config = PaginationConfig(from_token=None, limit='1') + chunk = yield self.evStreamHandler.get_stream( + self.user_id, config, timeout=0, affect_presence=False ) + self.last_token = chunk['end'] + self.store.update_pusher_last_token( + self.app_id, self.pushkey, self.user_id, self.last_token + ) + logger.info("New pusher %s for user %s starting from token %s", + self.pushkey, self.user_id, self.last_token) + + else: + logger.info( + "Old pusher %s for user %s starting", + self.pushkey, self.user_id, + ) + + wait = 0 + while self.alive: + try: + if wait > 0: + yield synapse.util.async.sleep(wait) + with Measure(self.clock, "push"): + yield self.get_and_dispatch() + wait = 0 + except: + if wait == 0: + wait = 1 + else: + wait = min(wait * 2, 1800) + logger.exception( + "Exception in pusher loop for pushkey %s. Pausing for %ds", + self.pushkey, wait + ) @defer.inlineCallbacks def get_and_dispatch(self): diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py new file mode 100644 index 0000000000..daf6087fe0 --- /dev/null +++ b/synapse/util/metrics.py @@ -0,0 +1,86 @@ +# -*- coding: utf-8 -*- +# Copyright 2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +from synapse.util.logcontext import LoggingContext +import synapse.metrics + +import logging + + +logger = logging.getLogger(__name__) + + +metrics = synapse.metrics.get_metrics_for(__name__) + +block_timer = metrics.register_distribution( + "block_timer", + labels=["block_name"] +) + +block_ru_utime = metrics.register_distribution( + "block_ru_utime", labels=["block_name"] +) + +block_ru_stime = metrics.register_distribution( + "block_ru_stime", labels=["block_name"] +) + +block_db_txn_count = metrics.register_distribution( + "block_db_txn_count", labels=["block_name"] +) + +block_db_txn_duration = metrics.register_distribution( + "block_db_txn_duration", labels=["block_name"] +) + + +class Measure(object): + __slots__ = ["clock", "name", "start_context", "start"] + + def __init__(self, clock, name): + self.clock = clock + self.name = name + self.start_context = None + self.start = None + + def __enter__(self): + self.start = self.clock.time_msec() + self.start_context = LoggingContext.current_context() + + def __exit__(self, exc_type, exc_val, exc_tb): + if exc_type is not None: + return + + duration = self.clock.time_msec() - self.start + block_timer.inc_by(duration, self.name) + + context = LoggingContext.current_context() + if not context: + return + + if context != self.start_context: + logger.warn( + "Context have unexpectedly changed %r, %r", + context, self.start_context + ) + return + + ru_utime, ru_stime = context.get_resource_usage() + + block_ru_utime.inc_by(ru_utime, self.name) + block_ru_stime.inc_by(ru_stime, self.name) + block_db_txn_count.inc_by(context.db_txn_count, self.name) + block_db_txn_duration.inc_by(context.db_txn_duration, self.name) From 2c1fbea5319db2c64fa486adb32b5e66680b6daf Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 4 Feb 2016 10:22:44 +0000 Subject: [PATCH 114/349] Fix up logcontexts --- synapse/api/auth.py | 4 +- synapse/app/homeserver.py | 2 + synapse/crypto/keyring.py | 83 +++++++++-------- synapse/federation/federation_server.py | 4 +- synapse/federation/transaction_queue.py | 3 - synapse/handlers/_base.py | 10 +- synapse/handlers/events.py | 11 ++- synapse/handlers/federation.py | 50 +--------- synapse/handlers/presence.py | 20 ++-- synapse/handlers/register.py | 2 +- synapse/handlers/room.py | 11 ++- synapse/handlers/sync.py | 40 ++++---- synapse/http/server.py | 5 +- synapse/notifier.py | 58 ++++++------ synapse/push/__init__.py | 2 +- synapse/push/pusherpool.py | 9 +- synapse/rest/client/v2_alpha/account_data.py | 4 +- synapse/rest/client/v2_alpha/tags.py | 4 +- synapse/storage/_base.py | 18 ++-- synapse/storage/events.py | 34 ++++--- synapse/storage/presence.py | 5 +- synapse/storage/stream.py | 9 +- synapse/util/__init__.py | 6 +- synapse/util/async.py | 11 ++- synapse/util/caches/descriptors.py | 16 +++- synapse/util/caches/snapshot_cache.py | 3 +- synapse/util/distributor.py | 15 +-- synapse/util/logcontext.py | 98 +++++++++++++++++++- synapse/util/logutils.py | 35 +++++++ synapse/util/metrics.py | 10 +- synapse/util/ratelimitutils.py | 3 +- 31 files changed, 356 insertions(+), 229 deletions(-) diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 5bba9343f6..e2f84c4d57 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -24,6 +24,7 @@ from synapse.api.constants import EventTypes, Membership, JoinRules from synapse.api.errors import AuthError, Codes, SynapseError, EventSizeError from synapse.types import Requester, RoomID, UserID, EventID from synapse.util.logutils import log_function +from 
synapse.util.logcontext import preserve_context_over_fn from unpaddedbase64 import decode_base64 import logging @@ -529,7 +530,8 @@ class Auth(object): default=[""] )[0] if user and access_token and ip_addr: - self.store.insert_client_ip( + preserve_context_over_fn( + self.store.insert_client_ip, user=user, access_token=access_token, ip=ip_addr, diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index e5c7e39cf9..2b4be7bdd0 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -709,6 +709,8 @@ def run(hs): phone_home_task.start(60 * 60 * 24, now=False) def in_thread(): + # Uncomment to enable tracing of log context changes. + # sys.settrace(logcontext_tracer) with LoggingContext("run"): change_resource_limit(hs.config.soft_file_limit) reactor.run() diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index cddec0b2bc..d08ee0aa91 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -18,6 +18,10 @@ from synapse.api.errors import SynapseError, Codes from synapse.util.retryutils import get_retry_limiter from synapse.util import unwrapFirstError from synapse.util.async import ObservableDeferred +from synapse.util.logcontext import ( + preserve_context_over_deferred, preserve_context_over_fn, PreserveLoggingContext, + preserve_fn +) from twisted.internet import defer @@ -142,40 +146,43 @@ class Keyring(object): for server_name, _ in server_and_json } - # We want to wait for any previous lookups to complete before - # proceeding. - wait_on_deferred = self.wait_for_previous_lookups( - [server_name for server_name, _ in server_and_json], - server_to_deferred, - ) + with PreserveLoggingContext(): - # Actually start fetching keys. - wait_on_deferred.addBoth( - lambda _: self.get_server_verify_keys(group_id_to_group, deferreds) - ) + # We want to wait for any previous lookups to complete before + # proceeding. + wait_on_deferred = self.wait_for_previous_lookups( + [server_name for server_name, _ in server_and_json], + server_to_deferred, + ) - # When we've finished fetching all the keys for a given server_name, - # resolve the deferred passed to `wait_for_previous_lookups` so that - # any lookups waiting will proceed. - server_to_gids = {} + # Actually start fetching keys. + wait_on_deferred.addBoth( + lambda _: self.get_server_verify_keys(group_id_to_group, deferreds) + ) - def remove_deferreds(res, server_name, group_id): - server_to_gids[server_name].discard(group_id) - if not server_to_gids[server_name]: - d = server_to_deferred.pop(server_name, None) - if d: - d.callback(None) - return res + # When we've finished fetching all the keys for a given server_name, + # resolve the deferred passed to `wait_for_previous_lookups` so that + # any lookups waiting will proceed. 
+ server_to_gids = {} - for g_id, deferred in deferreds.items(): - server_name = group_id_to_group[g_id].server_name - server_to_gids.setdefault(server_name, set()).add(g_id) - deferred.addBoth(remove_deferreds, server_name, g_id) + def remove_deferreds(res, server_name, group_id): + server_to_gids[server_name].discard(group_id) + if not server_to_gids[server_name]: + d = server_to_deferred.pop(server_name, None) + if d: + d.callback(None) + return res + + for g_id, deferred in deferreds.items(): + server_name = group_id_to_group[g_id].server_name + server_to_gids.setdefault(server_name, set()).add(g_id) + deferred.addBoth(remove_deferreds, server_name, g_id) # Pass those keys to handle_key_deferred so that the json object # signatures can be verified return [ - handle_key_deferred( + preserve_context_over_fn( + handle_key_deferred, group_id_to_group[g_id], deferreds[g_id], ) @@ -198,12 +205,13 @@ class Keyring(object): if server_name in self.key_downloads ] if wait_on: - yield defer.DeferredList(wait_on) + with PreserveLoggingContext(): + yield defer.DeferredList(wait_on) else: break for server_name, deferred in server_to_deferred.items(): - d = ObservableDeferred(deferred) + d = ObservableDeferred(preserve_context_over_deferred(deferred)) self.key_downloads[server_name] = d def rm(r, server_name): @@ -244,12 +252,13 @@ class Keyring(object): for group in group_id_to_group.values(): for key_id in group.key_ids: if key_id in merged_results[group.server_name]: - group_id_to_deferred[group.group_id].callback(( - group.group_id, - group.server_name, - key_id, - merged_results[group.server_name][key_id], - )) + with PreserveLoggingContext(): + group_id_to_deferred[group.group_id].callback(( + group.group_id, + group.server_name, + key_id, + merged_results[group.server_name][key_id], + )) break else: missing_groups.setdefault( @@ -504,7 +513,7 @@ class Keyring(object): yield defer.gatherResults( [ - self.store_keys( + preserve_fn(self.store_keys)( server_name=key_server_name, from_server=server_name, verify_keys=verify_keys, @@ -573,7 +582,7 @@ class Keyring(object): yield defer.gatherResults( [ - self.store.store_server_keys_json( + preserve_fn(self.store.store_server_keys_json)( server_name=server_name, key_id=key_id, from_server=server_name, @@ -675,7 +684,7 @@ class Keyring(object): # TODO(markjh): Store whether the keys have expired. 
yield defer.gatherResults( [ - self.store.store_server_verify_key( + preserve_fn(self.store.store_server_verify_key)( server_name, server_name, key.time_added, key ) for key_id, key in verify_keys.items() diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index a97aa0c94a..90718192dd 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -126,10 +126,8 @@ class FederationServer(FederationBase): results = [] for pdu in pdu_list: - d = self._handle_new_pdu(transaction.origin, pdu) - try: - yield d + yield self._handle_new_pdu(transaction.origin, pdu) results.append({}) except FederationError as e: self.send_failure(e, transaction.origin) diff --git a/synapse/federation/transaction_queue.py b/synapse/federation/transaction_queue.py index 622adad3ae..1928da03b3 100644 --- a/synapse/federation/transaction_queue.py +++ b/synapse/federation/transaction_queue.py @@ -103,7 +103,6 @@ class TransactionQueue(object): else: return not destination.startswith("localhost") - @defer.inlineCallbacks def enqueue_pdu(self, pdu, destinations, order): # We loop through all destinations to see whether we already have # a transaction in progress. If we do, stick it in the pending_pdus @@ -141,8 +140,6 @@ class TransactionQueue(object): deferreds.append(deferred) - yield defer.DeferredList(deferreds, consumeErrors=True) - # NO inlineCallbacks def enqueue_edu(self, edu): destination = edu.destination diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py index 1423df6cf3..fa83d3e464 100644 --- a/synapse/handlers/_base.py +++ b/synapse/handlers/_base.py @@ -293,19 +293,11 @@ class BaseHandler(object): with PreserveLoggingContext(): # Don't block waiting on waking up all the listeners. - notify_d = self.notifier.on_new_room_event( + self.notifier.on_new_room_event( event, event_stream_id, max_stream_id, extra_users=extra_users ) - def log_failure(f): - logger.warn( - "Failed to notify about %s: %s", - event.event_id, f.value - ) - - notify_d.addErrback(log_failure) - # If invite, remove room_state from unsigned before sending. 
event.unsigned.pop("invite_room_state", None) diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py index 5ad8f3779a..4933c31c19 100644 --- a/synapse/handlers/events.py +++ b/synapse/handlers/events.py @@ -18,6 +18,7 @@ from twisted.internet import defer from synapse.util.logutils import log_function from synapse.types import UserID from synapse.events.utils import serialize_event +from synapse.util.logcontext import preserve_context_over_fn from ._base import BaseHandler @@ -29,11 +30,17 @@ logger = logging.getLogger(__name__) def started_user_eventstream(distributor, user): - return distributor.fire("started_user_eventstream", user) + return preserve_context_over_fn( + distributor.fire, + "started_user_eventstream", user + ) def stopped_user_eventstream(distributor, user): - return distributor.fire("stopped_user_eventstream", user) + return preserve_context_over_fn( + distributor.fire, + "stopped_user_eventstream", user + ) class EventStreamHandler(BaseHandler): diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 2ce1e9d6c7..b78b0502d9 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -221,19 +221,11 @@ class FederationHandler(BaseHandler): extra_users.append(target_user) with PreserveLoggingContext(): - d = self.notifier.on_new_room_event( + self.notifier.on_new_room_event( event, event_stream_id, max_stream_id, extra_users=extra_users ) - def log_failure(f): - logger.warn( - "Failed to notify about %s: %s", - event.event_id, f.value - ) - - d.addErrback(log_failure) - if event.type == EventTypes.Member: if event.membership == Membership.JOIN: prev_state = context.current_state.get((event.type, event.state_key)) @@ -643,19 +635,11 @@ class FederationHandler(BaseHandler): ) with PreserveLoggingContext(): - d = self.notifier.on_new_room_event( + self.notifier.on_new_room_event( event, event_stream_id, max_stream_id, extra_users=[joinee] ) - def log_failure(f): - logger.warn( - "Failed to notify about %s: %s", - event.event_id, f.value - ) - - d.addErrback(log_failure) - logger.debug("Finished joining %s to %s", joinee, room_id) finally: room_queue = self.room_queues[room_id] @@ -730,18 +714,10 @@ class FederationHandler(BaseHandler): extra_users.append(target_user) with PreserveLoggingContext(): - d = self.notifier.on_new_room_event( + self.notifier.on_new_room_event( event, event_stream_id, max_stream_id, extra_users=extra_users ) - def log_failure(f): - logger.warn( - "Failed to notify about %s: %s", - event.event_id, f.value - ) - - d.addErrback(log_failure) - if event.type == EventTypes.Member: if event.content["membership"] == Membership.JOIN: user = UserID.from_string(event.state_key) @@ -811,19 +787,11 @@ class FederationHandler(BaseHandler): target_user = UserID.from_string(event.state_key) with PreserveLoggingContext(): - d = self.notifier.on_new_room_event( + self.notifier.on_new_room_event( event, event_stream_id, max_stream_id, extra_users=[target_user], ) - def log_failure(f): - logger.warn( - "Failed to notify about %s: %s", - event.event_id, f.value - ) - - d.addErrback(log_failure) - defer.returnValue(event) @defer.inlineCallbacks @@ -948,18 +916,10 @@ class FederationHandler(BaseHandler): extra_users.append(target_user) with PreserveLoggingContext(): - d = self.notifier.on_new_room_event( + self.notifier.on_new_room_event( event, event_stream_id, max_stream_id, extra_users=extra_users ) - def log_failure(f): - logger.warn( - "Failed to notify about %s: %s", - event.event_id, f.value - ) - - 
d.addErrback(log_failure) - new_pdu = event destinations = set() diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index d0c21ff5c9..b61394f2b5 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -378,9 +378,9 @@ class PresenceHandler(BaseHandler): was_polling = target_user in self._user_cachemap if now_online and not was_polling: - self.start_polling_presence(target_user, state=state) + yield self.start_polling_presence(target_user, state=state) elif not now_online and was_polling: - self.stop_polling_presence(target_user) + yield self.stop_polling_presence(target_user) # TODO(paul): perform a presence push as part of start/stop poll so # we don't have to do this all the time @@ -394,7 +394,8 @@ class PresenceHandler(BaseHandler): if now - prev_state.state.get("last_active", 0) < LAST_ACTIVE_GRANULARITY: return - self.changed_presencelike_data(user, {"last_active": now}) + with PreserveLoggingContext(): + self.changed_presencelike_data(user, {"last_active": now}) def get_joined_rooms_for_user(self, user): """Get the list of rooms a user is joined to. @@ -466,11 +467,12 @@ class PresenceHandler(BaseHandler): local_user, room_ids=[room_id], add_to_cache=False ) - self.push_update_to_local_and_remote( - observed_user=local_user, - users_to_push=[user], - statuscache=statuscache, - ) + with PreserveLoggingContext(): + self.push_update_to_local_and_remote( + observed_user=local_user, + users_to_push=[user], + statuscache=statuscache, + ) @defer.inlineCallbacks def send_presence_invite(self, observer_user, observed_user): @@ -556,7 +558,7 @@ class PresenceHandler(BaseHandler): observer_user.localpart, observed_user.to_string() ) - self.start_polling_presence( + yield self.start_polling_presence( observer_user, target_user=observed_user ) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 2660fd21a2..24c850ae9b 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -186,7 +186,7 @@ class RegistrationHandler(BaseHandler): token=token, password_hash="" ) - registered_user(self.distributor, user) + yield registered_user(self.distributor, user) defer.returnValue((user_id, token)) @defer.inlineCallbacks diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index bfd7e44e9f..a8e3a9029c 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -25,6 +25,7 @@ from synapse.api.constants import ( from synapse.api.errors import AuthError, StoreError, SynapseError, Codes from synapse.util import stringutils, unwrapFirstError from synapse.util.async import run_on_reactor +from synapse.util.logcontext import preserve_context_over_fn from signedjson.sign import verify_signed_json from signedjson.key import decode_verify_key_bytes @@ -46,11 +47,17 @@ def collect_presencelike_data(distributor, user, content): def user_left_room(distributor, user, room_id): - return distributor.fire("user_left_room", user=user, room_id=room_id) + return preserve_context_over_fn( + distributor.fire, + "user_left_room", user=user, room_id=room_id + ) def user_joined_room(distributor, user, room_id): - return distributor.fire("user_joined_room", user=user, room_id=room_id) + return preserve_context_over_fn( + distributor.fire, + "user_joined_room", user=user, room_id=room_id + ) class RoomCreationHandler(BaseHandler): diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 72271f2626..3f1cda5b0b 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -18,7 +18,7 @@ from 
._base import BaseHandler from synapse.streams.config import PaginationConfig from synapse.api.constants import Membership, EventTypes from synapse.util import unwrapFirstError -from synapse.util.logcontext import LoggingContext +from synapse.util.logcontext import LoggingContext, PreserveLoggingContext from twisted.internet import defer @@ -241,15 +241,16 @@ class SyncHandler(BaseHandler): deferreds = [] for event in room_list: if event.membership == Membership.JOIN: - room_sync_deferred = self.full_state_sync_for_joined_room( - room_id=event.room_id, - sync_config=sync_config, - now_token=now_token, - timeline_since_token=timeline_since_token, - ephemeral_by_room=ephemeral_by_room, - tags_by_room=tags_by_room, - account_data_by_room=account_data_by_room, - ) + with PreserveLoggingContext(LoggingContext.current_context()): + room_sync_deferred = self.full_state_sync_for_joined_room( + room_id=event.room_id, + sync_config=sync_config, + now_token=now_token, + timeline_since_token=timeline_since_token, + ephemeral_by_room=ephemeral_by_room, + tags_by_room=tags_by_room, + account_data_by_room=account_data_by_room, + ) room_sync_deferred.addCallback(joined.append) deferreds.append(room_sync_deferred) elif event.membership == Membership.INVITE: @@ -262,15 +263,16 @@ class SyncHandler(BaseHandler): leave_token = now_token.copy_and_replace( "room_key", "s%d" % (event.stream_ordering,) ) - room_sync_deferred = self.full_state_sync_for_archived_room( - sync_config=sync_config, - room_id=event.room_id, - leave_event_id=event.event_id, - leave_token=leave_token, - timeline_since_token=timeline_since_token, - tags_by_room=tags_by_room, - account_data_by_room=account_data_by_room, - ) + with PreserveLoggingContext(LoggingContext.current_context()): + room_sync_deferred = self.full_state_sync_for_archived_room( + sync_config=sync_config, + room_id=event.room_id, + leave_event_id=event.event_id, + leave_token=leave_token, + timeline_since_token=timeline_since_token, + tags_by_room=tags_by_room, + account_data_by_room=account_data_by_room, + ) room_sync_deferred.addCallback(archived.append) deferreds.append(room_sync_deferred) diff --git a/synapse/http/server.py b/synapse/http/server.py index 06935783ca..a90e2e1125 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -99,9 +99,8 @@ def request_handler(request_handler): request_context.request = request_id with request.processing(): try: - d = request_handler(self, request) - with PreserveLoggingContext(): - yield d + with PreserveLoggingContext(request_context): + yield request_handler(self, request) except CodeMessageException as e: code = e.code if isinstance(e, SynapseError): diff --git a/synapse/notifier.py b/synapse/notifier.py index 1a90bd55cd..560866b26e 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -18,7 +18,8 @@ from synapse.api.constants import EventTypes from synapse.api.errors import AuthError from synapse.util.logutils import log_function -from synapse.util.async import run_on_reactor, ObservableDeferred +from synapse.util.async import ObservableDeferred +from synapse.util.logcontext import PreserveLoggingContext from synapse.types import StreamToken import synapse.metrics @@ -73,7 +74,8 @@ class _NotifierUserStream(object): self.current_token = current_token self.last_notified_ms = time_now_ms - self.notify_deferred = ObservableDeferred(defer.Deferred()) + with PreserveLoggingContext(): + self.notify_deferred = ObservableDeferred(defer.Deferred()) def notify(self, stream_key, stream_id, time_now_ms): """Notify 
any listeners for this user of a new event from an @@ -88,8 +90,10 @@ class _NotifierUserStream(object): ) self.last_notified_ms = time_now_ms noify_deferred = self.notify_deferred - self.notify_deferred = ObservableDeferred(defer.Deferred()) - noify_deferred.callback(self.current_token) + + with PreserveLoggingContext(): + self.notify_deferred = ObservableDeferred(defer.Deferred()) + noify_deferred.callback(self.current_token) def remove(self, notifier): """ Remove this listener from all the indexes in the Notifier @@ -184,8 +188,6 @@ class Notifier(object): lambda: count(bool, self.appservice_to_user_streams.values()), ) - @log_function - @defer.inlineCallbacks def on_new_room_event(self, event, room_stream_id, max_room_stream_id, extra_users=[]): """ Used by handlers to inform the notifier something has happened @@ -199,12 +201,11 @@ class Notifier(object): until all previous events have been persisted before notifying the client streams. """ - yield run_on_reactor() - - self.pending_new_room_events.append(( - room_stream_id, event, extra_users - )) - self._notify_pending_new_room_events(max_room_stream_id) + with PreserveLoggingContext(): + self.pending_new_room_events.append(( + room_stream_id, event, extra_users + )) + self._notify_pending_new_room_events(max_room_stream_id) def _notify_pending_new_room_events(self, max_room_stream_id): """Notify for the room events that were queued waiting for a previous @@ -251,31 +252,29 @@ class Notifier(object): extra_streams=app_streams, ) - @defer.inlineCallbacks - @log_function def on_new_event(self, stream_key, new_token, users=[], rooms=[], extra_streams=set()): """ Used to inform listeners that something has happend event wise. Will wake up all listeners for the given users and rooms. """ - yield run_on_reactor() - user_streams = set() + with PreserveLoggingContext(): + user_streams = set() - for user in users: - user_stream = self.user_to_user_stream.get(str(user)) - if user_stream is not None: - user_streams.add(user_stream) + for user in users: + user_stream = self.user_to_user_stream.get(str(user)) + if user_stream is not None: + user_streams.add(user_stream) - for room in rooms: - user_streams |= self.room_to_user_streams.get(room, set()) + for room in rooms: + user_streams |= self.room_to_user_streams.get(room, set()) - time_now_ms = self.clock.time_msec() - for user_stream in user_streams: - try: - user_stream.notify(stream_key, new_token, time_now_ms) - except: - logger.exception("Failed to notify listener") + time_now_ms = self.clock.time_msec() + for user_stream in user_streams: + try: + user_stream.notify(stream_key, new_token, time_now_ms) + except: + logger.exception("Failed to notify listener") @defer.inlineCallbacks def wait_for_events(self, user_id, timeout, callback, room_ids=None, @@ -325,7 +324,8 @@ class Notifier(object): # that we don't miss any current_token updates. 
prev_token = current_token listener = user_stream.new_listener(prev_token) - yield listener.deferred + with PreserveLoggingContext(): + yield listener.deferred except defer.CancelledError: break diff --git a/synapse/push/__init__.py b/synapse/push/__init__.py index 64e581b8ba..8da2d8716c 100644 --- a/synapse/push/__init__.py +++ b/synapse/push/__init__.py @@ -111,7 +111,7 @@ class Pusher(object): self.user_id, config, timeout=0, affect_presence=False ) self.last_token = chunk['end'] - self.store.update_pusher_last_token( + yield self.store.update_pusher_last_token( self.app_id, self.pushkey, self.user_id, self.last_token ) logger.info("New pusher %s for user %s starting from token %s", diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index d1b7c0802f..d7dcb2de4b 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -18,6 +18,7 @@ from twisted.internet import defer from httppusher import HttpPusher from synapse.push import PusherConfigException +from synapse.util.logcontext import preserve_fn import logging @@ -76,7 +77,7 @@ class PusherPool: "Removing pusher for app id %s, pushkey %s, user %s", app_id, pushkey, p['user_name'] ) - self.remove_pusher(p['app_id'], p['pushkey'], p['user_name']) + yield self.remove_pusher(p['app_id'], p['pushkey'], p['user_name']) @defer.inlineCallbacks def remove_pushers_by_user(self, user_id): @@ -91,7 +92,7 @@ class PusherPool: "Removing pusher for app id %s, pushkey %s, user %s", p['app_id'], p['pushkey'], p['user_name'] ) - self.remove_pusher(p['app_id'], p['pushkey'], p['user_name']) + yield self.remove_pusher(p['app_id'], p['pushkey'], p['user_name']) @defer.inlineCallbacks def _add_pusher_to_store(self, user_id, access_token, profile_tag, kind, @@ -110,7 +111,7 @@ class PusherPool: lang=lang, data=data, ) - self._refresh_pusher(app_id, pushkey, user_id) + yield self._refresh_pusher(app_id, pushkey, user_id) def _create_pusher(self, pusherdict): if pusherdict['kind'] == 'http': @@ -166,7 +167,7 @@ class PusherPool: if fullid in self.pushers: self.pushers[fullid].stop() self.pushers[fullid] = p - p.start() + preserve_fn(p.start)() logger.info("Started pushers") diff --git a/synapse/rest/client/v2_alpha/account_data.py b/synapse/rest/client/v2_alpha/account_data.py index 985efe2a62..1456881c1a 100644 --- a/synapse/rest/client/v2_alpha/account_data.py +++ b/synapse/rest/client/v2_alpha/account_data.py @@ -57,7 +57,7 @@ class AccountDataServlet(RestServlet): user_id, account_data_type, body ) - yield self.notifier.on_new_event( + self.notifier.on_new_event( "account_data_key", max_id, users=[user_id] ) @@ -99,7 +99,7 @@ class RoomAccountDataServlet(RestServlet): user_id, room_id, account_data_type, body ) - yield self.notifier.on_new_event( + self.notifier.on_new_event( "account_data_key", max_id, users=[user_id] ) diff --git a/synapse/rest/client/v2_alpha/tags.py b/synapse/rest/client/v2_alpha/tags.py index 42f2203f3d..79c436a8cf 100644 --- a/synapse/rest/client/v2_alpha/tags.py +++ b/synapse/rest/client/v2_alpha/tags.py @@ -80,7 +80,7 @@ class TagServlet(RestServlet): max_id = yield self.store.add_tag_to_room(user_id, room_id, tag, body) - yield self.notifier.on_new_event( + self.notifier.on_new_event( "account_data_key", max_id, users=[user_id] ) @@ -94,7 +94,7 @@ class TagServlet(RestServlet): max_id = yield self.store.remove_tag_from_room(user_id, room_id, tag) - yield self.notifier.on_new_event( + self.notifier.on_new_event( "account_data_key", max_id, users=[user_id] ) diff --git a/synapse/storage/_base.py 
b/synapse/storage/_base.py index cfb87d9328..2e97ac84a8 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -15,7 +15,7 @@ import logging from synapse.api.errors import StoreError -from synapse.util.logcontext import preserve_context_over_fn, LoggingContext +from synapse.util.logcontext import LoggingContext, PreserveLoggingContext from synapse.util.caches.dictionary_cache import DictionaryCache from synapse.util.caches.descriptors import Cache import synapse.metrics @@ -298,10 +298,10 @@ class SQLBaseStore(object): func, *args, **kwargs ) - result = yield preserve_context_over_fn( - self._db_pool.runWithConnection, - inner_func, *args, **kwargs - ) + with PreserveLoggingContext(): + result = yield self._db_pool.runWithConnection( + inner_func, *args, **kwargs + ) for after_callback, after_args in after_callbacks: after_callback(*after_args) @@ -326,10 +326,10 @@ class SQLBaseStore(object): return func(conn, *args, **kwargs) - result = yield preserve_context_over_fn( - self._db_pool.runWithConnection, - inner_func, *args, **kwargs - ) + with PreserveLoggingContext(): + result = yield self._db_pool.runWithConnection( + inner_func, *args, **kwargs + ) defer.returnValue(result) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 4d7cdd00d0..c6ed54721c 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -19,7 +19,7 @@ from twisted.internet import defer, reactor from synapse.events import FrozenEvent, USE_FROZEN_DICTS from synapse.events.utils import prune_event -from synapse.util.logcontext import preserve_context_over_deferred +from synapse.util.logcontext import preserve_fn, PreserveLoggingContext from synapse.util.logutils import log_function from synapse.api.constants import EventTypes @@ -664,14 +664,16 @@ class EventsStore(SQLBaseStore): for ids, d in lst: if not d.called: try: - d.callback([ - res[i] - for i in ids - if i in res - ]) + with PreserveLoggingContext(): + d.callback([ + res[i] + for i in ids + if i in res + ]) except: logger.exception("Failed to callback") - reactor.callFromThread(fire, event_list, row_dict) + with PreserveLoggingContext(): + reactor.callFromThread(fire, event_list, row_dict) except Exception as e: logger.exception("do_fetch") @@ -679,10 +681,12 @@ class EventsStore(SQLBaseStore): def fire(evs): for _, d in evs: if not d.called: - d.errback(e) + with PreserveLoggingContext(): + d.errback(e) if event_list: - reactor.callFromThread(fire, event_list) + with PreserveLoggingContext(): + reactor.callFromThread(fire, event_list) @defer.inlineCallbacks def _enqueue_events(self, events, check_redacted=True, @@ -709,18 +713,20 @@ class EventsStore(SQLBaseStore): should_start = False if should_start: - self.runWithConnection( - self._do_fetch - ) + with PreserveLoggingContext(): + self.runWithConnection( + self._do_fetch + ) - rows = yield preserve_context_over_deferred(events_d) + with PreserveLoggingContext(): + rows = yield events_d if not allow_rejected: rows[:] = [r for r in rows if not r["rejects"]] res = yield defer.gatherResults( [ - self._get_event_from_row( + preserve_fn(self._get_event_from_row)( row["internal_metadata"], row["json"], row["redacts"], check_redacted=check_redacted, get_prev_content=get_prev_content, diff --git a/synapse/storage/presence.py b/synapse/storage/presence.py index 9b3aecaf8c..ef525f34c5 100644 --- a/synapse/storage/presence.py +++ b/synapse/storage/presence.py @@ -68,8 +68,9 @@ class PresenceStore(SQLBaseStore): for row in rows }) + @defer.inlineCallbacks def 
set_presence_state(self, user_localpart, new_state): - res = self._simple_update_one( + res = yield self._simple_update_one( table="presence", keyvalues={"user_id": user_localpart}, updatevalues={"state": new_state["state"], @@ -79,7 +80,7 @@ class PresenceStore(SQLBaseStore): ) self.get_presence_state.invalidate((user_localpart,)) - return res + defer.returnValue(res) def allow_presence_visible(self, observed_localpart, observer_userid): return self._simple_insert( diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 50436cb2d2..367ffc9543 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -39,6 +39,7 @@ from ._base import SQLBaseStore from synapse.util.caches.descriptors import cachedInlineCallbacks from synapse.api.constants import EventTypes from synapse.types import RoomStreamToken +from synapse.util.logcontext import preserve_fn import logging @@ -170,12 +171,12 @@ class StreamStore(SQLBaseStore): room_ids = list(room_ids) for rm_ids in (room_ids[i:i + 20] for i in xrange(0, len(room_ids), 20)): res = yield defer.gatherResults([ - self.get_room_events_stream_for_room( - room_id, from_key, to_key, limit - ).addCallback(lambda r, rm: (rm, r), room_id) + preserve_fn(self.get_room_events_stream_for_room)( + room_id, from_key, to_key, limit, + ) for room_id in room_ids ]) - results.update(dict(res)) + results.update(dict(zip(rm_ids, res))) defer.returnValue(results) diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index 7566d9eb33..133671e238 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from synapse.util.logcontext import LoggingContext, PreserveLoggingContext +from synapse.util.logcontext import PreserveLoggingContext from twisted.internet import defer, reactor, task @@ -61,10 +61,8 @@ class Clock(object): *args: Postional arguments to pass to function. **kwargs: Key arguments to pass to function. """ - current_context = LoggingContext.current_context() - def wrapped_callback(*args, **kwargs): - with PreserveLoggingContext(current_context): + with PreserveLoggingContext(): callback(*args, **kwargs) with PreserveLoggingContext(): diff --git a/synapse/util/async.py b/synapse/util/async.py index 200edd404c..640fae3890 100644 --- a/synapse/util/async.py +++ b/synapse/util/async.py @@ -16,13 +16,16 @@ from twisted.internet import defer, reactor -from .logcontext import preserve_context_over_deferred +from .logcontext import PreserveLoggingContext +@defer.inlineCallbacks def sleep(seconds): d = defer.Deferred() - reactor.callLater(seconds, d.callback, seconds) - return preserve_context_over_deferred(d) + with PreserveLoggingContext(): + reactor.callLater(seconds, d.callback, seconds) + res = yield d + defer.returnValue(res) def run_on_reactor(): @@ -54,6 +57,7 @@ class ObservableDeferred(object): object.__setattr__(self, "_result", (True, r)) while self._observers: try: + # TODO: Handle errors here. self._observers.pop().callback(r) except: pass @@ -63,6 +67,7 @@ class ObservableDeferred(object): object.__setattr__(self, "_result", (False, f)) while self._observers: try: + # TODO: Handle errors here. 
self._observers.pop().errback(f) except: pass diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index e27917c63a..277854ccbc 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -18,6 +18,9 @@ from synapse.util.async import ObservableDeferred from synapse.util import unwrapFirstError from synapse.util.caches.lrucache import LruCache from synapse.util.caches.treecache import TreeCache +from synapse.util.logcontext import ( + PreserveLoggingContext, preserve_context_over_deferred, preserve_context_over_fn +) from . import caches_by_name, DEBUG_CACHES, cache_counter @@ -190,7 +193,7 @@ class CacheDescriptor(object): defer.returnValue(cached_result) observer.addCallback(check_result) - return observer + return preserve_context_over_deferred(observer) except KeyError: # Get the sequence number of the cache before reading from the # database so that we can tell if the cache is invalidated @@ -198,6 +201,7 @@ class CacheDescriptor(object): sequence = self.cache.sequence ret = defer.maybeDeferred( + preserve_context_over_fn, self.function_to_call, obj, *args, **kwargs ) @@ -211,7 +215,7 @@ class CacheDescriptor(object): ret = ObservableDeferred(ret, consumeErrors=True) self.cache.update(sequence, cache_key, ret) - return ret.observe() + return preserve_context_over_deferred(ret.observe()) wrapped.invalidate = self.cache.invalidate wrapped.invalidate_all = self.cache.invalidate_all @@ -299,6 +303,7 @@ class CacheListDescriptor(object): args_to_call[self.list_name] = missing ret_d = defer.maybeDeferred( + preserve_context_over_fn, self.function_to_call, **args_to_call ) @@ -308,7 +313,8 @@ class CacheListDescriptor(object): # We need to create deferreds for each arg in the list so that # we can insert the new deferred into the cache. for arg in missing: - observer = ret_d.observe() + with PreserveLoggingContext(): + observer = ret_d.observe() observer.addCallback(lambda r, arg: r.get(arg, None), arg) observer = ObservableDeferred(observer) @@ -327,10 +333,10 @@ class CacheListDescriptor(object): cached[arg] = res - return defer.gatherResults( + return preserve_context_over_deferred(defer.gatherResults( cached.values(), consumeErrors=True, - ).addErrback(unwrapFirstError).addCallback(lambda res: dict(res)) + ).addErrback(unwrapFirstError).addCallback(lambda res: dict(res))) obj.__dict__[self.orig.__name__] = wrapped diff --git a/synapse/util/caches/snapshot_cache.py b/synapse/util/caches/snapshot_cache.py index b1e40417fd..d03678b8c8 100644 --- a/synapse/util/caches/snapshot_cache.py +++ b/synapse/util/caches/snapshot_cache.py @@ -87,7 +87,8 @@ class SnapshotCache(object): # expire from the rotation of that cache. 
self.next_result_cache[key] = result self.pending_result_cache.pop(key, None) + return r - result.observe().addBoth(shuffle_along) + result.addBoth(shuffle_along) return result.observe() diff --git a/synapse/util/distributor.py b/synapse/util/distributor.py index 4ebfebf701..8875813de4 100644 --- a/synapse/util/distributor.py +++ b/synapse/util/distributor.py @@ -15,9 +15,7 @@ from twisted.internet import defer -from synapse.util.logcontext import ( - PreserveLoggingContext, preserve_context_over_deferred, -) +from synapse.util.logcontext import PreserveLoggingContext from synapse.util import unwrapFirstError @@ -97,6 +95,7 @@ class Signal(object): Each observer callable may return a Deferred.""" self.observers.append(observer) + @defer.inlineCallbacks def fire(self, *args, **kwargs): """Invokes every callable in the observer list, passing in the args and kwargs. Exceptions thrown by observers are logged but ignored. It is @@ -116,6 +115,7 @@ class Signal(object): failure.getTracebackObject())) if not self.suppress_failures: return failure + return defer.maybeDeferred(observer, *args, **kwargs).addErrback(eb) with PreserveLoggingContext(): @@ -124,8 +124,11 @@ class Signal(object): for observer in self.observers ] - d = defer.gatherResults(deferreds, consumeErrors=True) + res = yield defer.gatherResults( + deferreds, consumeErrors=True + ).addErrback(unwrapFirstError) - d.addErrback(unwrapFirstError) + defer.returnValue(res) - return preserve_context_over_deferred(d) + def __repr__(self): + return "" % (self.name,) diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py index e701092cd8..9134e67908 100644 --- a/synapse/util/logcontext.py +++ b/synapse/util/logcontext.py @@ -48,7 +48,7 @@ class LoggingContext(object): __slots__ = [ "parent_context", "name", "usage_start", "usage_end", "main_thread", - "__dict__", "tag", + "__dict__", "tag", "alive", ] thread_local = threading.local() @@ -88,6 +88,7 @@ class LoggingContext(object): self.usage_start = None self.main_thread = threading.current_thread() self.tag = "" + self.alive = True def __str__(self): return "%s@%x" % (self.name, id(self)) @@ -106,6 +107,7 @@ class LoggingContext(object): The context that was previously active """ current = cls.current_context() + if current is not context: current.stop() cls.thread_local.current_context = context @@ -117,6 +119,7 @@ class LoggingContext(object): if self.parent_context is not None: raise Exception("Attempt to enter logging context multiple times") self.parent_context = self.set_current_context(self) + self.alive = True return self def __exit__(self, type, value, traceback): @@ -136,6 +139,7 @@ class LoggingContext(object): self ) self.parent_context = None + self.alive = False def __getattr__(self, name): """Delegate member lookup to parent context""" @@ -213,7 +217,7 @@ class PreserveLoggingContext(object): exited. 
Used to restore the context after a function using @defer.inlineCallbacks is resumed by a callback from the reactor.""" - __slots__ = ["current_context", "new_context"] + __slots__ = ["current_context", "new_context", "has_parent"] def __init__(self, new_context=LoggingContext.sentinel): self.new_context = new_context @@ -224,11 +228,26 @@ class PreserveLoggingContext(object): self.new_context ) + if self.current_context: + self.has_parent = self.current_context.parent_context is not None + if not self.current_context.alive: + logger.warn( + "Entering dead context: %s", + self.current_context, + ) + def __exit__(self, type, value, traceback): """Restores the current logging context""" - LoggingContext.set_current_context(self.current_context) + context = LoggingContext.set_current_context(self.current_context) + + if context != self.new_context: + logger.warn( + "Unexpected logging context: %s is not %s", + context, self.new_context, + ) + if self.current_context is not LoggingContext.sentinel: - if self.current_context.parent_context is None: + if not self.current_context.alive: logger.warn( "Restoring dead context: %s", self.current_context, @@ -289,3 +308,74 @@ def preserve_context_over_deferred(deferred): d = _PreservingContextDeferred(current_context) deferred.chainDeferred(d) return d + + +def preserve_fn(f): + """Ensures that function is called with correct context and that context is + restored after return. Useful for wrapping functions that return a deferred + which you don't yield on. + """ + current = LoggingContext.current_context() + + def g(*args, **kwargs): + with PreserveLoggingContext(current): + return f(*args, **kwargs) + + return g + + +# modules to ignore in `logcontext_tracer` +_to_ignore = [ + "synapse.util.logcontext", + "synapse.http.server", + "synapse.storage._base", + "synapse.util.async", +] + + +def logcontext_tracer(frame, event, arg): + """A tracer that logs whenever a logcontext "unexpectedly" changes within + a function. Probably inaccurate. + + Use by calling `sys.settrace(logcontext_tracer)` in the main thread. + """ + if event == 'call': + name = frame.f_globals["__name__"] + if name.startswith("synapse"): + if name == "synapse.util.logcontext": + if frame.f_code.co_name in ["__enter__", "__exit__"]: + tracer = frame.f_back.f_trace + if tracer: + tracer.just_changed = True + + tracer = frame.f_trace + if tracer: + return tracer + + if not any(name.startswith(ig) for ig in _to_ignore): + return LineTracer() + + +class LineTracer(object): + __slots__ = ["context", "just_changed"] + + def __init__(self): + self.context = LoggingContext.current_context() + self.just_changed = False + + def __call__(self, frame, event, arg): + if event in 'line': + if self.just_changed: + self.context = LoggingContext.current_context() + self.just_changed = False + else: + c = LoggingContext.current_context() + if c != self.context: + logger.info( + "Context changed! 
%s -> %s, %s, %s", + self.context, c, + frame.f_code.co_filename, frame.f_lineno + ) + self.context = c + + return self diff --git a/synapse/util/logutils.py b/synapse/util/logutils.py index c37a157787..3a83828d25 100644 --- a/synapse/util/logutils.py +++ b/synapse/util/logutils.py @@ -168,3 +168,38 @@ def trace_function(f): wrapped.__name__ = func_name return wrapped + + +def get_previous_frames(): + s = inspect.currentframe().f_back.f_back + to_return = [] + while s: + if s.f_globals["__name__"].startswith("synapse"): + filename, lineno, function, _, _ = inspect.getframeinfo(s) + args_string = inspect.formatargvalues(*inspect.getargvalues(s)) + + to_return.append("{{ %s:%d %s - Args: %s }}" % ( + filename, lineno, function, args_string + )) + + s = s.f_back + + return ", ". join(to_return) + + +def get_previous_frame(ignore=[]): + s = inspect.currentframe().f_back.f_back + + while s: + if s.f_globals["__name__"].startswith("synapse"): + if not any(s.f_globals["__name__"].startswith(ig) for ig in ignore): + filename, lineno, function, _, _ = inspect.getframeinfo(s) + args_string = inspect.formatargvalues(*inspect.getargvalues(s)) + + return "{{ %s:%d %s - Args: %s }}" % ( + filename, lineno, function, args_string + ) + + s = s.f_back + + return None diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py index daf6087fe0..ca48007218 100644 --- a/synapse/util/metrics.py +++ b/synapse/util/metrics.py @@ -68,16 +68,18 @@ class Measure(object): block_timer.inc_by(duration, self.name) context = LoggingContext.current_context() - if not context: - return if context != self.start_context: logger.warn( - "Context have unexpectedly changed %r, %r", - context, self.start_context + "Context have unexpectedly changed from '%s' to '%s'. (%r)", + context, self.start_context, self.name ) return + if not context: + logger.warn("Expected context. (%r)", self.name) + return + ru_utime, ru_stime = context.get_resource_usage() block_ru_utime.inc_by(ru_utime, self.name) diff --git a/synapse/util/ratelimitutils.py b/synapse/util/ratelimitutils.py index ea321bc6a9..4076eed269 100644 --- a/synapse/util/ratelimitutils.py +++ b/synapse/util/ratelimitutils.py @@ -18,6 +18,7 @@ from twisted.internet import defer from synapse.api.errors import LimitExceededError from synapse.util.async import sleep +from synapse.util.logcontext import preserve_fn import collections import contextlib @@ -163,7 +164,7 @@ class _PerHostRatelimiter(object): "Ratelimit [%s]: sleeping req", id(request_id), ) - ret_defer = sleep(self.sleep_msec / 1000.0) + ret_defer = preserve_fn(sleep)(self.sleep_msec / 1000.0) self.sleeping_requests.add(request_id) From f078ecbc8fa47075a155765ea07a2211dbbad86d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 8 Feb 2016 16:35:44 +0000 Subject: [PATCH 115/349] Derive macaroon_secret_key from signing key. Unfortunately, there are people that are running synapse without a `macaroon_sercret_key` set. Mandating they set one is a good solution, except that breaking auto upgrades is annoying. 
--- synapse/config/key.py | 28 +++++++++++++++++++++++++++- synapse/config/registration.py | 18 +++--------------- 2 files changed, 30 insertions(+), 16 deletions(-) diff --git a/synapse/config/key.py b/synapse/config/key.py index ac90cd3fc1..a072aec714 100644 --- a/synapse/config/key.py +++ b/synapse/config/key.py @@ -22,8 +22,14 @@ from signedjson.key import ( read_signing_keys, write_signing_keys, NACL_ED25519 ) from unpaddedbase64 import decode_base64 +from synapse.util.stringutils import random_string_with_symbols import os +import hashlib +import logging + + +logger = logging.getLogger(__name__) class KeyConfig(Config): @@ -40,9 +46,29 @@ class KeyConfig(Config): config["perspectives"] ) - def default_config(self, config_dir_path, server_name, **kwargs): + self.macaroon_secret_key = config.get( + "macaroon_secret_key", self.registration_shared_secret + ) + + if not self.macaroon_secret_key: + # Unfortunately, there are people out there that don't have this + # set. Lets just be "nice" and derive one from their secret key. + logger.warn("Config is missing missing macaroon_secret_key") + seed = self.signing_key[0].seed + self.macaroon_secret_key = hashlib.sha256(seed) + + def default_config(self, config_dir_path, server_name, is_generating_file=False, + **kwargs): base_key_name = os.path.join(config_dir_path, server_name) + + if is_generating_file: + macaroon_secret_key = random_string_with_symbols(50) + else: + macaroon_secret_key = None + return """\ + macaroon_secret_key: "%(macaroon_secret_key)s" + ## Signing Keys ## # Path to the signing key to sign messages with diff --git a/synapse/config/registration.py b/synapse/config/registration.py index 9b6dacc5b8..ab062d528c 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -32,26 +32,14 @@ class RegistrationConfig(Config): ) self.registration_shared_secret = config.get("registration_shared_secret") - self.macaroon_secret_key = config.get("macaroon_secret_key") - if self.macaroon_secret_key is None: - raise Exception( - "Config is missing missing macaroon_secret_key - please set it" - " in your config file." - ) + self.bcrypt_rounds = config.get("bcrypt_rounds", 12) self.trusted_third_party_id_servers = config["trusted_third_party_id_servers"] self.allow_guest_access = config.get("allow_guest_access", False) - def default_config(self, is_generating_file=False, **kwargs): + def default_config(self, **kwargs): registration_shared_secret = random_string_with_symbols(50) - macaroon_line = "" - if is_generating_file: - macaroon_line += '\n macaroon_secret_key: "%s"\n' % ( - random_string_with_symbols(50), - ) - - macaroon_secret_key = random_string_with_symbols(50) return """\ ## Registration ## @@ -61,7 +49,7 @@ class RegistrationConfig(Config): # If set, allows registration by anyone who also has the shared # secret, even if registration is otherwise disabled. registration_shared_secret: "%(registration_shared_secret)s" -%(macaroon_line)s + # Set the number of bcrypt rounds used to generate password hash. # Larger numbers increase the work factor needed to generate the hash. # The default number of rounds is 12. 
From c486b7b41cafeb120815a53992318f911b0d30f3 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 9 Feb 2016 09:20:06 +0000 Subject: [PATCH 116/349] Change logcontext warns to debug --- synapse/util/logcontext.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py index 9134e67908..b22a36336b 100644 --- a/synapse/util/logcontext.py +++ b/synapse/util/logcontext.py @@ -231,7 +231,7 @@ class PreserveLoggingContext(object): if self.current_context: self.has_parent = self.current_context.parent_context is not None if not self.current_context.alive: - logger.warn( + logger.debug( "Entering dead context: %s", self.current_context, ) @@ -241,14 +241,14 @@ class PreserveLoggingContext(object): context = LoggingContext.set_current_context(self.current_context) if context != self.new_context: - logger.warn( + logger.debug( "Unexpected logging context: %s is not %s", context, self.new_context, ) if self.current_context is not LoggingContext.sentinel: if not self.current_context.alive: - logger.warn( + logger.debug( "Restoring dead context: %s", self.current_context, ) From 549698b1e025263b2c4d8aa3f9b20f1c8761dc59 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 9 Feb 2016 09:37:09 +0000 Subject: [PATCH 117/349] Don't measure across event stream call, as it lasts for a long time. --- synapse/push/__init__.py | 244 +++++++++++++++++++-------------------- 1 file changed, 122 insertions(+), 122 deletions(-) diff --git a/synapse/push/__init__.py b/synapse/push/__init__.py index 8da2d8716c..b9522a8050 100644 --- a/synapse/push/__init__.py +++ b/synapse/push/__init__.py @@ -128,8 +128,7 @@ class Pusher(object): try: if wait > 0: yield synapse.util.async.sleep(wait) - with Measure(self.clock, "push"): - yield self.get_and_dispatch() + yield self.get_and_dispatch() wait = 0 except: if wait == 0: @@ -151,115 +150,27 @@ class Pusher(object): only_keys=("room", "receipt",), ) - # limiting to 1 may get 1 event plus 1 presence event, so - # pick out the actual event - single_event = None - read_receipt = None - for c in chunk['chunk']: - if 'event_id' in c: # Hmmm... - single_event = c - elif c['type'] == 'm.receipt': - read_receipt = c + with Measure(self.clock, "push"): + # limiting to 1 may get 1 event plus 1 presence event, so + # pick out the actual event + single_event = None + read_receipt = None + for c in chunk['chunk']: + if 'event_id' in c: # Hmmm... 
+ single_event = c + elif c['type'] == 'm.receipt': + read_receipt = c - have_updated_badge = False - if read_receipt: - for receipt_part in read_receipt['content'].values(): - if 'm.read' in receipt_part: - if self.user_id in receipt_part['m.read'].keys(): - have_updated_badge = True + have_updated_badge = False + if read_receipt: + for receipt_part in read_receipt['content'].values(): + if 'm.read' in receipt_part: + if self.user_id in receipt_part['m.read'].keys(): + have_updated_badge = True - if not single_event: - if have_updated_badge: - yield self.update_badge() - self.last_token = chunk['end'] - yield self.store.update_pusher_last_token( - self.app_id, - self.pushkey, - self.user_id, - self.last_token - ) - return - - if not self.alive: - return - - processed = False - - rule_evaluator = yield \ - push_rule_evaluator.evaluator_for_user_id_and_profile_tag( - self.user_id, self.profile_tag, single_event['room_id'], self.store - ) - - actions = yield rule_evaluator.actions_for_event(single_event) - tweaks = rule_evaluator.tweaks_for_actions(actions) - - if 'notify' in actions: - self.badge = yield self._get_badge_count() - rejected = yield self.dispatch_push(single_event, tweaks, self.badge) - self.has_unread = True - if isinstance(rejected, list) or isinstance(rejected, tuple): - processed = True - for pk in rejected: - if pk != self.pushkey: - # for sanity, we only remove the pushkey if it - # was the one we actually sent... - logger.warn( - ("Ignoring rejected pushkey %s because we" - " didn't send it"), pk - ) - else: - logger.info( - "Pushkey %s was rejected: removing", - pk - ) - yield self.hs.get_pusherpool().remove_pusher( - self.app_id, pk, self.user_id - ) - else: - if have_updated_badge: - yield self.update_badge() - processed = True - - if not self.alive: - return - - if processed: - self.backoff_delay = Pusher.INITIAL_BACKOFF - self.last_token = chunk['end'] - yield self.store.update_pusher_last_token_and_success( - self.app_id, - self.pushkey, - self.user_id, - self.last_token, - self.clock.time_msec() - ) - if self.failing_since: - self.failing_since = None - yield self.store.update_pusher_failing_since( - self.app_id, - self.pushkey, - self.user_id, - self.failing_since) - else: - if not self.failing_since: - self.failing_since = self.clock.time_msec() - yield self.store.update_pusher_failing_since( - self.app_id, - self.pushkey, - self.user_id, - self.failing_since - ) - - if (self.failing_since and - self.failing_since < - self.clock.time_msec() - Pusher.GIVE_UP_AFTER): - # we really only give up so that if the URL gets - # fixed, we don't suddenly deliver a load - # of old notifications. 
- logger.warn("Giving up on a notification to user %s, " - "pushkey %s", - self.user_id, self.pushkey) - self.backoff_delay = Pusher.INITIAL_BACKOFF + if not single_event: + if have_updated_badge: + yield self.update_badge() self.last_token = chunk['end'] yield self.store.update_pusher_last_token( self.app_id, @@ -267,25 +178,114 @@ class Pusher(object): self.user_id, self.last_token ) + return - self.failing_since = None - yield self.store.update_pusher_failing_since( + if not self.alive: + return + + processed = False + + rule_evaluator = yield \ + push_rule_evaluator.evaluator_for_user_id_and_profile_tag( + self.user_id, self.profile_tag, single_event['room_id'], self.store + ) + + actions = yield rule_evaluator.actions_for_event(single_event) + tweaks = rule_evaluator.tweaks_for_actions(actions) + + if 'notify' in actions: + self.badge = yield self._get_badge_count() + rejected = yield self.dispatch_push(single_event, tweaks, self.badge) + self.has_unread = True + if isinstance(rejected, list) or isinstance(rejected, tuple): + processed = True + for pk in rejected: + if pk != self.pushkey: + # for sanity, we only remove the pushkey if it + # was the one we actually sent... + logger.warn( + ("Ignoring rejected pushkey %s because we" + " didn't send it"), pk + ) + else: + logger.info( + "Pushkey %s was rejected: removing", + pk + ) + yield self.hs.get_pusherpool().remove_pusher( + self.app_id, pk, self.user_id + ) + else: + if have_updated_badge: + yield self.update_badge() + processed = True + + if not self.alive: + return + + if processed: + self.backoff_delay = Pusher.INITIAL_BACKOFF + self.last_token = chunk['end'] + yield self.store.update_pusher_last_token_and_success( self.app_id, self.pushkey, self.user_id, - self.failing_since + self.last_token, + self.clock.time_msec() ) + if self.failing_since: + self.failing_since = None + yield self.store.update_pusher_failing_since( + self.app_id, + self.pushkey, + self.user_id, + self.failing_since) else: - logger.warn("Failed to dispatch push for user %s " - "(failing for %dms)." - "Trying again in %dms", - self.user_id, - self.clock.time_msec() - self.failing_since, - self.backoff_delay) - yield synapse.util.async.sleep(self.backoff_delay / 1000.0) - self.backoff_delay *= 2 - if self.backoff_delay > Pusher.MAX_BACKOFF: - self.backoff_delay = Pusher.MAX_BACKOFF + if not self.failing_since: + self.failing_since = self.clock.time_msec() + yield self.store.update_pusher_failing_since( + self.app_id, + self.pushkey, + self.user_id, + self.failing_since + ) + + if (self.failing_since and + self.failing_since < + self.clock.time_msec() - Pusher.GIVE_UP_AFTER): + # we really only give up so that if the URL gets + # fixed, we don't suddenly deliver a load + # of old notifications. + logger.warn("Giving up on a notification to user %s, " + "pushkey %s", + self.user_id, self.pushkey) + self.backoff_delay = Pusher.INITIAL_BACKOFF + self.last_token = chunk['end'] + yield self.store.update_pusher_last_token( + self.app_id, + self.pushkey, + self.user_id, + self.last_token + ) + + self.failing_since = None + yield self.store.update_pusher_failing_since( + self.app_id, + self.pushkey, + self.user_id, + self.failing_since + ) + else: + logger.warn("Failed to dispatch push for user %s " + "(failing for %dms)." 
+ "Trying again in %dms", + self.user_id, + self.clock.time_msec() - self.failing_since, + self.backoff_delay) + yield synapse.util.async.sleep(self.backoff_delay / 1000.0) + self.backoff_delay *= 2 + if self.backoff_delay > Pusher.MAX_BACKOFF: + self.backoff_delay = Pusher.MAX_BACKOFF def stop(self): self.alive = False From 97294ef2fd5737a58ff0a5d551a17e77dcf1baaf Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 9 Feb 2016 10:10:20 +0000 Subject: [PATCH 118/349] Create new context when measuring --- synapse/util/metrics.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py index ca48007218..f70d855122 100644 --- a/synapse/util/metrics.py +++ b/synapse/util/metrics.py @@ -48,26 +48,29 @@ block_db_txn_duration = metrics.register_distribution( class Measure(object): - __slots__ = ["clock", "name", "start_context", "start"] + __slots__ = ["clock", "name", "start_context", "start", "new_context"] def __init__(self, clock, name): self.clock = clock self.name = name self.start_context = None self.start = None + self.new_context = LoggingContext(self.name) def __enter__(self): self.start = self.clock.time_msec() self.start_context = LoggingContext.current_context() + self.new_context.__enter__() def __exit__(self, exc_type, exc_val, exc_tb): + self.new_context.__exit__(exc_type, exc_val, exc_tb) if exc_type is not None: return duration = self.clock.time_msec() - self.start block_timer.inc_by(duration, self.name) - context = LoggingContext.current_context() + context = self.new_context if context != self.start_context: logger.warn( From 241b71852e549de8a3042f8a7c22ba56af482657 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 9 Feb 2016 10:16:50 +0000 Subject: [PATCH 119/349] Fix bug in util.metrics.Measure --- synapse/util/metrics.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py index f70d855122..7c6899aa1c 100644 --- a/synapse/util/metrics.py +++ b/synapse/util/metrics.py @@ -63,6 +63,8 @@ class Measure(object): self.new_context.__enter__() def __exit__(self, exc_type, exc_val, exc_tb): + current_context = LoggingContext.current_context() + self.new_context.__exit__(exc_type, exc_val, exc_tb) if exc_type is not None: return @@ -72,7 +74,7 @@ class Measure(object): context = self.new_context - if context != self.start_context: + if context != current_context: logger.warn( "Context have unexpectedly changed from '%s' to '%s'. 
(%r)", context, self.start_context, self.name From 3e2fcd67b29784f01ed44cb53f152ebd2d98ed6b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 9 Feb 2016 10:50:31 +0000 Subject: [PATCH 120/349] Don't bother copying records on parent context --- synapse/util/logcontext.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py index b22a36336b..301b30d58a 100644 --- a/synapse/util/logcontext.py +++ b/synapse/util/logcontext.py @@ -116,8 +116,6 @@ class LoggingContext(object): def __enter__(self): """Enters this logging context into thread local storage""" - if self.parent_context is not None: - raise Exception("Attempt to enter logging context multiple times") self.parent_context = self.set_current_context(self) self.alive = True return self @@ -141,14 +139,8 @@ class LoggingContext(object): self.parent_context = None self.alive = False - def __getattr__(self, name): - """Delegate member lookup to parent context""" - return getattr(self.parent_context, name) - def copy_to(self, record): """Copy fields from this context and its parents to the record""" - if self.parent_context is not None: - self.parent_context.copy_to(record) for key, value in self.__dict__.items(): setattr(record, key, value) From 9daa4e2a8547b954171d88a7b141e863cef1fdf9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 9 Feb 2016 11:06:19 +0000 Subject: [PATCH 121/349] Don't create new logging context --- synapse/util/metrics.py | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py index 7c6899aa1c..57ca3b4751 100644 --- a/synapse/util/metrics.py +++ b/synapse/util/metrics.py @@ -48,33 +48,34 @@ block_db_txn_duration = metrics.register_distribution( class Measure(object): - __slots__ = ["clock", "name", "start_context", "start", "new_context"] + __slots__ = [ + "clock", "name", "start_context", "start", "new_context", "ru_utime", + "ru_stime", "db_txn_count", "db_txn_duration" + ] def __init__(self, clock, name): self.clock = clock self.name = name self.start_context = None self.start = None - self.new_context = LoggingContext(self.name) def __enter__(self): self.start = self.clock.time_msec() self.start_context = LoggingContext.current_context() - self.new_context.__enter__() + self.ru_utime, self.ru_stime = self.start_context.get_resource_usage() + self.db_txn_count = self.start_context.db_txn_count + self.db_txn_duration = self.start_context.db_txn_duration def __exit__(self, exc_type, exc_val, exc_tb): - current_context = LoggingContext.current_context() - - self.new_context.__exit__(exc_type, exc_val, exc_tb) if exc_type is not None: return duration = self.clock.time_msec() - self.start block_timer.inc_by(duration, self.name) - context = self.new_context + context = LoggingContext.current_context() - if context != current_context: + if context != self.start_context: logger.warn( "Context have unexpectedly changed from '%s' to '%s'. 
(%r)", context, self.start_context, self.name @@ -87,7 +88,9 @@ class Measure(object): ru_utime, ru_stime = context.get_resource_usage() - block_ru_utime.inc_by(ru_utime, self.name) - block_ru_stime.inc_by(ru_stime, self.name) - block_db_txn_count.inc_by(context.db_txn_count, self.name) - block_db_txn_duration.inc_by(context.db_txn_duration, self.name) + block_ru_utime.inc_by(ru_utime - self.ru_utime, self.name) + block_ru_stime.inc_by(ru_stime - self.ru_stime, self.name) + block_db_txn_count.inc_by(context.db_txn_count - self.db_txn_count, self.name) + block_db_txn_duration.inc_by( + context.db_txn_duration - self.db_txn_duration, self.name + ) From 31a2b892d8935dcb49cbc3c71a0ccf8aa34eeb97 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 9 Feb 2016 11:25:09 +0000 Subject: [PATCH 122/349] Revert to putting it around the entire block --- synapse/push/__init__.py | 244 +++++++++++++++++++-------------------- 1 file changed, 122 insertions(+), 122 deletions(-) diff --git a/synapse/push/__init__.py b/synapse/push/__init__.py index b9522a8050..8da2d8716c 100644 --- a/synapse/push/__init__.py +++ b/synapse/push/__init__.py @@ -128,7 +128,8 @@ class Pusher(object): try: if wait > 0: yield synapse.util.async.sleep(wait) - yield self.get_and_dispatch() + with Measure(self.clock, "push"): + yield self.get_and_dispatch() wait = 0 except: if wait == 0: @@ -150,27 +151,115 @@ class Pusher(object): only_keys=("room", "receipt",), ) - with Measure(self.clock, "push"): - # limiting to 1 may get 1 event plus 1 presence event, so - # pick out the actual event - single_event = None - read_receipt = None - for c in chunk['chunk']: - if 'event_id' in c: # Hmmm... - single_event = c - elif c['type'] == 'm.receipt': - read_receipt = c + # limiting to 1 may get 1 event plus 1 presence event, so + # pick out the actual event + single_event = None + read_receipt = None + for c in chunk['chunk']: + if 'event_id' in c: # Hmmm... + single_event = c + elif c['type'] == 'm.receipt': + read_receipt = c - have_updated_badge = False - if read_receipt: - for receipt_part in read_receipt['content'].values(): - if 'm.read' in receipt_part: - if self.user_id in receipt_part['m.read'].keys(): - have_updated_badge = True + have_updated_badge = False + if read_receipt: + for receipt_part in read_receipt['content'].values(): + if 'm.read' in receipt_part: + if self.user_id in receipt_part['m.read'].keys(): + have_updated_badge = True - if not single_event: - if have_updated_badge: - yield self.update_badge() + if not single_event: + if have_updated_badge: + yield self.update_badge() + self.last_token = chunk['end'] + yield self.store.update_pusher_last_token( + self.app_id, + self.pushkey, + self.user_id, + self.last_token + ) + return + + if not self.alive: + return + + processed = False + + rule_evaluator = yield \ + push_rule_evaluator.evaluator_for_user_id_and_profile_tag( + self.user_id, self.profile_tag, single_event['room_id'], self.store + ) + + actions = yield rule_evaluator.actions_for_event(single_event) + tweaks = rule_evaluator.tweaks_for_actions(actions) + + if 'notify' in actions: + self.badge = yield self._get_badge_count() + rejected = yield self.dispatch_push(single_event, tweaks, self.badge) + self.has_unread = True + if isinstance(rejected, list) or isinstance(rejected, tuple): + processed = True + for pk in rejected: + if pk != self.pushkey: + # for sanity, we only remove the pushkey if it + # was the one we actually sent... 
+ logger.warn( + ("Ignoring rejected pushkey %s because we" + " didn't send it"), pk + ) + else: + logger.info( + "Pushkey %s was rejected: removing", + pk + ) + yield self.hs.get_pusherpool().remove_pusher( + self.app_id, pk, self.user_id + ) + else: + if have_updated_badge: + yield self.update_badge() + processed = True + + if not self.alive: + return + + if processed: + self.backoff_delay = Pusher.INITIAL_BACKOFF + self.last_token = chunk['end'] + yield self.store.update_pusher_last_token_and_success( + self.app_id, + self.pushkey, + self.user_id, + self.last_token, + self.clock.time_msec() + ) + if self.failing_since: + self.failing_since = None + yield self.store.update_pusher_failing_since( + self.app_id, + self.pushkey, + self.user_id, + self.failing_since) + else: + if not self.failing_since: + self.failing_since = self.clock.time_msec() + yield self.store.update_pusher_failing_since( + self.app_id, + self.pushkey, + self.user_id, + self.failing_since + ) + + if (self.failing_since and + self.failing_since < + self.clock.time_msec() - Pusher.GIVE_UP_AFTER): + # we really only give up so that if the URL gets + # fixed, we don't suddenly deliver a load + # of old notifications. + logger.warn("Giving up on a notification to user %s, " + "pushkey %s", + self.user_id, self.pushkey) + self.backoff_delay = Pusher.INITIAL_BACKOFF self.last_token = chunk['end'] yield self.store.update_pusher_last_token( self.app_id, @@ -178,114 +267,25 @@ class Pusher(object): self.user_id, self.last_token ) - return - if not self.alive: - return - - processed = False - - rule_evaluator = yield \ - push_rule_evaluator.evaluator_for_user_id_and_profile_tag( - self.user_id, self.profile_tag, single_event['room_id'], self.store - ) - - actions = yield rule_evaluator.actions_for_event(single_event) - tweaks = rule_evaluator.tweaks_for_actions(actions) - - if 'notify' in actions: - self.badge = yield self._get_badge_count() - rejected = yield self.dispatch_push(single_event, tweaks, self.badge) - self.has_unread = True - if isinstance(rejected, list) or isinstance(rejected, tuple): - processed = True - for pk in rejected: - if pk != self.pushkey: - # for sanity, we only remove the pushkey if it - # was the one we actually sent... - logger.warn( - ("Ignoring rejected pushkey %s because we" - " didn't send it"), pk - ) - else: - logger.info( - "Pushkey %s was rejected: removing", - pk - ) - yield self.hs.get_pusherpool().remove_pusher( - self.app_id, pk, self.user_id - ) - else: - if have_updated_badge: - yield self.update_badge() - processed = True - - if not self.alive: - return - - if processed: - self.backoff_delay = Pusher.INITIAL_BACKOFF - self.last_token = chunk['end'] - yield self.store.update_pusher_last_token_and_success( + self.failing_since = None + yield self.store.update_pusher_failing_since( self.app_id, self.pushkey, self.user_id, - self.last_token, - self.clock.time_msec() + self.failing_since ) - if self.failing_since: - self.failing_since = None - yield self.store.update_pusher_failing_since( - self.app_id, - self.pushkey, - self.user_id, - self.failing_since) else: - if not self.failing_since: - self.failing_since = self.clock.time_msec() - yield self.store.update_pusher_failing_since( - self.app_id, - self.pushkey, - self.user_id, - self.failing_since - ) - - if (self.failing_since and - self.failing_since < - self.clock.time_msec() - Pusher.GIVE_UP_AFTER): - # we really only give up so that if the URL gets - # fixed, we don't suddenly deliver a load - # of old notifications. 
- logger.warn("Giving up on a notification to user %s, " - "pushkey %s", - self.user_id, self.pushkey) - self.backoff_delay = Pusher.INITIAL_BACKOFF - self.last_token = chunk['end'] - yield self.store.update_pusher_last_token( - self.app_id, - self.pushkey, - self.user_id, - self.last_token - ) - - self.failing_since = None - yield self.store.update_pusher_failing_since( - self.app_id, - self.pushkey, - self.user_id, - self.failing_since - ) - else: - logger.warn("Failed to dispatch push for user %s " - "(failing for %dms)." - "Trying again in %dms", - self.user_id, - self.clock.time_msec() - self.failing_since, - self.backoff_delay) - yield synapse.util.async.sleep(self.backoff_delay / 1000.0) - self.backoff_delay *= 2 - if self.backoff_delay > Pusher.MAX_BACKOFF: - self.backoff_delay = Pusher.MAX_BACKOFF + logger.warn("Failed to dispatch push for user %s " + "(failing for %dms)." + "Trying again in %dms", + self.user_id, + self.clock.time_msec() - self.failing_since, + self.backoff_delay) + yield synapse.util.async.sleep(self.backoff_delay / 1000.0) + self.backoff_delay *= 2 + if self.backoff_delay > Pusher.MAX_BACKOFF: + self.backoff_delay = Pusher.MAX_BACKOFF def stop(self): self.alive = False From 6c558ee8bc1376bc678dd20c9fb462c76bc913bc Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 9 Feb 2016 11:31:04 +0000 Subject: [PATCH 123/349] Measure some /sync related things --- synapse/handlers/sync.py | 283 +++++++++++++++++++------------------ synapse/handlers/typing.py | 23 +-- 2 files changed, 159 insertions(+), 147 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 3f1cda5b0b..3e5f27046e 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -19,6 +19,7 @@ from synapse.streams.config import PaginationConfig from synapse.api.constants import Membership, EventTypes from synapse.util import unwrapFirstError from synapse.util.logcontext import LoggingContext, PreserveLoggingContext +from synapse.util.metrics import Measure from twisted.internet import defer @@ -368,50 +369,51 @@ class SyncHandler(BaseHandler): typing events for that room. 
""" - typing_key = since_token.typing_key if since_token else "0" + with Measure(self.clock, "ephemeral_by_room"): + typing_key = since_token.typing_key if since_token else "0" - rooms = yield self.store.get_rooms_for_user(sync_config.user.to_string()) - room_ids = [room.room_id for room in rooms] + rooms = yield self.store.get_rooms_for_user(sync_config.user.to_string()) + room_ids = [room.room_id for room in rooms] - typing_source = self.event_sources.sources["typing"] - typing, typing_key = yield typing_source.get_new_events( - user=sync_config.user, - from_key=typing_key, - limit=sync_config.filter_collection.ephemeral_limit(), - room_ids=room_ids, - is_guest=sync_config.is_guest, - ) - now_token = now_token.copy_and_replace("typing_key", typing_key) + typing_source = self.event_sources.sources["typing"] + typing, typing_key = yield typing_source.get_new_events( + user=sync_config.user, + from_key=typing_key, + limit=sync_config.filter_collection.ephemeral_limit(), + room_ids=room_ids, + is_guest=sync_config.is_guest, + ) + now_token = now_token.copy_and_replace("typing_key", typing_key) - ephemeral_by_room = {} + ephemeral_by_room = {} - for event in typing: - # we want to exclude the room_id from the event, but modifying the - # result returned by the event source is poor form (it might cache - # the object) - room_id = event["room_id"] - event_copy = {k: v for (k, v) in event.iteritems() - if k != "room_id"} - ephemeral_by_room.setdefault(room_id, []).append(event_copy) + for event in typing: + # we want to exclude the room_id from the event, but modifying the + # result returned by the event source is poor form (it might cache + # the object) + room_id = event["room_id"] + event_copy = {k: v for (k, v) in event.iteritems() + if k != "room_id"} + ephemeral_by_room.setdefault(room_id, []).append(event_copy) - receipt_key = since_token.receipt_key if since_token else "0" + receipt_key = since_token.receipt_key if since_token else "0" - receipt_source = self.event_sources.sources["receipt"] - receipts, receipt_key = yield receipt_source.get_new_events( - user=sync_config.user, - from_key=receipt_key, - limit=sync_config.filter_collection.ephemeral_limit(), - room_ids=room_ids, - is_guest=sync_config.is_guest, - ) - now_token = now_token.copy_and_replace("receipt_key", receipt_key) + receipt_source = self.event_sources.sources["receipt"] + receipts, receipt_key = yield receipt_source.get_new_events( + user=sync_config.user, + from_key=receipt_key, + limit=sync_config.filter_collection.ephemeral_limit(), + room_ids=room_ids, + is_guest=sync_config.is_guest, + ) + now_token = now_token.copy_and_replace("receipt_key", receipt_key) - for event in receipts: - room_id = event["room_id"] - # exclude room id, as above - event_copy = {k: v for (k, v) in event.iteritems() - if k != "room_id"} - ephemeral_by_room.setdefault(room_id, []).append(event_copy) + for event in receipts: + room_id = event["room_id"] + # exclude room id, as above + event_copy = {k: v for (k, v) in event.iteritems() + if k != "room_id"} + ephemeral_by_room.setdefault(room_id, []).append(event_copy) defer.returnValue((now_token, ephemeral_by_room)) @@ -619,58 +621,61 @@ class SyncHandler(BaseHandler): """ :returns a Deferred TimelineBatch """ - filtering_factor = 2 - timeline_limit = sync_config.filter_collection.timeline_limit() - load_limit = max(timeline_limit * filtering_factor, 10) - max_repeat = 5 # Only try a few times per room, otherwise - room_key = now_token.room_key - end_key = room_key + with Measure(self.clock, 
"load_filtered_recents"): + filtering_factor = 2 + timeline_limit = sync_config.filter_collection.timeline_limit() + load_limit = max(timeline_limit * filtering_factor, 10) + max_repeat = 5 # Only try a few times per room, otherwise + room_key = now_token.room_key + end_key = room_key - limited = recents is None or newly_joined_room or timeline_limit < len(recents) + limited = recents is None or newly_joined_room or timeline_limit < len(recents) - if recents is not None: - recents = sync_config.filter_collection.filter_room_timeline(recents) - recents = yield self._filter_events_for_client( - sync_config.user.to_string(), - recents, - is_peeking=sync_config.is_guest, + if recents is not None: + recents = sync_config.filter_collection.filter_room_timeline(recents) + recents = yield self._filter_events_for_client( + sync_config.user.to_string(), + recents, + is_peeking=sync_config.is_guest, + ) + else: + recents = [] + + since_key = None + if since_token and not newly_joined_room: + since_key = since_token.room_key + + while limited and len(recents) < timeline_limit and max_repeat: + events, end_key = yield self.store.get_room_events_stream_for_room( + room_id, + limit=load_limit + 1, + from_key=since_key, + to_key=end_key, + ) + loaded_recents = sync_config.filter_collection.filter_room_timeline( + events + ) + loaded_recents = yield self._filter_events_for_client( + sync_config.user.to_string(), + loaded_recents, + is_peeking=sync_config.is_guest, + ) + loaded_recents.extend(recents) + recents = loaded_recents + + if len(events) <= load_limit: + limited = False + break + max_repeat -= 1 + + if len(recents) > timeline_limit: + limited = True + recents = recents[-timeline_limit:] + room_key = recents[0].internal_metadata.before + + prev_batch_token = now_token.copy_and_replace( + "room_key", room_key ) - else: - recents = [] - - since_key = None - if since_token and not newly_joined_room: - since_key = since_token.room_key - - while limited and len(recents) < timeline_limit and max_repeat: - events, end_key = yield self.store.get_room_events_stream_for_room( - room_id, - limit=load_limit + 1, - from_key=since_key, - to_key=end_key, - ) - loaded_recents = sync_config.filter_collection.filter_room_timeline(events) - loaded_recents = yield self._filter_events_for_client( - sync_config.user.to_string(), - loaded_recents, - is_peeking=sync_config.is_guest, - ) - loaded_recents.extend(recents) - recents = loaded_recents - - if len(events) <= load_limit: - limited = False - break - max_repeat -= 1 - - if len(recents) > timeline_limit: - limited = True - recents = recents[-timeline_limit:] - room_key = recents[0].internal_metadata.before - - prev_batch_token = now_token.copy_and_replace( - "room_key", room_key - ) defer.returnValue(TimelineBatch( events=recents, @@ -831,50 +836,53 @@ class SyncHandler(BaseHandler): # updates even if they occured logically before the previous event. # TODO(mjark) Check for new redactions in the state events. 
- if full_state: - if batch: - state = yield self.store.get_state_for_event(batch.events[0].event_id) - else: - state = yield self.get_state_at( - room_id, stream_position=now_token + with Measure(self.clock, "compute_state_delta"): + if full_state: + if batch: + state = yield self.store.get_state_for_event( + batch.events[0].event_id + ) + else: + state = yield self.get_state_at( + room_id, stream_position=now_token + ) + + timeline_state = { + (event.type, event.state_key): event + for event in batch.events if event.is_state() + } + + state = _calculate_state( + timeline_contains=timeline_state, + timeline_start=state, + previous={}, + ) + elif batch.limited: + state_at_previous_sync = yield self.get_state_at( + room_id, stream_position=since_token ) - timeline_state = { - (event.type, event.state_key): event - for event in batch.events if event.is_state() - } + state_at_timeline_start = yield self.store.get_state_for_event( + batch.events[0].event_id + ) - state = _calculate_state( - timeline_contains=timeline_state, - timeline_start=state, - previous={}, - ) - elif batch.limited: - state_at_previous_sync = yield self.get_state_at( - room_id, stream_position=since_token - ) + timeline_state = { + (event.type, event.state_key): event + for event in batch.events if event.is_state() + } - state_at_timeline_start = yield self.store.get_state_for_event( - batch.events[0].event_id - ) + state = _calculate_state( + timeline_contains=timeline_state, + timeline_start=state_at_timeline_start, + previous=state_at_previous_sync, + ) + else: + state = {} - timeline_state = { - (event.type, event.state_key): event - for event in batch.events if event.is_state() - } - - state = _calculate_state( - timeline_contains=timeline_state, - timeline_start=state_at_timeline_start, - previous=state_at_previous_sync, - ) - else: - state = {} - - defer.returnValue({ - (e.type, e.state_key): e - for e in sync_config.filter_collection.filter_room_state(state.values()) - }) + defer.returnValue({ + (e.type, e.state_key): e + for e in sync_config.filter_collection.filter_room_state(state.values()) + }) def check_joined_room(self, sync_config, state_delta): """ @@ -896,20 +904,21 @@ class SyncHandler(BaseHandler): @defer.inlineCallbacks def unread_notifs_for_room_id(self, room_id, sync_config, ephemeral_by_room): - last_unread_event_id = self.last_read_event_id_for_room_and_user( - room_id, sync_config.user.to_string(), ephemeral_by_room - ) - - notifs = [] - if last_unread_event_id: - notifs = yield self.store.get_unread_event_push_actions_by_room_for_user( - room_id, sync_config.user.to_string(), last_unread_event_id + with Measure(self.clock, "unread_notifs_for_room_id"): + last_unread_event_id = self.last_read_event_id_for_room_and_user( + room_id, sync_config.user.to_string(), ephemeral_by_room ) - defer.returnValue(notifs) - # There is no new information in this period, so your notification - # count is whatever it was last time. - defer.returnValue(None) + notifs = [] + if last_unread_event_id: + notifs = yield self.store.get_unread_event_push_actions_by_room_for_user( + room_id, sync_config.user.to_string(), last_unread_event_id + ) + defer.returnValue(notifs) + + # There is no new information in this period, so your notification + # count is whatever it was last time. 
+ defer.returnValue(None) def _action_has_highlight(actions): diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index 43bf600913..b16d0017df 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -19,6 +19,7 @@ from ._base import BaseHandler from synapse.api.errors import SynapseError, AuthError from synapse.util.logcontext import PreserveLoggingContext +from synapse.util.metrics import Measure from synapse.types import UserID import logging @@ -222,6 +223,7 @@ class TypingNotificationHandler(BaseHandler): class TypingNotificationEventSource(object): def __init__(self, hs): self.hs = hs + self.clock = hs.get_clock() self._handler = None self._room_member_handler = None @@ -247,19 +249,20 @@ class TypingNotificationEventSource(object): } def get_new_events(self, from_key, room_ids, **kwargs): - from_key = int(from_key) - handler = self.handler() + with Measure(self.clock, "typing.get_new_events"): + from_key = int(from_key) + handler = self.handler() - events = [] - for room_id in room_ids: - if room_id not in handler._room_serials: - continue - if handler._room_serials[room_id] <= from_key: - continue + events = [] + for room_id in room_ids: + if room_id not in handler._room_serials: + continue + if handler._room_serials[room_id] <= from_key: + continue - events.append(self._make_event_for(room_id)) + events.append(self._make_event_for(room_id)) - return events, handler._latest_room_serial + return events, handler._latest_room_serial def get_current_key(self): return self.handler()._latest_room_serial From ebaa999f92677d7e1e6b633ea1829f80a69e83c7 Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Tue, 9 Feb 2016 12:46:42 +0000 Subject: [PATCH 124/349] Revert "Reject additional path segments" This reverts commit 1d19a5ec0fff73af9cee8c21118020b31be47379. iOS Console is apparently relying on these paths. 
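As an illustration of what the revert below restores (this sketch is not part of the patch, and the pattern is a simplified stand-in for the real client_path_patterns() regexes): without the trailing "$" anchor the servlet path regexes once again match requests that carry additional trailing path segments, which is the behaviour the iOS console apparently depends on.

    # Simplified, illustrative stand-in pattern -- not the actual synapse regex.
    import re

    anchored = re.compile(r"/profile/(?P<user_id>[^/]*)/displayname$")
    unanchored = re.compile(r"/profile/(?P<user_id>[^/]*)/displayname")

    path = "/profile/@alice:example.com/displayname/extra"

    print(bool(anchored.match(path)))    # False: the extra segment is rejected
    print(bool(unanchored.match(path)))  # True: the prefix still matches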
--- synapse/rest/client/v1/admin.py | 2 +- synapse/rest/client/v1/presence.py | 4 ++-- synapse/rest/client/v1/profile.py | 6 +++--- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/synapse/rest/client/v1/admin.py b/synapse/rest/client/v1/admin.py index 5ec52707e7..e2f5eb7b29 100644 --- a/synapse/rest/client/v1/admin.py +++ b/synapse/rest/client/v1/admin.py @@ -26,7 +26,7 @@ logger = logging.getLogger(__name__) class WhoisRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/admin/whois/(?P[^/]*)$") + PATTERNS = client_path_patterns("/admin/whois/(?P[^/]*)") @defer.inlineCallbacks def on_GET(self, request, user_id): diff --git a/synapse/rest/client/v1/presence.py b/synapse/rest/client/v1/presence.py index 9410ac527e..a6f8754e32 100644 --- a/synapse/rest/client/v1/presence.py +++ b/synapse/rest/client/v1/presence.py @@ -28,7 +28,7 @@ logger = logging.getLogger(__name__) class PresenceStatusRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/presence/(?P[^/]*)/status$") + PATTERNS = client_path_patterns("/presence/(?P[^/]*)/status") @defer.inlineCallbacks def on_GET(self, request, user_id): @@ -73,7 +73,7 @@ class PresenceStatusRestServlet(ClientV1RestServlet): class PresenceListRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/presence/list/(?P[^/]*)$") + PATTERNS = client_path_patterns("/presence/list/(?P[^/]*)") @defer.inlineCallbacks def on_GET(self, request, user_id): diff --git a/synapse/rest/client/v1/profile.py b/synapse/rest/client/v1/profile.py index aeda7bfa39..3c5a212920 100644 --- a/synapse/rest/client/v1/profile.py +++ b/synapse/rest/client/v1/profile.py @@ -23,7 +23,7 @@ import simplejson as json class ProfileDisplaynameRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/profile/(?P[^/]*)/displayname$") + PATTERNS = client_path_patterns("/profile/(?P[^/]*)/displayname") @defer.inlineCallbacks def on_GET(self, request, user_id): @@ -60,7 +60,7 @@ class ProfileDisplaynameRestServlet(ClientV1RestServlet): class ProfileAvatarURLRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/profile/(?P[^/]*)/avatar_url$") + PATTERNS = client_path_patterns("/profile/(?P[^/]*)/avatar_url") @defer.inlineCallbacks def on_GET(self, request, user_id): @@ -97,7 +97,7 @@ class ProfileAvatarURLRestServlet(ClientV1RestServlet): class ProfileRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/profile/(?P[^/]*)$") + PATTERNS = client_path_patterns("/profile/(?P[^/]*)") @defer.inlineCallbacks def on_GET(self, request, user_id): From 5b75b637b838479528dcc87955cdd7a2a4210b58 Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Tue, 9 Feb 2016 13:44:04 +0000 Subject: [PATCH 125/349] Remove pyc files before running tests --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index bd313a4f36..6f01599af7 100644 --- a/tox.ini +++ b/tox.ini @@ -11,7 +11,7 @@ deps = setenv = PYTHONDONTWRITEBYTECODE = no_byte_code commands = - /bin/bash -c "coverage run {env:COVERAGE_OPTS:} --source={toxinidir}/synapse \ + /bin/bash -c "find {toxinidir} -name '*.pyc' -delete ; coverage run {env:COVERAGE_OPTS:} --source={toxinidir}/synapse \ {envbindir}/trial {env:TRIAL_FLAGS:} {posargs:tests} {env:TOXSUFFIX:}" {env:DUMP_COVERAGE_COMMAND:coverage report -m} From 82631c5f948cfb3cb133379e0e5ad10811d706a2 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 9 Feb 2016 13:50:29 +0000 Subject: [PATCH 126/349] Fix unit tests --- synapse/handlers/sync.py | 5 ++++- 
synapse/util/metrics.py | 9 +++++---- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 3e5f27046e..446f8bbe93 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -629,7 +629,10 @@ class SyncHandler(BaseHandler): room_key = now_token.room_key end_key = room_key - limited = recents is None or newly_joined_room or timeline_limit < len(recents) + if recents is None or newly_joined_room or timeline_limit < len(recents): + limited = True + else: + limited = False if recents is not None: recents = sync_config.filter_collection.filter_room_timeline(recents) diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py index 57ca3b4751..c51b641125 100644 --- a/synapse/util/metrics.py +++ b/synapse/util/metrics.py @@ -62,12 +62,13 @@ class Measure(object): def __enter__(self): self.start = self.clock.time_msec() self.start_context = LoggingContext.current_context() - self.ru_utime, self.ru_stime = self.start_context.get_resource_usage() - self.db_txn_count = self.start_context.db_txn_count - self.db_txn_duration = self.start_context.db_txn_duration + if self.start_context: + self.ru_utime, self.ru_stime = self.start_context.get_resource_usage() + self.db_txn_count = self.start_context.db_txn_count + self.db_txn_duration = self.start_context.db_txn_duration def __exit__(self, exc_type, exc_val, exc_tb): - if exc_type is not None: + if exc_type is not None or not self.start_context: return duration = self.clock.time_msec() - self.start From eff12e838ce10588ca8103c9131dcfe2f2e7950e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 9 Feb 2016 13:55:59 +0000 Subject: [PATCH 127/349] Don't load all ephemeral state for a room on every sync --- synapse/handlers/sync.py | 20 ++++++-------------- synapse/storage/receipts.py | 14 ++++++++++++++ 2 files changed, 20 insertions(+), 14 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 446f8bbe93..6a5868f87e 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -319,7 +319,6 @@ class SyncHandler(BaseHandler): ephemeral_by_room=ephemeral_by_room, tags_by_room=tags_by_room, account_data_by_room=account_data_by_room, - all_ephemeral_by_room=ephemeral_by_room, batch=batch, full_state=True, ) @@ -453,13 +452,6 @@ class SyncHandler(BaseHandler): ) now_token = now_token.copy_and_replace("presence_key", presence_key) - # We now fetch all ephemeral events for this room in order to get - # this users current read receipt. This could almost certainly be - # optimised. 
- _, all_ephemeral_by_room = yield self.ephemeral_by_room( - sync_config, now_token - ) - now_token, ephemeral_by_room = yield self.ephemeral_by_room( sync_config, now_token, since_token ) @@ -591,7 +583,6 @@ class SyncHandler(BaseHandler): ephemeral_by_room=ephemeral_by_room, tags_by_room=tags_by_room, account_data_by_room=account_data_by_room, - all_ephemeral_by_room=all_ephemeral_by_room, batch=batch, full_state=full_state, ) @@ -691,7 +682,6 @@ class SyncHandler(BaseHandler): since_token, now_token, ephemeral_by_room, tags_by_room, account_data_by_room, - all_ephemeral_by_room, batch, full_state=False): state = yield self.compute_state_delta( room_id, batch, sync_config, since_token, now_token, @@ -722,7 +712,7 @@ class SyncHandler(BaseHandler): if room_sync: notifs = yield self.unread_notifs_for_room_id( - room_id, sync_config, all_ephemeral_by_room + room_id, sync_config ) if notifs is not None: @@ -906,10 +896,12 @@ class SyncHandler(BaseHandler): return False @defer.inlineCallbacks - def unread_notifs_for_room_id(self, room_id, sync_config, ephemeral_by_room): + def unread_notifs_for_room_id(self, room_id, sync_config): with Measure(self.clock, "unread_notifs_for_room_id"): - last_unread_event_id = self.last_read_event_id_for_room_and_user( - room_id, sync_config.user.to_string(), ephemeral_by_room + last_unread_event_id = yield self.store.get_last_receipt_event_id_for_user( + user_id=sync_config.user.to_string(), + room_id=room_id, + receipt_type="m.read" ) notifs = [] diff --git a/synapse/storage/receipts.py b/synapse/storage/receipts.py index 8068c73740..1aff9f070e 100644 --- a/synapse/storage/receipts.py +++ b/synapse/storage/receipts.py @@ -46,6 +46,20 @@ class ReceiptsStore(SQLBaseStore): desc="get_receipts_for_room", ) + @cached(num_args=3) + def get_last_receipt_event_id_for_user(self, user_id, room_id, receipt_type): + return self._simple_select_one_onecol( + table="receipts_linearized", + keyvalues={ + "room_id": room_id, + "receipt_type": receipt_type, + "user_id": user_id + }, + retcol="event_id", + desc="get_own_receipt_for_user", + allow_none=True, + ) + @cachedInlineCallbacks(num_args=2) def get_receipts_for_user(self, user_id, receipt_type): def f(txn): From 70a8608749e0c1ec7a993a9effc424303af24738 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 9 Feb 2016 14:27:29 +0000 Subject: [PATCH 128/349] Invalidate get_last_receipt_event_id_for_user cache --- synapse/storage/receipts.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/synapse/storage/receipts.py b/synapse/storage/receipts.py index 1aff9f070e..4202a6b3dc 100644 --- a/synapse/storage/receipts.py +++ b/synapse/storage/receipts.py @@ -240,6 +240,11 @@ class ReceiptsStore(SQLBaseStore): room_id, stream_id ) + txn.call_after( + self.get_last_receipt_event_id_for_user.invalidate, + (user_id, room_id, receipt_type) + ) + # We don't want to clobber receipts for more recent events, so we # have to compare orderings of existing receipts sql = ( From feb294d552ac58b867142118c3d19f0b11b2f384 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 9 Feb 2016 14:32:17 +0000 Subject: [PATCH 129/349] Remove dead code --- synapse/handlers/sync.py | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 6a5868f87e..ddeed27965 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -179,18 +179,6 @@ class SyncHandler(BaseHandler): else: return self.incremental_sync_with_gap(sync_config, since_token) - def 
last_read_event_id_for_room_and_user(self, room_id, user_id, ephemeral_by_room): - if room_id not in ephemeral_by_room: - return None - for e in ephemeral_by_room[room_id]: - if e['type'] != 'm.receipt': - continue - for receipt_event_id, val in e['content'].items(): - if 'm.read' in val: - if user_id in val['m.read']: - return receipt_event_id - return None - @defer.inlineCallbacks def full_state_sync(self, sync_config, timeline_since_token): """Get a sync for a client which is starting without any state. From 78d6c1b5bec800671d3ff66acecb2f8bbdf41aa1 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 9 Feb 2016 14:44:12 +0000 Subject: [PATCH 130/349] Change a log from debug to info --- synapse/storage/prepare_database.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index d782b8e25b..850736c85e 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -211,7 +211,7 @@ def _upgrade_existing_database(cur, current_version, applied_delta_files, logger.debug("applied_delta_files: %s", applied_delta_files) for v in range(start_ver, SCHEMA_VERSION + 1): - logger.debug("Upgrading schema to v%d", v) + logger.info("Upgrading schema to v%d", v) delta_dir = os.path.join(dir_path, "schema", "delta", str(v)) From 13ba8d878ce30dbc16123886a78a0905fc9ad4a5 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 9 Feb 2016 14:55:21 +0000 Subject: [PATCH 131/349] Fix test --- tests/config/test_load.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tests/config/test_load.py b/tests/config/test_load.py index 7f41279715..528e878532 100644 --- a/tests/config/test_load.py +++ b/tests/config/test_load.py @@ -54,10 +54,11 @@ class ConfigLoadingTestCase(unittest.TestCase): "was: %r" % (config.macaroon_secret_key,) ) - def test_load_fails_if_macaroon_secret_key_missing(self): + def test_load_suceeds_if_macaroon_secret_key_missing(self): self.generate_config_and_remove_lines_containing("macaroon") - with self.assertRaises(Exception): - HomeServerConfig.load_config("", ["-c", self.file]) + config1 = HomeServerConfig.load_config("", ["-c", self.file]) + config2 = HomeServerConfig.load_config("", ["-c", self.file]) + self.assertEqual(config1.macaroon_secret_key, config2.macaroon_secret_key) def generate_config(self): HomeServerConfig.load_config("", [ From e664e9737ca8ff04043f747a9375ff50440352c2 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 9 Feb 2016 14:57:43 +0000 Subject: [PATCH 132/349] Fix test --- tests/util/test_log_context.py | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/tests/util/test_log_context.py b/tests/util/test_log_context.py index efa0f28bad..65a330a0e9 100644 --- a/tests/util/test_log_context.py +++ b/tests/util/test_log_context.py @@ -5,6 +5,7 @@ from .. 
import unittest from synapse.util.async import sleep from synapse.util.logcontext import LoggingContext + class LoggingContextTestCase(unittest.TestCase): def _check_test_key(self, value): @@ -17,15 +18,6 @@ class LoggingContextTestCase(unittest.TestCase): context_one.test_key = "test" self._check_test_key("test") - def test_chaining(self): - with LoggingContext() as context_one: - context_one.test_key = "one" - with LoggingContext() as context_two: - self._check_test_key("one") - context_two.test_key = "two" - self._check_test_key("two") - self._check_test_key("one") - @defer.inlineCallbacks def test_sleep(self): @defer.inlineCallbacks From f28cc4518368955f692a1262a688e907cb22d226 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 9 Feb 2016 16:01:40 +0000 Subject: [PATCH 133/349] Pass in current state to push action handler --- synapse/handlers/_base.py | 31 ++++++++++-------------- synapse/push/action_generator.py | 6 +++-- synapse/push/bulk_push_rule_evaluator.py | 10 +++----- 3 files changed, 20 insertions(+), 27 deletions(-) diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py index fa83d3e464..d3f722b22e 100644 --- a/synapse/handlers/_base.py +++ b/synapse/handlers/_base.py @@ -53,25 +53,10 @@ class BaseHandler(object): self.event_builder_factory = hs.get_event_builder_factory() @defer.inlineCallbacks - def _filter_events_for_clients(self, user_tuples, events): + def _filter_events_for_clients(self, user_tuples, events, event_id_to_state): """ Returns dict of user_id -> list of events that user is allowed to see. """ - # If there is only one user, just get the state for that one user, - # otherwise just get all the state. - if len(user_tuples) == 1: - types = ( - (EventTypes.RoomHistoryVisibility, ""), - (EventTypes.Member, user_tuples[0][0]), - ) - else: - types = None - - event_id_to_state = yield self.store.get_state_for_events( - frozenset(e.event_id for e in events), - types=types - ) - forgotten = yield defer.gatherResults([ self.store.who_forgot_in_room( room_id, @@ -135,7 +120,17 @@ class BaseHandler(object): @defer.inlineCallbacks def _filter_events_for_client(self, user_id, events, is_peeking=False): # Assumes that user has at some point joined the room if not is_guest. - res = yield self._filter_events_for_clients([(user_id, is_peeking)], events) + types = ( + (EventTypes.RoomHistoryVisibility, ""), + (EventTypes.Member, user_id), + ) + event_id_to_state = yield self.store.get_state_for_events( + frozenset(e.event_id for e in events), + types=types + ) + res = yield self._filter_events_for_clients( + [(user_id, is_peeking)], events, event_id_to_state + ) defer.returnValue(res.get(user_id, [])) def ratelimit(self, user_id): @@ -275,7 +270,7 @@ class BaseHandler(object): action_generator = ActionGenerator(self.hs) yield action_generator.handle_push_actions_for_event( - event, self + event, self, context.current_state ) destinations = set() diff --git a/synapse/push/action_generator.py b/synapse/push/action_generator.py index 1d2e558f9a..d8f8256a1f 100644 --- a/synapse/push/action_generator.py +++ b/synapse/push/action_generator.py @@ -36,7 +36,7 @@ class ActionGenerator: # tag (ie. we just need all the users). 
@defer.inlineCallbacks - def handle_push_actions_for_event(self, event, handler): + def handle_push_actions_for_event(self, event, handler, current_state): if event.type == EventTypes.Redaction and event.redacts is not None: yield self.store.remove_push_actions_for_event_id( event.room_id, event.redacts @@ -46,7 +46,9 @@ class ActionGenerator: event.room_id, self.hs, self.store ) - actions_by_user = yield bulk_evaluator.action_for_event_by_user(event, handler) + actions_by_user = yield bulk_evaluator.action_for_event_by_user( + event, handler, current_state + ) yield self.store.set_push_actions_for_event_and_users( event, diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index 20c60422bf..8ac5ceb9ef 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -98,25 +98,21 @@ class BulkPushRuleEvaluator: self.store = store @defer.inlineCallbacks - def action_for_event_by_user(self, event, handler): + def action_for_event_by_user(self, event, handler, current_state): actions_by_user = {} users_dict = yield self.store.are_guests(self.rules_by_user.keys()) filtered_by_user = yield handler._filter_events_for_clients( - users_dict.items(), [event] + users_dict.items(), [event], {event.event_id: current_state} ) evaluator = PushRuleEvaluatorForEvent(event, len(self.users_in_room)) condition_cache = {} - member_state = yield self.store.get_state_for_event( - event.event_id, - ) - display_names = {} - for ev in member_state.values(): + for ev in current_state.values(): nm = ev.content.get("displayname", None) if nm and ev.type == EventTypes.Member: display_names[ev.state_key] = nm From 7b0d846407a593ccd204f82aaa1090b8af8df84c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 9 Feb 2016 16:19:15 +0000 Subject: [PATCH 134/349] Atomically persit push actions when we persist the event --- synapse/events/snapshot.py | 1 + synapse/handlers/_base.py | 10 +++--- synapse/handlers/federation.py | 12 +++---- synapse/push/action_generator.py | 20 +++--------- synapse/storage/event_push_actions.py | 45 ++++++++++----------------- synapse/storage/events.py | 26 ++++++++++------ 6 files changed, 49 insertions(+), 65 deletions(-) diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py index f51200d18e..8a475417a6 100644 --- a/synapse/events/snapshot.py +++ b/synapse/events/snapshot.py @@ -20,3 +20,4 @@ class EventContext(object): self.current_state = current_state self.state_group = None self.rejected = False + self.push_actions = [] diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py index d3f722b22e..064e8723c8 100644 --- a/synapse/handlers/_base.py +++ b/synapse/handlers/_base.py @@ -264,13 +264,13 @@ class BaseHandler(object): "You don't have permission to redact events" ) - (event_stream_id, max_stream_id) = yield self.store.persist_event( - event, context=context - ) - action_generator = ActionGenerator(self.hs) yield action_generator.handle_push_actions_for_event( - event, self, context.current_state + event, context, self + ) + + (event_stream_id, max_stream_id) = yield self.store.persist_event( + event, context=context ) destinations = set() diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index b78b0502d9..da55d43541 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -236,12 +236,6 @@ class FederationHandler(BaseHandler): user = UserID.from_string(event.state_key) yield user_joined_room(self.distributor, user, 
event.room_id) - if not backfilled and not event.internal_metadata.is_outlier(): - action_generator = ActionGenerator(self.hs) - yield action_generator.handle_push_actions_for_event( - event, self - ) - @defer.inlineCallbacks def _filter_events_for_server(self, server_name, room_id, events): event_to_state = yield self.store.get_state_for_events( @@ -1073,6 +1067,12 @@ class FederationHandler(BaseHandler): auth_events=auth_events, ) + if not backfilled and not event.internal_metadata.is_outlier(): + action_generator = ActionGenerator(self.hs) + yield action_generator.handle_push_actions_for_event( + event, context, self + ) + event_stream_id, max_stream_id = yield self.store.persist_event( event, context=context, diff --git a/synapse/push/action_generator.py b/synapse/push/action_generator.py index d8f8256a1f..e0da0868ec 100644 --- a/synapse/push/action_generator.py +++ b/synapse/push/action_generator.py @@ -19,8 +19,6 @@ import bulk_push_rule_evaluator import logging -from synapse.api.constants import EventTypes - logger = logging.getLogger(__name__) @@ -36,23 +34,15 @@ class ActionGenerator: # tag (ie. we just need all the users). @defer.inlineCallbacks - def handle_push_actions_for_event(self, event, handler, current_state): - if event.type == EventTypes.Redaction and event.redacts is not None: - yield self.store.remove_push_actions_for_event_id( - event.room_id, event.redacts - ) - + def handle_push_actions_for_event(self, event, context, handler): bulk_evaluator = yield bulk_push_rule_evaluator.evaluator_for_room_id( event.room_id, self.hs, self.store ) actions_by_user = yield bulk_evaluator.action_for_event_by_user( - event, handler, current_state + event, handler, context.current_state ) - yield self.store.set_push_actions_for_event_and_users( - event, - [ - (uid, None, actions) for uid, actions in actions_by_user.items() - ] - ) + context.push_actions = [ + (uid, None, actions) for uid, actions in actions_by_user.items() + ] diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py index d0a969f50b..466f07a1c4 100644 --- a/synapse/storage/event_push_actions.py +++ b/synapse/storage/event_push_actions.py @@ -24,8 +24,7 @@ logger = logging.getLogger(__name__) class EventPushActionsStore(SQLBaseStore): - @defer.inlineCallbacks - def set_push_actions_for_event_and_users(self, event, tuples): + def _set_push_actions_for_event_and_users(self, txn, event, tuples): """ :param event: the event set actions for :param tuples: list of tuples of (user_id, profile_tag, actions) @@ -44,18 +43,12 @@ class EventPushActionsStore(SQLBaseStore): 'highlight': 1 if _action_has_highlight(actions) else 0, }) - def f(txn): - for uid, _, __ in tuples: - txn.call_after( - self.get_unread_event_push_actions_by_room_for_user.invalidate_many, - (event.room_id, uid) - ) - return self._simple_insert_many_txn(txn, "event_push_actions", values) - - yield self.runInteraction( - "set_actions_for_event_and_users", - f, - ) + for uid, _, __ in tuples: + txn.call_after( + self.get_unread_event_push_actions_by_room_for_user.invalidate_many, + (event.room_id, uid) + ) + self._simple_insert_many_txn(txn, "event_push_actions", values) @cachedInlineCallbacks(num_args=3, lru=True, tree=True) def get_unread_event_push_actions_by_room_for_user( @@ -107,21 +100,15 @@ class EventPushActionsStore(SQLBaseStore): ) defer.returnValue(ret) - @defer.inlineCallbacks - def remove_push_actions_for_event_id(self, room_id, event_id): - def f(txn): - # Sad that we have to blow away the cache for the whole 
room here - txn.call_after( - self.get_unread_event_push_actions_by_room_for_user.invalidate_many, - (room_id,) - ) - txn.execute( - "DELETE FROM event_push_actions WHERE room_id = ? AND event_id = ?", - (room_id, event_id) - ) - yield self.runInteraction( - "remove_push_actions_for_event_id", - f + def _remove_push_actions_for_event_id(self, txn, room_id, event_id): + # Sad that we have to blow away the cache for the whole room here + txn.call_after( + self.get_unread_event_push_actions_by_room_for_user.invalidate_many, + (room_id,) + ) + txn.execute( + "DELETE FROM event_push_actions WHERE room_id = ? AND event_id = ?", + (room_id, event_id) ) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index c6ed54721c..7d4012c414 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -205,23 +205,29 @@ class EventsStore(SQLBaseStore): @log_function def _persist_events_txn(self, txn, events_and_contexts, backfilled, is_new_state=True): - - # Remove the any existing cache entries for the event_ids - for event, _ in events_and_contexts: + depth_updates = {} + for event, context in events_and_contexts: + # Remove the any existing cache entries for the event_ids txn.call_after(self._invalidate_get_event_cache, event.event_id) - if not backfilled: txn.call_after( self._events_stream_cache.entity_has_changed, event.room_id, event.internal_metadata.stream_ordering, ) - depth_updates = {} - for event, _ in events_and_contexts: - if event.internal_metadata.is_outlier(): - continue - depth_updates[event.room_id] = max( - event.depth, depth_updates.get(event.room_id, event.depth) + if not event.internal_metadata.is_outlier(): + depth_updates[event.room_id] = max( + event.depth, depth_updates.get(event.room_id, event.depth) + ) + + if context.push_actions: + self._set_push_actions_for_event_and_users( + txn, event, context.push_actions + ) + + if event.type == EventTypes.Redaction and event.redacts is not None: + self._remove_push_actions_for_event_id( + txn, event.room_id, event.redacts ) for room_id, depth in depth_updates.items(): From 78a54822670c94763e6de708797fd561260bbcf5 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 9 Feb 2016 16:23:11 +0000 Subject: [PATCH 135/349] Typo --- tests/config/test_load.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/config/test_load.py b/tests/config/test_load.py index 528e878532..fbbbf93fef 100644 --- a/tests/config/test_load.py +++ b/tests/config/test_load.py @@ -54,7 +54,7 @@ class ConfigLoadingTestCase(unittest.TestCase): "was: %r" % (config.macaroon_secret_key,) ) - def test_load_suceeds_if_macaroon_secret_key_missing(self): + def test_load_succeeds_if_macaroon_secret_key_missing(self): self.generate_config_and_remove_lines_containing("macaroon") config1 = HomeServerConfig.load_config("", ["-c", self.file]) config2 = HomeServerConfig.load_config("", ["-c", self.file]) From 02147452396c67e7874b201460f8b1cc8996a90a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 10 Feb 2016 11:09:56 +0000 Subject: [PATCH 136/349] Rename functions --- synapse/storage/event_push_actions.py | 4 ++-- synapse/storage/events.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py index 466f07a1c4..d77a817682 100644 --- a/synapse/storage/event_push_actions.py +++ b/synapse/storage/event_push_actions.py @@ -24,7 +24,7 @@ logger = logging.getLogger(__name__) class EventPushActionsStore(SQLBaseStore): - def 
_set_push_actions_for_event_and_users(self, txn, event, tuples): + def _set_push_actions_for_event_and_users_txn(self, txn, event, tuples): """ :param event: the event set actions for :param tuples: list of tuples of (user_id, profile_tag, actions) @@ -100,7 +100,7 @@ class EventPushActionsStore(SQLBaseStore): ) defer.returnValue(ret) - def _remove_push_actions_for_event_id(self, txn, room_id, event_id): + def _remove_push_actions_for_event_id_txn(self, txn, room_id, event_id): # Sad that we have to blow away the cache for the whole room here txn.call_after( self.get_unread_event_push_actions_by_room_for_user.invalidate_many, diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 7d4012c414..3a5c6ee4b1 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -221,12 +221,12 @@ class EventsStore(SQLBaseStore): ) if context.push_actions: - self._set_push_actions_for_event_and_users( + self._set_push_actions_for_event_and_users_txn( txn, event, context.push_actions ) if event.type == EventTypes.Redaction and event.redacts is not None: - self._remove_push_actions_for_event_id( + self._remove_push_actions_for_event_id_txn( txn, event.room_id, event.redacts ) From 9777c5f49ab354206f5aaecdc20880fbb8b17660 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 10 Feb 2016 11:23:32 +0000 Subject: [PATCH 137/349] Set parent context on instansiation --- synapse/util/logcontext.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py index 301b30d58a..ffab03819d 100644 --- a/synapse/util/logcontext.py +++ b/synapse/util/logcontext.py @@ -79,7 +79,7 @@ class LoggingContext(object): sentinel = Sentinel() def __init__(self, name=None): - self.parent_context = None + self.parent_context = LoggingContext.current_context() self.name = name self.ru_stime = 0. self.ru_utime = 0. @@ -116,7 +116,12 @@ class LoggingContext(object): def __enter__(self): """Enters this logging context into thread local storage""" - self.parent_context = self.set_current_context(self) + old_context = self.set_current_context(self) + if self.parent_context != old_context: + logger.warn( + "Expected parent context %r, found %r", + self.parent_context, old_context + ) self.alive = True return self From 00c9ad49dffe92b766ebfaf4f2e0ec82100a3009 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 10 Feb 2016 11:25:19 +0000 Subject: [PATCH 138/349] s/parent_context/previous_context/ --- synapse/util/logcontext.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py index ffab03819d..e1c55390ac 100644 --- a/synapse/util/logcontext.py +++ b/synapse/util/logcontext.py @@ -47,7 +47,7 @@ class LoggingContext(object): """ __slots__ = [ - "parent_context", "name", "usage_start", "usage_end", "main_thread", + "previous_context", "name", "usage_start", "usage_end", "main_thread", "__dict__", "tag", "alive", ] @@ -79,7 +79,7 @@ class LoggingContext(object): sentinel = Sentinel() def __init__(self, name=None): - self.parent_context = LoggingContext.current_context() + self.previous_context = LoggingContext.current_context() self.name = name self.ru_stime = 0. self.ru_utime = 0. 
@@ -117,10 +117,10 @@ class LoggingContext(object): def __enter__(self): """Enters this logging context into thread local storage""" old_context = self.set_current_context(self) - if self.parent_context != old_context: + if self.previous_context != old_context: logger.warn( - "Expected parent context %r, found %r", - self.parent_context, old_context + "Expected previous context %r, found %r", + self.previous_context, old_context ) self.alive = True return self @@ -131,7 +131,7 @@ class LoggingContext(object): Returns: None to avoid suppressing any exeptions that were thrown. """ - current = self.set_current_context(self.parent_context) + current = self.set_current_context(self.previous_context) if current is not self: if current is self.sentinel: logger.debug("Expected logging context %s has been lost", self) @@ -141,11 +141,11 @@ class LoggingContext(object): current, self ) - self.parent_context = None + self.previous_context = None self.alive = False def copy_to(self, record): - """Copy fields from this context and its parents to the record""" + """Copy fields from this context to the record""" for key, value in self.__dict__.items(): setattr(record, key, value) @@ -226,7 +226,7 @@ class PreserveLoggingContext(object): ) if self.current_context: - self.has_parent = self.current_context.parent_context is not None + self.has_parent = self.current_context.previous_context is not None if not self.current_context.alive: logger.debug( "Entering dead context: %s", From 4eb8f9ca8a4e69d11591584c3e9b89c388b01f87 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 10 Feb 2016 11:29:21 +0000 Subject: [PATCH 139/349] Remove comment --- synapse/util/logcontext.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py index e1c55390ac..5316259d15 100644 --- a/synapse/util/logcontext.py +++ b/synapse/util/logcontext.py @@ -41,7 +41,7 @@ except: class LoggingContext(object): """Additional context for log formatting. Contexts are scoped within a - "with" block. Contexts inherit the state of their parent contexts. + "with" block. Args: name (str): Name for the context for debugging. """ From 8e49892b218fa10313b2502b4913ae8416321051 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 10 Feb 2016 11:41:04 +0000 Subject: [PATCH 140/349] Only calculate initial sync for 10 rooms at a time This helps to ensure we don't completely starve other requests. 
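The patch below does this by slicing the room list into chunks of ten and waiting on the outstanding room-sync deferreds after each chunk, rather than launching one deferred per room and gathering them all at the end. A minimal standalone sketch of that batching pattern, assuming a placeholder sync_one_room() callable that returns a Deferred (the names here are illustrative, not the actual handler code):

    from twisted.internet import defer

    @defer.inlineCallbacks
    def sync_rooms_in_batches(room_ids, sync_one_room, batch_size=10):
        # Run at most batch_size room syncs concurrently, waiting for each
        # batch to finish before starting the next, so a large initial /sync
        # does not starve other requests.
        results = []
        for i in range(0, len(room_ids), batch_size):
            batch = room_ids[i:i + batch_size]
            batch_results = yield defer.gatherResults(
                [sync_one_room(room_id) for room_id in batch],
                consumeErrors=True,
            )
            results.extend(batch_results)
        defer.returnValue(results)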
--- synapse/handlers/sync.py | 53 ++++++++++++++++++++++------------------ 1 file changed, 29 insertions(+), 24 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index ddeed27965..84f29e3867 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -18,7 +18,7 @@ from ._base import BaseHandler from synapse.streams.config import PaginationConfig from synapse.api.constants import Membership, EventTypes from synapse.util import unwrapFirstError -from synapse.util.logcontext import LoggingContext, PreserveLoggingContext +from synapse.util.logcontext import LoggingContext, PreserveLoggingContext, preserve_fn from synapse.util.metrics import Measure from twisted.internet import defer @@ -228,10 +228,14 @@ class SyncHandler(BaseHandler): invited = [] archived = [] deferreds = [] - for event in room_list: - if event.membership == Membership.JOIN: - with PreserveLoggingContext(LoggingContext.current_context()): - room_sync_deferred = self.full_state_sync_for_joined_room( + + room_list_chunks = [room_list[i:i + 10] for i in xrange(0, len(room_list), 10)] + for room_list_chunk in room_list_chunks: + for event in room_list_chunk: + if event.membership == Membership.JOIN: + room_sync_deferred = preserve_fn( + self.full_state_sync_for_joined_room + )( room_id=event.room_id, sync_config=sync_config, now_token=now_token, @@ -240,20 +244,21 @@ class SyncHandler(BaseHandler): tags_by_room=tags_by_room, account_data_by_room=account_data_by_room, ) - room_sync_deferred.addCallback(joined.append) - deferreds.append(room_sync_deferred) - elif event.membership == Membership.INVITE: - invite = yield self.store.get_event(event.event_id) - invited.append(InvitedSyncResult( - room_id=event.room_id, - invite=invite, - )) - elif event.membership in (Membership.LEAVE, Membership.BAN): - leave_token = now_token.copy_and_replace( - "room_key", "s%d" % (event.stream_ordering,) - ) - with PreserveLoggingContext(LoggingContext.current_context()): - room_sync_deferred = self.full_state_sync_for_archived_room( + room_sync_deferred.addCallback(joined.append) + deferreds.append(room_sync_deferred) + elif event.membership == Membership.INVITE: + invite = yield self.store.get_event(event.event_id) + invited.append(InvitedSyncResult( + room_id=event.room_id, + invite=invite, + )) + elif event.membership in (Membership.LEAVE, Membership.BAN): + leave_token = now_token.copy_and_replace( + "room_key", "s%d" % (event.stream_ordering,) + ) + room_sync_deferred = preserve_fn( + self.full_state_sync_for_archived_room + )( sync_config=sync_config, room_id=event.room_id, leave_event_id=event.event_id, @@ -262,12 +267,12 @@ class SyncHandler(BaseHandler): tags_by_room=tags_by_room, account_data_by_room=account_data_by_room, ) - room_sync_deferred.addCallback(archived.append) - deferreds.append(room_sync_deferred) + room_sync_deferred.addCallback(archived.append) + deferreds.append(room_sync_deferred) - yield defer.gatherResults( - deferreds, consumeErrors=True - ).addErrback(unwrapFirstError) + yield defer.gatherResults( + deferreds, consumeErrors=True + ).addErrback(unwrapFirstError) account_data_for_user = sync_config.filter_collection.filter_account_data( self.account_data_for_user(account_data) From 24f00a6c33900cf701330ff324b0479c1898d5ce Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 10 Feb 2016 12:57:50 +0000 Subject: [PATCH 141/349] Use _simple_select_many for _get_state_group_for_events --- synapse/handlers/sync.py | 2 +- synapse/storage/state.py | 26 ++++++++++---------------- 2 
files changed, 11 insertions(+), 17 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 84f29e3867..1d0f0058a2 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -18,7 +18,7 @@ from ._base import BaseHandler from synapse.streams.config import PaginationConfig from synapse.api.constants import Membership, EventTypes from synapse.util import unwrapFirstError -from synapse.util.logcontext import LoggingContext, PreserveLoggingContext, preserve_fn +from synapse.util.logcontext import LoggingContext, preserve_fn from synapse.util.metrics import Measure from twisted.internet import defer diff --git a/synapse/storage/state.py b/synapse/storage/state.py index 6c32e8f7b3..90ec50bb50 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -264,26 +264,20 @@ class StateStore(SQLBaseStore): ) @cachedList(cache=_get_state_group_for_event.cache, list_name="event_ids", - num_args=1) + num_args=1, inlineCallbacks=True) def _get_state_group_for_events(self, event_ids): """Returns mapping event_id -> state_group """ - def f(txn): - results = {} - for event_id in event_ids: - results[event_id] = self._simple_select_one_onecol_txn( - txn, - table="event_to_state_groups", - keyvalues={ - "event_id": event_id, - }, - retcol="state_group", - allow_none=True, - ) + rows = yield self._simple_select_many_batch( + table="event_to_state_groups", + column="event_id", + iterable=event_ids, + keyvalues={}, + retcols=("event_id", "state_group",), + desc="_get_state_group_for_events", + ) - return results - - return self.runInteraction("_get_state_group_for_events", f) + defer.returnValue({row["event_id"]: row["state_group"] for row in rows}) def _get_some_state_from_cache(self, group, types): """Checks if group is in cache. See `_get_state_for_groups` From 5189bfdef4c87a7b0527de603eae52ac27bd500c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 10 Feb 2016 13:24:42 +0000 Subject: [PATCH 142/349] Batch fetch _get_state_groups_from_groups --- synapse/storage/state.py | 72 +++++++++++++++++++++------------------- 1 file changed, 37 insertions(+), 35 deletions(-) diff --git a/synapse/storage/state.py b/synapse/storage/state.py index 90ec50bb50..372b540002 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -171,41 +171,43 @@ class StateStore(SQLBaseStore): events = yield self._get_events(event_ids, get_prev_content=False) defer.returnValue(events) - def _get_state_groups_from_groups(self, groups_and_types): + def _get_state_groups_from_groups(self, groups, types): """Returns dictionary state_group -> state event ids - - Args: - groups_and_types (list): list of 2-tuple (`group`, `types`) """ - def f(txn): + def f(txn, groups): + if types is not None: + where_clause = "AND (%s)" % ( + " OR ".join(["(type = ? AND state_key = ?)"] * len(types)), + ) + else: + where_clause = "" + + sql = ( + "SELECT state_group, event_id FROM state_groups_state WHERE" + " state_group IN (%s) %s" % ( + ",".join("?" for _ in groups), + where_clause, + ) + ) + + args = list(groups) + if types is not None: + args.extend([i for typ in types for i in typ]) + + txn.execute(sql, args) + rows = self.cursor_to_dict(txn) + results = {} - for group, types in groups_and_types: - if types is not None: - where_clause = "AND (%s)" % ( - " OR ".join(["(type = ? AND state_key = ?)"] * len(types)), - ) - else: - where_clause = "" - - sql = ( - "SELECT event_id FROM state_groups_state WHERE" - " state_group = ? 
%s" - ) % (where_clause,) - - args = [group] - if types is not None: - args.extend([i for typ in types for i in typ]) - - txn.execute(sql, args) - - results[group] = [r[0] for r in txn.fetchall()] - + for row in rows: + results.setdefault(row["state_group"], []).append(row["event_id"]) return results - return self.runInteraction( - "_get_state_groups_from_groups", - f, - ) + chunks = [groups[i:i + 100] for i in xrange(0, len(groups), 100)] + for chunk in chunks: + return self.runInteraction( + "_get_state_groups_from_groups", + f, chunk + ) @defer.inlineCallbacks def get_state_for_events(self, event_ids, types): @@ -349,7 +351,7 @@ class StateStore(SQLBaseStore): all events are returned. """ results = {} - missing_groups_and_types = [] + missing_groups = [] if types is not None: for group in set(groups): state_dict, missing_types, got_all = self._get_some_state_from_cache( @@ -358,7 +360,7 @@ class StateStore(SQLBaseStore): results[group] = state_dict if not got_all: - missing_groups_and_types.append((group, missing_types)) + missing_groups.append(group) else: for group in set(groups): state_dict, got_all = self._get_all_state_from_cache( @@ -367,9 +369,9 @@ class StateStore(SQLBaseStore): results[group] = state_dict if not got_all: - missing_groups_and_types.append((group, None)) + missing_groups.append(group) - if not missing_groups_and_types: + if not missing_groups: defer.returnValue({ group: { type_tuple: event @@ -383,7 +385,7 @@ class StateStore(SQLBaseStore): cache_seq_num = self._state_group_cache.sequence group_state_dict = yield self._get_state_groups_from_groups( - missing_groups_and_types + missing_groups, types ) state_events = yield self._get_events( From 6e89e69d08fe47b3dde4723fe24911231ed390c7 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 10 Feb 2016 14:36:06 +0000 Subject: [PATCH 143/349] Bump version and changelog --- CHANGES.rst | 22 ++++++++++++++++++++++ synapse/__init__.py | 2 +- 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index b9c956fd03..3ebbdd4822 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,25 @@ +Changes in synapse v0.13.0 (2016-02-10) +======================================= + +This version includes an upgrade of the schema, specifically adding an index to +the ``events`` table, which may take a minute to complete. + +Changes: + +* Improve general performance (PR #540, #543. #544, #54, #549, #567) +* Change guest user ids to be incrementing integers (PR #550) +* Improve performance of public room list API (PR #552) +* Change profile API to omit keys rather than return null (PR #557) +* Change to forward ``/media/r0`` to ``/media/v1`` (PR #595) + +Bug fixes: + +* Fix bug with upgrading guest accounts where it would fail if you opened the + registration email on a different device (PR #547) +* Fix bug where unread count could be wrong (PR #568) + + + Changes in synapse v0.12.1-rc1 (2016-01-29) =========================================== diff --git a/synapse/__init__.py b/synapse/__init__.py index aacacda4fb..426330cc21 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -16,4 +16,4 @@ """ This is a reference implementation of a Matrix home server. 
""" -__version__ = "0.12.1-rc1" +__version__ = "0.13.0" From 6c3126d950d31aa88020e967d8fd1634f6312ccc Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 10 Feb 2016 14:49:48 +0000 Subject: [PATCH 144/349] Update CHANGES --- CHANGES.rst | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 3ebbdd4822..f1c67b20e0 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -2,7 +2,8 @@ Changes in synapse v0.13.0 (2016-02-10) ======================================= This version includes an upgrade of the schema, specifically adding an index to -the ``events`` table, which may take a minute to complete. +the ``events`` table. This may cause synapse to pause for several minutes the +first time it is started after the upgrade. Changes: @@ -10,7 +11,8 @@ Changes: * Change guest user ids to be incrementing integers (PR #550) * Improve performance of public room list API (PR #552) * Change profile API to omit keys rather than return null (PR #557) -* Change to forward ``/media/r0`` to ``/media/v1`` (PR #595) +* Add ``/media/r0`` endpoint prefix, which is equivalent to ``/media/v1/`` + (PR #595) Bug fixes: From b3ecb96e368ae237bddc67963742159fcfe3114b Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Wed, 10 Feb 2016 16:27:01 +0000 Subject: [PATCH 145/349] try to bump syweb to 0.6.8 --- synapse/python_dependencies.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index e95316720e..797283e70c 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -39,7 +39,7 @@ REQUIREMENTS = { } CONDITIONAL_REQUIREMENTS = { "web_client": { - "matrix_angular_sdk>=0.6.6": ["syweb>=0.6.6"], + "matrix_angular_sdk>=0.6.8": ["syweb>=0.6.8"], } } From a45cc801d254bd11fed1bfa6a3f9061fb313166f Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Wed, 10 Feb 2016 16:34:46 +0000 Subject: [PATCH 146/349] bump for 0.13.1 --- CHANGES.rst | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CHANGES.rst b/CHANGES.rst index f1c67b20e0..199fb1384c 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,10 @@ +Changes in synapse v0.13.1 (2016-02-10) +======================================= + +* Bump matrix-angular-sdk (matrix web console) dependency to 0.6.8 to + pull in the fix for SYWEB-361 so that the default client can display + HTML messages again(!) + Changes in synapse v0.13.0 (2016-02-10) ======================================= From 103b432c8470598546fc80aaebe5c30f9053966b Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Wed, 10 Feb 2016 16:35:17 +0000 Subject: [PATCH 147/349] 0.13.1 --- synapse/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/__init__.py b/synapse/__init__.py index 426330cc21..8da62c7b07 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -16,4 +16,4 @@ """ This is a reference implementation of a Matrix home server. 
""" -__version__ = "0.13.0" +__version__ = "0.13.1" From a1b79029443c2edabd90ec3cd372443b44079c8e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 11 Feb 2016 09:22:37 +0000 Subject: [PATCH 148/349] Add some paranoia logging --- synapse/rest/client/v2_alpha/sync.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py index 140ce2704b..accbc6cfac 100644 --- a/synapse/rest/client/v2_alpha/sync.py +++ b/synapse/rest/client/v2_alpha/sync.py @@ -28,6 +28,7 @@ from synapse.api.errors import SynapseError from ._base import client_v2_patterns import copy +import itertools import logging import ujson as json @@ -288,6 +289,15 @@ class SyncRestServlet(RestServlet): state_events = state_dict.values() + for event in itertools.chain(state_events, timeline_events): + # We've had bug reports that events were coming down under the + # wrong room. + if event.room_id != room.room_id: + logger.warn( + "Event %r is under room %r instead of %r", + event.event_id, room.room_id, event.room_id, + ) + serialized_state = [serialize(e) for e in state_events] serialized_timeline = [serialize(e) for e in timeline_events] From 0eff7405239c794d4c000ee93c0e38a87ea2b1bd Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 11 Feb 2016 10:07:27 +0000 Subject: [PATCH 149/349] Return events in correct order for /events --- synapse/handlers/room.py | 1 + synapse/storage/stream.py | 19 +++++++++++-------- 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index a8e3a9029c..b2de2cd0c0 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -1061,6 +1061,7 @@ class RoomEventSource(object): from_key=from_key, to_key=to_key, limit=limit or 10, + order='ASC', ) events = list(room_events) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 367ffc9543..0d1034c6f1 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -157,7 +157,8 @@ class StreamStore(SQLBaseStore): defer.returnValue(results) @defer.inlineCallbacks - def get_room_events_stream_for_rooms(self, room_ids, from_key, to_key, limit=0): + def get_room_events_stream_for_rooms(self, room_ids, from_key, to_key, limit=0, + order='DESC'): from_id = RoomStreamToken.parse_stream_token(from_key).stream room_ids = yield self._events_stream_cache.get_entities_changed( @@ -172,7 +173,7 @@ class StreamStore(SQLBaseStore): for rm_ids in (room_ids[i:i + 20] for i in xrange(0, len(room_ids), 20)): res = yield defer.gatherResults([ preserve_fn(self.get_room_events_stream_for_room)( - room_id, from_key, to_key, limit, + room_id, from_key, to_key, limit, order=order, ) for room_id in room_ids ]) @@ -181,7 +182,8 @@ class StreamStore(SQLBaseStore): defer.returnValue(results) @defer.inlineCallbacks - def get_room_events_stream_for_room(self, room_id, from_key, to_key, limit=0): + def get_room_events_stream_for_room(self, room_id, from_key, to_key, limit=0, + order='DESC'): if from_key is not None: from_id = RoomStreamToken.parse_stream_token(from_key).stream else: @@ -206,8 +208,8 @@ class StreamStore(SQLBaseStore): " room_id = ?" " AND not outlier" " AND stream_ordering > ? AND stream_ordering <= ?" - " ORDER BY stream_ordering DESC LIMIT ?" - ) + " ORDER BY stream_ordering %s LIMIT ?" + ) % (order,) txn.execute(sql, (room_id, from_id, to_id, limit)) else: sql = ( @@ -215,8 +217,8 @@ class StreamStore(SQLBaseStore): " room_id = ?" " AND not outlier" " AND stream_ordering <= ?" 
- " ORDER BY stream_ordering DESC LIMIT ?" - ) + " ORDER BY stream_ordering %s LIMIT ?" + ) % (order,) txn.execute(sql, (room_id, to_id, limit)) rows = self.cursor_to_dict(txn) @@ -232,7 +234,8 @@ class StreamStore(SQLBaseStore): self._set_before_and_after(ret, rows, topo_order=False) - ret.reverse() + if order.lower() == "desc": + ret.reverse() if rows: key = "s%d" % min(r["stream_ordering"] for r in rows) From 1a830b751d2de5fed67da9f55264dd3de507517d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 11 Feb 2016 10:52:39 +0000 Subject: [PATCH 150/349] Bump version and changelog --- CHANGES.rst | 6 ++++++ synapse/__init__.py | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index 199fb1384c..ac7dcdca2c 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,9 @@ +Changes in synapse v0.13.2 (2016-02-11) +======================================= + +* Fix bug where ``/events`` would fail to skip some events if there had been + more events than the limit specified since the last request (PR #570) + Changes in synapse v0.13.1 (2016-02-10) ======================================= diff --git a/synapse/__init__.py b/synapse/__init__.py index 8da62c7b07..9c4213a4a9 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -16,4 +16,4 @@ """ This is a reference implementation of a Matrix home server. """ -__version__ = "0.13.1" +__version__ = "0.13.2" From ce14c7a9954ebfa80831efc0901ca04d8cfe6ab5 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 11 Feb 2016 15:02:56 +0000 Subject: [PATCH 151/349] Fix SYN-627, events are in incorrect room in /sync --- synapse/storage/stream.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 0d1034c6f1..c236dafafb 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -175,7 +175,7 @@ class StreamStore(SQLBaseStore): preserve_fn(self.get_room_events_stream_for_room)( room_id, from_key, to_key, limit, order=order, ) - for room_id in room_ids + for room_id in rm_ids ]) results.update(dict(zip(rm_ids, res))) From 2a24f906a9a7e29e0744ad89cb2de5328c5f3987 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 11 Feb 2016 16:03:47 +0000 Subject: [PATCH 152/349] Bump version and changelog --- CHANGES.rst | 5 +++++ synapse/__init__.py | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index ac7dcdca2c..bebf489e21 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,8 @@ +Changes in synapse v0.13.3 (2016-02-11) +======================================= + +* Fix bug where ``/sync`` would occasionally return events in the wrong room. + Changes in synapse v0.13.2 (2016-02-11) ======================================= diff --git a/synapse/__init__.py b/synapse/__init__.py index 9c4213a4a9..bc50bec9db 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -16,4 +16,4 @@ """ This is a reference implementation of a Matrix home server. 
""" -__version__ = "0.13.2" +__version__ = "0.13.3" From 763360594dfb90433f693056d3d64ac82409fc87 Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Thu, 11 Feb 2016 14:10:00 +0000 Subject: [PATCH 153/349] Mark AS users with their AS's ID --- scripts/synapse_port_db | 9 +++- synapse/app/homeserver.py | 2 +- synapse/storage/appservice.py | 34 +++++++----- synapse/storage/engines/__init__.py | 5 +- synapse/storage/engines/postgres.py | 5 +- synapse/storage/engines/sqlite3.py | 5 +- synapse/storage/prepare_database.py | 15 +++--- synapse/storage/schema/delta/30/as_users.py | 59 +++++++++++++++++++++ tests/storage/test_base.py | 3 +- tests/utils.py | 20 ++++--- 10 files changed, 121 insertions(+), 36 deletions(-) create mode 100644 synapse/storage/schema/delta/30/as_users.py diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db index fc92bbf2d8..a2a0f364cf 100755 --- a/scripts/synapse_port_db +++ b/scripts/synapse_port_db @@ -309,8 +309,8 @@ class Porter(object): **self.postgres_config["args"] ) - sqlite_engine = create_engine("sqlite3") - postgres_engine = create_engine("psycopg2") + sqlite_engine = create_engine(FakeConfig(sqlite_config)) + postgres_engine = create_engine(FakeConfig(postgres_config)) self.sqlite_store = Store(sqlite_db_pool, sqlite_engine) self.postgres_store = Store(postgres_db_pool, postgres_engine) @@ -792,3 +792,8 @@ if __name__ == "__main__": if end_error_exec_info: exc_type, exc_value, exc_traceback = end_error_exec_info traceback.print_exception(exc_type, exc_value, exc_traceback) + + +class FakeConfig: + def __init__(self, database_config): + self.database_config = database_config diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 2b4be7bdd0..d2e758c401 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -382,7 +382,7 @@ def setup(config_options): tls_server_context_factory = context_factory.ServerContextFactory(config) - database_engine = create_engine(config.database_config["name"]) + database_engine = create_engine(config) config.database_config["args"]["cp_openfun"] = database_engine.on_new_connection hs = SynapseHomeServer( diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py index 1100c67714..371600eebb 100644 --- a/synapse/storage/appservice.py +++ b/synapse/storage/appservice.py @@ -34,8 +34,8 @@ class ApplicationServiceStore(SQLBaseStore): def __init__(self, hs): super(ApplicationServiceStore, self).__init__(hs) self.hostname = hs.hostname - self.services_cache = [] - self._populate_appservice_cache( + self.services_cache = ApplicationServiceStore.load_appservices( + hs.hostname, hs.config.app_service_config_files ) @@ -144,21 +144,23 @@ class ApplicationServiceStore(SQLBaseStore): return rooms_for_user_matching_user_id - def _load_appservice(self, as_info): + @classmethod + def _load_appservice(cls, hostname, as_info, config_filename): required_string_fields = [ - # TODO: Add id here when it's stable to release - "url", "as_token", "hs_token", "sender_localpart" + "id", "url", "as_token", "hs_token", "sender_localpart" ] for field in required_string_fields: if not isinstance(as_info.get(field), basestring): - raise KeyError("Required string field: '%s'", field) + raise KeyError("Required string field: '%s' (%s)" % ( + field, config_filename, + )) localpart = as_info["sender_localpart"] if urllib.quote(localpart) != localpart: raise ValueError( "sender_localpart needs characters which are not URL encoded." 
) - user = UserID(localpart, self.hostname) + user = UserID(localpart, hostname) user_id = user.to_string() # namespace checks @@ -188,25 +190,30 @@ class ApplicationServiceStore(SQLBaseStore): namespaces=as_info["namespaces"], hs_token=as_info["hs_token"], sender=user_id, - id=as_info["id"] if "id" in as_info else as_info["as_token"], + id=as_info["id"], ) - def _populate_appservice_cache(self, config_files): - """Populates a cache of Application Services from the config files.""" + @classmethod + def load_appservices(cls, hostname, config_files): + """Returns a list of Application Services from the config files.""" if not isinstance(config_files, list): logger.warning( "Expected %s to be a list of AS config files.", config_files ) - return + return [] # Dicts of value -> filename seen_as_tokens = {} seen_ids = {} + appservices = [] + for config_file in config_files: try: with open(config_file, 'r') as f: - appservice = self._load_appservice(yaml.load(f)) + appservice = ApplicationServiceStore._load_appservice( + hostname, yaml.load(f), config_file + ) if appservice.id in seen_ids: raise ConfigError( "Cannot reuse ID across application services: " @@ -226,11 +233,12 @@ class ApplicationServiceStore(SQLBaseStore): ) seen_as_tokens[appservice.token] = config_file logger.info("Loaded application service: %s", appservice) - self.services_cache.append(appservice) + appservices.append(appservice) except Exception as e: logger.error("Failed to load appservice from '%s'", config_file) logger.exception(e) raise + return appservices class ApplicationServiceTransactionStore(SQLBaseStore): diff --git a/synapse/storage/engines/__init__.py b/synapse/storage/engines/__init__.py index 4290aea83a..a48230b93f 100644 --- a/synapse/storage/engines/__init__.py +++ b/synapse/storage/engines/__init__.py @@ -26,12 +26,13 @@ SUPPORTED_MODULE = { } -def create_engine(name): +def create_engine(config): + name = config.database_config["name"] engine_class = SUPPORTED_MODULE.get(name, None) if engine_class: module = importlib.import_module(name) - return engine_class(module) + return engine_class(module, config=config) raise RuntimeError( "Unsupported database engine '%s'" % (name,) diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py index 17b7a9c077..a09685b4df 100644 --- a/synapse/storage/engines/postgres.py +++ b/synapse/storage/engines/postgres.py @@ -21,9 +21,10 @@ from ._base import IncorrectDatabaseSetup class PostgresEngine(object): single_threaded = False - def __init__(self, database_module): + def __init__(self, database_module, config): self.module = database_module self.module.extensions.register_type(self.module.extensions.UNICODE) + self.config = config def check_database(self, txn): txn.execute("SHOW SERVER_ENCODING") @@ -44,7 +45,7 @@ class PostgresEngine(object): ) def prepare_database(self, db_conn): - prepare_database(db_conn, self) + prepare_database(db_conn, self, config=self.config) def is_deadlock(self, error): if isinstance(error, self.module.DatabaseError): diff --git a/synapse/storage/engines/sqlite3.py b/synapse/storage/engines/sqlite3.py index 91fac33b8b..522b905949 100644 --- a/synapse/storage/engines/sqlite3.py +++ b/synapse/storage/engines/sqlite3.py @@ -23,8 +23,9 @@ import struct class Sqlite3Engine(object): single_threaded = True - def __init__(self, database_module): + def __init__(self, database_module, config): self.module = database_module + self.config = config def check_database(self, txn): pass @@ -38,7 +39,7 @@ class Sqlite3Engine(object): 
def prepare_database(self, db_conn): prepare_sqlite3_database(db_conn) - prepare_database(db_conn, self) + prepare_database(db_conn, self, config=self.config) def is_deadlock(self, error): return False diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index 850736c85e..3f29aad1e8 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -25,7 +25,7 @@ logger = logging.getLogger(__name__) # Remember to update this number every time a change is made to database # schema files, so the users will be informed on server restarts. -SCHEMA_VERSION = 29 +SCHEMA_VERSION = 30 dir_path = os.path.abspath(os.path.dirname(__file__)) @@ -50,7 +50,7 @@ class UpgradeDatabaseException(PrepareDatabaseException): pass -def prepare_database(db_conn, database_engine): +def prepare_database(db_conn, database_engine, config): """Prepares a database for usage. Will either create all necessary tables or upgrade from an older schema version. """ @@ -61,10 +61,10 @@ def prepare_database(db_conn, database_engine): if version_info: user_version, delta_files, upgraded = version_info _upgrade_existing_database( - cur, user_version, delta_files, upgraded, database_engine + cur, user_version, delta_files, upgraded, database_engine, config ) else: - _setup_new_database(cur, database_engine) + _setup_new_database(cur, database_engine, config) # cur.execute("PRAGMA user_version = %d" % (SCHEMA_VERSION,)) @@ -75,7 +75,7 @@ def prepare_database(db_conn, database_engine): raise -def _setup_new_database(cur, database_engine): +def _setup_new_database(cur, database_engine, config): """Sets up the database by finding a base set of "full schemas" and then applying any necessary deltas. @@ -148,11 +148,12 @@ def _setup_new_database(cur, database_engine): applied_delta_files=[], upgraded=False, database_engine=database_engine, + config=config, ) def _upgrade_existing_database(cur, current_version, applied_delta_files, - upgraded, database_engine): + upgraded, database_engine, config): """Upgrades an existing database. Delta files can either be SQL stored in *.sql files, or python modules @@ -245,7 +246,7 @@ def _upgrade_existing_database(cur, current_version, applied_delta_files, module_name, absolute_path, python_file ) logger.debug("Running script %s", relative_path) - module.run_upgrade(cur, database_engine) + module.run_upgrade(cur, database_engine, config=config) elif ext == ".pyc": # Sometimes .pyc files turn up anyway even though we've # disabled their generation; e.g. from distribution package diff --git a/synapse/storage/schema/delta/30/as_users.py b/synapse/storage/schema/delta/30/as_users.py new file mode 100644 index 0000000000..4cf4dd0917 --- /dev/null +++ b/synapse/storage/schema/delta/30/as_users.py @@ -0,0 +1,59 @@ +# Copyright 2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import logging +from synapse.storage.appservice import ApplicationServiceStore + + +logger = logging.getLogger(__name__) + + +def run_upgrade(cur, database_engine, config, *args, **kwargs): + # NULL indicates user was not registered by an appservice. + cur.execute("ALTER TABLE users ADD COLUMN appservice_id TEXT") + + cur.execute("SELECT name FROM users") + rows = cur.fetchall() + + config_files = [] + try: + config_files = config.app_service_config_files + except AttributeError: + logger.warning("Could not get app_service_config_files from config") + pass + + appservices = ApplicationServiceStore.load_appservices( + config.server_name, config_files + ) + + owned = {} + + for row in rows: + user_id = row[0] + for appservice in appservices: + if appservice.is_exclusive_user(user_id): + if user_id in owned.keys(): + logger.error( + "user_id %s was owned by more than one application" + " service (IDs %s and %s); assigning arbitrarily to %s" % + (user_id, owned[user_id], appservice.id, owned[user_id]) + ) + owned[user_id] = appservice.id + + for user_id, as_id in owned.items(): + cur.execute( + database_engine.convert_param_style( + "UPDATE users SET appservice_id = ? WHERE name = ?" + ), + (as_id, user_id) + ) diff --git a/tests/storage/test_base.py b/tests/storage/test_base.py index 152d027663..0684fb6f70 100644 --- a/tests/storage/test_base.py +++ b/tests/storage/test_base.py @@ -48,11 +48,12 @@ class SQLBaseStoreTestCase(unittest.TestCase): config = Mock() config.event_cache_size = 1 + config.database_config = {"name": "sqlite3"} hs = HomeServer( "test", db_pool=self.db_pool, config=config, - database_engine=create_engine("sqlite3"), + database_engine=create_engine(config), ) self.datastore = SQLBaseStore(hs) diff --git a/tests/utils.py b/tests/utils.py index 3b1eb50d8d..f3935648a0 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -51,6 +51,8 @@ def setup_test_homeserver(name="test", datastore=None, config=None, **kargs): config.server_name = "server.under.test" config.trusted_third_party_id_servers = [] + config.database_config = {"name": "sqlite3"} + if "clock" not in kargs: kargs["clock"] = MockClock() @@ -60,7 +62,7 @@ def setup_test_homeserver(name="test", datastore=None, config=None, **kargs): hs = HomeServer( name, db_pool=db_pool, config=config, version_string="Synapse/tests", - database_engine=create_engine("sqlite3"), + database_engine=create_engine(config), get_db_conn=db_pool.get_db_conn, **kargs ) @@ -69,7 +71,7 @@ def setup_test_homeserver(name="test", datastore=None, config=None, **kargs): hs = HomeServer( name, db_pool=None, datastore=datastore, config=config, version_string="Synapse/tests", - database_engine=create_engine("sqlite3"), + database_engine=create_engine(config), **kargs ) @@ -277,18 +279,24 @@ class SQLiteMemoryDbPool(ConnectionPool, object): cp_max=1, ) + self.config = Mock() + self.config.database_config = {"name": "sqlite3"} + def prepare(self): - engine = create_engine("sqlite3") + engine = self.create_engine() return self.runWithConnection( - lambda conn: prepare_database(conn, engine) + lambda conn: prepare_database(conn, engine, self.config) ) def get_db_conn(self): conn = self.connect() - engine = create_engine("sqlite3") - prepare_database(conn, engine) + engine = self.create_engine() + prepare_database(conn, engine, self.config) return conn + def create_engine(self): + return create_engine(self.config) + class MemoryDataStore(object): From dc6da63e30a636133b8047c09e45d2c2716cbecb Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Thu, 11 Feb 
2016 17:37:38 +0000 Subject: [PATCH 154/349] Enforce user_id exclusivity for AS registrations This whole set of checks is kind of an ugly mess, which I may clean up at some point, but for now let's be correct. --- synapse/handlers/register.py | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 24c850ae9b..f8959e5d82 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -60,7 +60,7 @@ class RegistrationHandler(BaseHandler): user = UserID(localpart, self.hs.hostname) user_id = user.to_string() - yield self.check_user_id_is_valid(user_id) + yield self.check_user_id_not_appservice_exclusive(user_id) users = yield self.store.get_users_by_id_case_insensitive(user_id) if users: @@ -145,7 +145,7 @@ class RegistrationHandler(BaseHandler): localpart = yield self._generate_user_id(attempts > 0) user = UserID(localpart, self.hs.hostname) user_id = user.to_string() - yield self.check_user_id_is_valid(user_id) + yield self.check_user_id_not_appservice_exclusive(user_id) if generate_token: token = self.auth_handler().generate_access_token(user_id) try: @@ -180,6 +180,11 @@ class RegistrationHandler(BaseHandler): 400, "Invalid user localpart for this application service.", errcode=Codes.EXCLUSIVE ) + + yield self.check_user_id_not_appservice_exclusive( + user_id, allowed_appservice=service + ) + token = self.auth_handler().generate_access_token(user_id) yield self.store.register( user_id=user_id, @@ -226,7 +231,7 @@ class RegistrationHandler(BaseHandler): user = UserID(localpart, self.hs.hostname) user_id = user.to_string() - yield self.check_user_id_is_valid(user_id) + yield self.check_user_id_not_appservice_exclusive(user_id) token = self.auth_handler().generate_access_token(user_id) try: yield self.store.register( @@ -278,12 +283,14 @@ class RegistrationHandler(BaseHandler): yield identity_handler.bind_threepid(c, user_id) @defer.inlineCallbacks - def check_user_id_is_valid(self, user_id): + def check_user_id_not_appservice_exclusive(self, user_id, allowed_appservice=None): # valid user IDs must not clash with any user ID namespaces claimed by # application services. services = yield self.store.get_app_services() interested_services = [ - s for s in services if s.is_interested_in_user(user_id) + s for s in services + if s.is_interested_in_user(user_id) + and s != allowed_appservice ] for service in interested_services: if service.is_exclusive_user(user_id): From 4d54d87c3e640819a5e1707f58529af8e1c57c7d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 12 Feb 2016 11:13:06 +0000 Subject: [PATCH 155/349] Mitigate against incorrect old state in /sync. There have been reports from the field that servers occasionally have incorrect notions of what the old state of a room is. This proves problematic as /sync relies on a correct old state. This patch makes /sync specifically include in the 'state' key any current state events that haven't been correctly included. --- synapse/handlers/sync.py | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 1d0f0058a2..b2b9b928c9 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -823,15 +823,17 @@ class SyncHandler(BaseHandler): # TODO(mjark) Check for new redactions in the state events. 
with Measure(self.clock, "compute_state_delta"): + current_state = yield self.get_state_at( + room_id, stream_position=now_token + ) + if full_state: if batch: state = yield self.store.get_state_for_event( batch.events[0].event_id ) else: - state = yield self.get_state_at( - room_id, stream_position=now_token - ) + state = current_state timeline_state = { (event.type, event.state_key): event @@ -842,6 +844,7 @@ class SyncHandler(BaseHandler): timeline_contains=timeline_state, timeline_start=state, previous={}, + current=current_state, ) elif batch.limited: state_at_previous_sync = yield self.get_state_at( @@ -861,6 +864,7 @@ class SyncHandler(BaseHandler): timeline_contains=timeline_state, timeline_start=state_at_timeline_start, previous=state_at_previous_sync, + current=current_state, ) else: state = {} @@ -920,7 +924,7 @@ def _action_has_highlight(actions): return False -def _calculate_state(timeline_contains, timeline_start, previous): +def _calculate_state(timeline_contains, timeline_start, previous, current): """Works out what state to include in a sync response. Args: @@ -928,6 +932,7 @@ def _calculate_state(timeline_contains, timeline_start, previous): timeline_start (dict): state at the start of the timeline previous (dict): state at the end of the previous sync (or empty dict if this is an initial sync) + current (dict): state at the end of the timeline Returns: dict @@ -938,14 +943,16 @@ def _calculate_state(timeline_contains, timeline_start, previous): timeline_contains.values(), previous.values(), timeline_start.values(), + current.values(), ) } + c_ids = set(e.event_id for e in current.values()) tc_ids = set(e.event_id for e in timeline_contains.values()) p_ids = set(e.event_id for e in previous.values()) ts_ids = set(e.event_id for e in timeline_start.values()) - state_ids = (ts_ids - p_ids) - tc_ids + state_ids = ((c_ids | ts_ids) - p_ids) - tc_ids evs = (event_id_to_state[e] for e in state_ids) return { From 58c9f206929560044fccae84c36fdd89724ccfc0 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Fri, 12 Feb 2016 13:46:59 +0000 Subject: [PATCH 156/349] Catch the exceptions thrown by twisted when you write to a closed connection --- synapse/http/server.py | 21 ++++++++++++++++++++- synapse/rest/client/v1/login.py | 10 ++++++---- synapse/rest/client/v2_alpha/auth.py | 5 +++-- synapse/rest/media/v0/content_repository.py | 4 ++-- synapse/rest/media/v1/base_resource.py | 4 ++-- 5 files changed, 33 insertions(+), 11 deletions(-) diff --git a/synapse/http/server.py b/synapse/http/server.py index a90e2e1125..b17b190ee5 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -367,10 +367,29 @@ def respond_with_json_bytes(request, code, json_bytes, send_cors=False, "Origin, X-Requested-With, Content-Type, Accept") request.write(json_bytes) - request.finish() + finish_request(request) return NOT_DONE_YET +def finish_request(request): + """ Finish writing the response to the request. + + Twisted throws a RuntimeException if the connection closed before the + response was written but doesn't provide a convenient or reliable way to + determine if the connection was closed. So we catch and log the RuntimeException + + You might think that ``request.notifyFinish`` could be used to tell if the + request was finished. However the deferred it returns won't fire if the + connection was already closed, meaning we'd have to have called the method + right at the start of the request. By the time we want to write the response + it will already be too late. 
+ """ + try: + request.finish() + except RuntimeError as e: + logger.info("Connection disconnected before response was written: %r", e) + + def _request_user_agent_is_curl(request): user_agents = request.requestHeaders.getRawHeaders( "User-Agent", default=[] diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index 7199113dac..79101106ac 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -17,6 +17,8 @@ from twisted.internet import defer from synapse.api.errors import SynapseError, LoginError, Codes from synapse.types import UserID +from synapse.http.server import finish_request + from base import ClientV1RestServlet, client_path_patterns import simplejson as json @@ -263,7 +265,7 @@ class SAML2RestServlet(ClientV1RestServlet): '?status=authenticated&access_token=' + token + '&user_id=' + user_id + '&ava=' + urllib.quote(json.dumps(saml2_auth.ava))) - request.finish() + finish_request(request) defer.returnValue(None) defer.returnValue((200, {"status": "authenticated", "user_id": user_id, "token": token, @@ -272,7 +274,7 @@ class SAML2RestServlet(ClientV1RestServlet): request.redirect(urllib.unquote( request.args['RelayState'][0]) + '?status=not_authenticated') - request.finish() + finish_request(request) defer.returnValue(None) defer.returnValue((200, {"status": "not_authenticated"})) @@ -309,7 +311,7 @@ class CasRedirectServlet(ClientV1RestServlet): "service": "%s?%s" % (hs_redirect_url, client_redirect_url_param) }) request.redirect("%s?%s" % (self.cas_server_url, service_param)) - request.finish() + finish_request(request) class CasTicketServlet(ClientV1RestServlet): @@ -362,7 +364,7 @@ class CasTicketServlet(ClientV1RestServlet): redirect_url = self.add_login_token_to_redirect_url(client_redirect_url, login_token) request.redirect(redirect_url) - request.finish() + finish_request(request) def add_login_token_to_redirect_url(self, url, token): url_parts = list(urlparse.urlparse(url)) diff --git a/synapse/rest/client/v2_alpha/auth.py b/synapse/rest/client/v2_alpha/auth.py index ff71c40b43..78181b7b18 100644 --- a/synapse/rest/client/v2_alpha/auth.py +++ b/synapse/rest/client/v2_alpha/auth.py @@ -18,6 +18,7 @@ from twisted.internet import defer from synapse.api.constants import LoginType from synapse.api.errors import SynapseError from synapse.api.urls import CLIENT_V2_ALPHA_PREFIX +from synapse.http.server import finish_request from synapse.http.servlet import RestServlet from ._base import client_v2_patterns @@ -130,7 +131,7 @@ class AuthRestServlet(RestServlet): request.setHeader(b"Content-Length", b"%d" % (len(html_bytes),)) request.write(html_bytes) - request.finish() + finish_request(request) defer.returnValue(None) else: raise SynapseError(404, "Unknown auth stage type") @@ -176,7 +177,7 @@ class AuthRestServlet(RestServlet): request.setHeader(b"Content-Length", b"%d" % (len(html_bytes),)) request.write(html_bytes) - request.finish() + finish_request(request) defer.returnValue(None) else: diff --git a/synapse/rest/media/v0/content_repository.py b/synapse/rest/media/v0/content_repository.py index dcf3eaee1f..d9fc045fc6 100644 --- a/synapse/rest/media/v0/content_repository.py +++ b/synapse/rest/media/v0/content_repository.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from synapse.http.server import respond_with_json_bytes +from synapse.http.server import respond_with_json_bytes, finish_request from synapse.util.stringutils import random_string from synapse.api.errors import ( @@ -144,7 +144,7 @@ class ContentRepoResource(resource.Resource): # after the file has been sent, clean up and finish the request def cbFinished(ignored): f.close() - request.finish() + finish_request(request) d.addCallback(cbFinished) else: respond_with_json_bytes( diff --git a/synapse/rest/media/v1/base_resource.py b/synapse/rest/media/v1/base_resource.py index 58d56ec7a4..58ef91c0b8 100644 --- a/synapse/rest/media/v1/base_resource.py +++ b/synapse/rest/media/v1/base_resource.py @@ -16,7 +16,7 @@ from .thumbnailer import Thumbnailer from synapse.http.matrixfederationclient import MatrixFederationHttpClient -from synapse.http.server import respond_with_json +from synapse.http.server import respond_with_json, finish_request from synapse.util.stringutils import random_string from synapse.api.errors import ( cs_error, Codes, SynapseError @@ -238,7 +238,7 @@ class BaseMediaResource(Resource): with open(file_path, "rb") as f: yield FileSender().beginFileTransfer(f, request) - request.finish() + finish_request(request) else: self._respond_404(request) From cf81375b94c4763766440471e632fc4b103450ab Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Fri, 12 Feb 2016 15:11:49 +0000 Subject: [PATCH 157/349] Merge two of the room join codepaths There's at least one more to merge in. Side-effects: * Stop reporting None as displayname and avatar_url in some cases * Joining a room by alias populates guest-ness in join event * Remove unspec'd PUT version of /join/ which has not been called on matrix.org according to logs * Stop recording access_token_id on /join/room_id - currently we don't record it on /join/room_alias; I can try to thread it through at some point. 
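A quick sketch of the identifier dispatch this commit enables, using the new
DomainSpecificString.is_valid classmethod that the patch adds to synapse/types.py;
the example values and the standalone asserts below are illustrative only and are
not part of the patch itself:

# Illustrative usage (assumed values): is_valid() simply tries from_string() and
# reports whether the sigil/":"-separated form parses, which is how the /join
# servlet now tells a room ID apart from a room alias.
from synapse.types import RoomAlias, RoomID

assert RoomAlias.is_valid("#some-room:example.com")   # alias: resolved via the directory
assert RoomID.is_valid("!abc123:example.com")         # room ID: joined directly
assert not RoomAlias.is_valid("!abc123:example.com")  # wrong sigil fails to parse
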
--- synapse/api/errors.py | 5 +++ synapse/handlers/profile.py | 11 +++++- synapse/handlers/room.py | 44 +++++++++++++++++----- synapse/rest/client/v1/room.py | 68 +++++++--------------------------- synapse/types.py | 14 +++++-- 5 files changed, 73 insertions(+), 69 deletions(-) diff --git a/synapse/api/errors.py b/synapse/api/errors.py index b106fbed6d..0c7858f78d 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -84,6 +84,11 @@ class RegistrationError(SynapseError): pass +class BadIdentifierError(SynapseError): + """An error indicating an identifier couldn't be parsed.""" + pass + + class UnrecognizedRequestError(SynapseError): """An error indicating we don't understand the request you're trying to make""" def __init__(self, *args, **kwargs): diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index 629e6e3594..32af622733 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -169,8 +169,15 @@ class ProfileHandler(BaseHandler): consumeErrors=True ).addErrback(unwrapFirstError) - state["displayname"] = displayname - state["avatar_url"] = avatar_url + if displayname is None: + del state["displayname"] + else: + state["displayname"] = displayname + + if avatar_url is None: + del state["avatar_url"] + else: + state["avatar_url"] = avatar_url defer.returnValue(None) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index b2de2cd0c0..2950ed14e4 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -527,7 +527,17 @@ class RoomMemberHandler(BaseHandler): defer.returnValue({"room_id": room_id}) @defer.inlineCallbacks - def join_room_alias(self, joinee, room_alias, content={}): + def lookup_room_alias(self, room_alias): + """ + Gets the room ID for an alias. + + Args: + room_alias (str): The room alias to look up. + Returns: + A tuple of the room ID (str) and the hosts hosting the room ([str]) + Raises: + SynapseError if the room couldn't be looked up. + """ directory_handler = self.hs.get_handlers().directory_handler mapping = yield directory_handler.get_association(room_alias) @@ -539,24 +549,40 @@ class RoomMemberHandler(BaseHandler): if not hosts: raise SynapseError(404, "No known servers") - # If event doesn't include a display name, add one. - yield collect_presencelike_data(self.distributor, joinee, content) + defer.returnValue((room_id, hosts)) + + @defer.inlineCallbacks + def do_join(self, requester, room_id, hosts=None): + """ + Joins requester to room_id. + + Args: + requester (Requester): The user joining the room. + room_id (str): The room ID (not alias) being joined. + hosts ([str]): A list of hosts which are hopefully in the room. + Raises: + SynapseError if the room couldn't be joined. 
+ """ + hosts = hosts or [] + + content = {"membership": Membership.JOIN} + if requester.is_guest: + content["kind"] = "guest" + + yield collect_presencelike_data(self.distributor, requester.user, content) - content.update({"membership": Membership.JOIN}) builder = self.event_builder_factory.new({ "type": EventTypes.Member, - "state_key": joinee.to_string(), + "state_key": requester.user.to_string(), "room_id": room_id, - "sender": joinee.to_string(), - "membership": Membership.JOIN, + "sender": requester.user.to_string(), + "membership": Membership.JOIN, # For backwards compatibility "content": content, }) event, context = yield self._create_new_client_event(builder) yield self._do_join(event, context, room_hosts=hosts) - defer.returnValue({"room_id": room_id}) - @defer.inlineCallbacks def _do_join(self, event, context, room_hosts=None): room_id = event.room_id diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 81bfe377bd..1dd33b0a56 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -216,11 +216,7 @@ class RoomSendEventRestServlet(ClientV1RestServlet): # TODO: Needs unit testing for room ID + alias joins class JoinRoomAliasServlet(ClientV1RestServlet): - - def register(self, http_server): - # /join/$room_identifier[/$txn_id] - PATTERNS = ("/join/(?P[^/]*)") - register_txn_path(self, PATTERNS, http_server) + PATTERNS = client_path_patterns("/join/(?P[^/]*)$") @defer.inlineCallbacks def on_POST(self, request, room_identifier, txn_id=None): @@ -229,60 +225,22 @@ class JoinRoomAliasServlet(ClientV1RestServlet): allow_guest=True, ) - # the identifier could be a room alias or a room id. Try one then the - # other if it fails to parse, without swallowing other valid - # SynapseErrors. + handler = self.handlers.room_member_handler - identifier = None - is_room_alias = False - try: - identifier = RoomAlias.from_string(room_identifier) - is_room_alias = True - except SynapseError: - identifier = RoomID.from_string(room_identifier) + room_id = None + hosts = [] + if RoomAlias.is_valid(room_identifier): + room_alias = RoomAlias.from_string(room_identifier) + room_id, hosts = yield handler.lookup_room_alias(room_alias) + else: + room_id = RoomID.from_string(room_identifier).to_string() # TODO: Support for specifying the home server to join with? 
- if is_room_alias: - handler = self.handlers.room_member_handler - ret_dict = yield handler.join_room_alias( - requester.user, - identifier, - ) - defer.returnValue((200, ret_dict)) - else: # room id - msg_handler = self.handlers.message_handler - content = {"membership": Membership.JOIN} - if requester.is_guest: - content["kind"] = "guest" - yield msg_handler.create_and_send_event( - { - "type": EventTypes.Member, - "content": content, - "room_id": identifier.to_string(), - "sender": requester.user.to_string(), - "state_key": requester.user.to_string(), - }, - token_id=requester.access_token_id, - txn_id=txn_id, - is_guest=requester.is_guest, - ) - - defer.returnValue((200, {"room_id": identifier.to_string()})) - - @defer.inlineCallbacks - def on_PUT(self, request, room_identifier, txn_id): - try: - defer.returnValue( - self.txns.get_client_transaction(request, txn_id) - ) - except KeyError: - pass - - response = yield self.on_POST(request, room_identifier, txn_id) - - self.txns.store_client_transaction(request, txn_id, response) - defer.returnValue(response) + yield handler.do_join( + requester, room_id, hosts=hosts + ) + defer.returnValue((200, {"room_id": room_id})) # TODO: Needs unit testing diff --git a/synapse/types.py b/synapse/types.py index 2095837ba6..0be8384e18 100644 --- a/synapse/types.py +++ b/synapse/types.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from synapse.api.errors import SynapseError +from synapse.api.errors import SynapseError, BadIdentifierError from collections import namedtuple @@ -51,13 +51,13 @@ class DomainSpecificString( def from_string(cls, s): """Parse the string given by 's' into a structure object.""" if len(s) < 1 or s[0] != cls.SIGIL: - raise SynapseError(400, "Expected %s string to start with '%s'" % ( + raise BadIdentifierError(400, "Expected %s string to start with '%s'" % ( cls.__name__, cls.SIGIL, )) parts = s[1:].split(':', 1) if len(parts) != 2: - raise SynapseError( + raise BadIdentifierError( 400, "Expected %s of the form '%slocalname:domain'" % ( cls.__name__, cls.SIGIL, ) @@ -69,6 +69,14 @@ class DomainSpecificString( # names on one HS return cls(localpart=parts[0], domain=domain) + @classmethod + def is_valid(cls, s): + try: + cls.from_string(s) + return True + except: + return False + def to_string(self): """Return a string encoding the fields of the structure object.""" return "%s%s:%s" % (self.SIGIL, self.localpart, self.domain) From 4de08a4672c62eebda2ad3ee89643c2c32242cbf Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Fri, 12 Feb 2016 16:17:24 +0000 Subject: [PATCH 158/349] Revert "Merge two of the room join codepaths" This reverts commit cf81375b94c4763766440471e632fc4b103450ab. 
It subtly violates a guest joining auth check --- synapse/api/errors.py | 5 --- synapse/handlers/profile.py | 11 +----- synapse/handlers/room.py | 44 +++++----------------- synapse/rest/client/v1/room.py | 68 +++++++++++++++++++++++++++------- synapse/types.py | 14 ++----- 5 files changed, 69 insertions(+), 73 deletions(-) diff --git a/synapse/api/errors.py b/synapse/api/errors.py index 0c7858f78d..b106fbed6d 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -84,11 +84,6 @@ class RegistrationError(SynapseError): pass -class BadIdentifierError(SynapseError): - """An error indicating an identifier couldn't be parsed.""" - pass - - class UnrecognizedRequestError(SynapseError): """An error indicating we don't understand the request you're trying to make""" def __init__(self, *args, **kwargs): diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index 32af622733..629e6e3594 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -169,15 +169,8 @@ class ProfileHandler(BaseHandler): consumeErrors=True ).addErrback(unwrapFirstError) - if displayname is None: - del state["displayname"] - else: - state["displayname"] = displayname - - if avatar_url is None: - del state["avatar_url"] - else: - state["avatar_url"] = avatar_url + state["displayname"] = displayname + state["avatar_url"] = avatar_url defer.returnValue(None) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 2950ed14e4..b2de2cd0c0 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -527,17 +527,7 @@ class RoomMemberHandler(BaseHandler): defer.returnValue({"room_id": room_id}) @defer.inlineCallbacks - def lookup_room_alias(self, room_alias): - """ - Gets the room ID for an alias. - - Args: - room_alias (str): The room alias to look up. - Returns: - A tuple of the room ID (str) and the hosts hosting the room ([str]) - Raises: - SynapseError if the room couldn't be looked up. - """ + def join_room_alias(self, joinee, room_alias, content={}): directory_handler = self.hs.get_handlers().directory_handler mapping = yield directory_handler.get_association(room_alias) @@ -549,40 +539,24 @@ class RoomMemberHandler(BaseHandler): if not hosts: raise SynapseError(404, "No known servers") - defer.returnValue((room_id, hosts)) - - @defer.inlineCallbacks - def do_join(self, requester, room_id, hosts=None): - """ - Joins requester to room_id. - - Args: - requester (Requester): The user joining the room. - room_id (str): The room ID (not alias) being joined. - hosts ([str]): A list of hosts which are hopefully in the room. - Raises: - SynapseError if the room couldn't be joined. - """ - hosts = hosts or [] - - content = {"membership": Membership.JOIN} - if requester.is_guest: - content["kind"] = "guest" - - yield collect_presencelike_data(self.distributor, requester.user, content) + # If event doesn't include a display name, add one. 
+ yield collect_presencelike_data(self.distributor, joinee, content) + content.update({"membership": Membership.JOIN}) builder = self.event_builder_factory.new({ "type": EventTypes.Member, - "state_key": requester.user.to_string(), + "state_key": joinee.to_string(), "room_id": room_id, - "sender": requester.user.to_string(), - "membership": Membership.JOIN, # For backwards compatibility + "sender": joinee.to_string(), + "membership": Membership.JOIN, "content": content, }) event, context = yield self._create_new_client_event(builder) yield self._do_join(event, context, room_hosts=hosts) + defer.returnValue({"room_id": room_id}) + @defer.inlineCallbacks def _do_join(self, event, context, room_hosts=None): room_id = event.room_id diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 1dd33b0a56..81bfe377bd 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -216,7 +216,11 @@ class RoomSendEventRestServlet(ClientV1RestServlet): # TODO: Needs unit testing for room ID + alias joins class JoinRoomAliasServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/join/(?P[^/]*)$") + + def register(self, http_server): + # /join/$room_identifier[/$txn_id] + PATTERNS = ("/join/(?P[^/]*)") + register_txn_path(self, PATTERNS, http_server) @defer.inlineCallbacks def on_POST(self, request, room_identifier, txn_id=None): @@ -225,22 +229,60 @@ class JoinRoomAliasServlet(ClientV1RestServlet): allow_guest=True, ) - handler = self.handlers.room_member_handler + # the identifier could be a room alias or a room id. Try one then the + # other if it fails to parse, without swallowing other valid + # SynapseErrors. - room_id = None - hosts = [] - if RoomAlias.is_valid(room_identifier): - room_alias = RoomAlias.from_string(room_identifier) - room_id, hosts = yield handler.lookup_room_alias(room_alias) - else: - room_id = RoomID.from_string(room_identifier).to_string() + identifier = None + is_room_alias = False + try: + identifier = RoomAlias.from_string(room_identifier) + is_room_alias = True + except SynapseError: + identifier = RoomID.from_string(room_identifier) # TODO: Support for specifying the home server to join with? 
- yield handler.do_join( - requester, room_id, hosts=hosts - ) - defer.returnValue((200, {"room_id": room_id})) + if is_room_alias: + handler = self.handlers.room_member_handler + ret_dict = yield handler.join_room_alias( + requester.user, + identifier, + ) + defer.returnValue((200, ret_dict)) + else: # room id + msg_handler = self.handlers.message_handler + content = {"membership": Membership.JOIN} + if requester.is_guest: + content["kind"] = "guest" + yield msg_handler.create_and_send_event( + { + "type": EventTypes.Member, + "content": content, + "room_id": identifier.to_string(), + "sender": requester.user.to_string(), + "state_key": requester.user.to_string(), + }, + token_id=requester.access_token_id, + txn_id=txn_id, + is_guest=requester.is_guest, + ) + + defer.returnValue((200, {"room_id": identifier.to_string()})) + + @defer.inlineCallbacks + def on_PUT(self, request, room_identifier, txn_id): + try: + defer.returnValue( + self.txns.get_client_transaction(request, txn_id) + ) + except KeyError: + pass + + response = yield self.on_POST(request, room_identifier, txn_id) + + self.txns.store_client_transaction(request, txn_id, response) + defer.returnValue(response) # TODO: Needs unit testing diff --git a/synapse/types.py b/synapse/types.py index 0be8384e18..2095837ba6 100644 --- a/synapse/types.py +++ b/synapse/types.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from synapse.api.errors import SynapseError, BadIdentifierError +from synapse.api.errors import SynapseError from collections import namedtuple @@ -51,13 +51,13 @@ class DomainSpecificString( def from_string(cls, s): """Parse the string given by 's' into a structure object.""" if len(s) < 1 or s[0] != cls.SIGIL: - raise BadIdentifierError(400, "Expected %s string to start with '%s'" % ( + raise SynapseError(400, "Expected %s string to start with '%s'" % ( cls.__name__, cls.SIGIL, )) parts = s[1:].split(':', 1) if len(parts) != 2: - raise BadIdentifierError( + raise SynapseError( 400, "Expected %s of the form '%slocalname:domain'" % ( cls.__name__, cls.SIGIL, ) @@ -69,14 +69,6 @@ class DomainSpecificString( # names on one HS return cls(localpart=parts[0], domain=domain) - @classmethod - def is_valid(cls, s): - try: - cls.from_string(s) - return True - except: - return False - def to_string(self): """Return a string encoding the fields of the structure object.""" return "%s%s:%s" % (self.SIGIL, self.localpart, self.domain) From dbeed36dec021df3036e088910c72d5727910dd3 Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Mon, 15 Feb 2016 14:38:27 +0000 Subject: [PATCH 159/349] Merge some room joining codepaths Force joining by alias to go through the send_membership_event checks, rather than bypassing them straight into _do_join. This is the first of many stages of cleanup. --- synapse/handlers/room.py | 14 ++++++++++---- synapse/rest/client/v1/room.py | 2 +- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index b2de2cd0c0..89695cc0cf 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -455,7 +455,7 @@ class RoomMemberHandler(BaseHandler): yield self.forget(requester.user, room_id) @defer.inlineCallbacks - def send_membership_event(self, event, context, is_guest=False): + def send_membership_event(self, event, context, is_guest=False, room_hosts=None): """ Change the membership status of a user in a room. 
Args: @@ -490,7 +490,7 @@ class RoomMemberHandler(BaseHandler): if not is_guest_access_allowed: raise AuthError(403, "Guest access not allowed") - yield self._do_join(event, context) + yield self._do_join(event, context, room_hosts=room_hosts) else: if event.membership == Membership.LEAVE: is_host_in_room = yield self.is_host_in_room(room_id, context) @@ -527,7 +527,8 @@ class RoomMemberHandler(BaseHandler): defer.returnValue({"room_id": room_id}) @defer.inlineCallbacks - def join_room_alias(self, joinee, room_alias, content={}): + def join_room_alias(self, requester, room_alias, content={}): + joinee = requester.user directory_handler = self.hs.get_handlers().directory_handler mapping = yield directory_handler.get_association(room_alias) @@ -553,7 +554,12 @@ class RoomMemberHandler(BaseHandler): }) event, context = yield self._create_new_client_event(builder) - yield self._do_join(event, context, room_hosts=hosts) + yield self.send_membership_event( + event, + context, + is_guest=requester.is_guest, + room_hosts=hosts + ) defer.returnValue({"room_id": room_id}) diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 81bfe377bd..76025213dc 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -246,7 +246,7 @@ class JoinRoomAliasServlet(ClientV1RestServlet): if is_room_alias: handler = self.handlers.room_member_handler ret_dict = yield handler.join_room_alias( - requester.user, + requester, identifier, ) defer.returnValue((200, ret_dict)) From e71095801fc376aac30ff9408ae7f0203684024d Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Mon, 15 Feb 2016 15:39:16 +0000 Subject: [PATCH 160/349] Merge implementation of /join by alias or ID This code is kind of rough (passing the remote servers down a long chain), but is a step towards improvement. --- synapse/handlers/_base.py | 5 ++- synapse/handlers/message.py | 20 ++++++---- synapse/handlers/room.py | 40 ++++++++----------- synapse/rest/client/v1/room.py | 70 ++++++++++++++++------------------ synapse/types.py | 8 ++++ 5 files changed, 72 insertions(+), 71 deletions(-) diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py index 064e8723c8..8508ecdd49 100644 --- a/synapse/handlers/_base.py +++ b/synapse/handlers/_base.py @@ -188,9 +188,12 @@ class BaseHandler(object): ) @defer.inlineCallbacks - def handle_new_client_event(self, event, context, extra_users=[]): + def handle_new_client_event(self, event, context, ratelimit=True, extra_users=[]): # We now need to go and hit out to wherever we need to hit out to. + if ratelimit: + self.ratelimit(event.sender) + self.auth.check(event, auth_events=context.current_state) yield self.maybe_kick_guest_users(event, context.current_state.values()) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 82c8cb5f0c..a94fad1735 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -216,7 +216,7 @@ class MessageHandler(BaseHandler): defer.returnValue((event, context)) @defer.inlineCallbacks - def send_event(self, event, context, ratelimit=True, is_guest=False): + def send_event(self, event, context, ratelimit=True, is_guest=False, room_hosts=None): """ Persists and notifies local clients and federation of an event. 
@@ -230,9 +230,6 @@ class MessageHandler(BaseHandler): assert self.hs.is_mine(user), "User must be our own: %s" % (user,) - if ratelimit: - self.ratelimit(event.sender) - if event.is_state(): prev_state = context.current_state.get((event.type, event.state_key)) if prev_state and event.user_id == prev_state.user_id: @@ -245,11 +242,18 @@ class MessageHandler(BaseHandler): if event.type == EventTypes.Member: member_handler = self.hs.get_handlers().room_member_handler - yield member_handler.send_membership_event(event, context, is_guest=is_guest) + yield member_handler.send_membership_event( + event, + context, + is_guest=is_guest, + ratelimit=ratelimit, + room_hosts=room_hosts + ) else: yield self.handle_new_client_event( event=event, context=context, + ratelimit=ratelimit, ) if event.type == EventTypes.Message: @@ -259,7 +263,8 @@ class MessageHandler(BaseHandler): @defer.inlineCallbacks def create_and_send_event(self, event_dict, ratelimit=True, - token_id=None, txn_id=None, is_guest=False): + token_id=None, txn_id=None, is_guest=False, + room_hosts=None): """ Creates an event, then sends it. @@ -274,7 +279,8 @@ class MessageHandler(BaseHandler): event, context, ratelimit=ratelimit, - is_guest=is_guest + is_guest=is_guest, + room_hosts=room_hosts, ) defer.returnValue(event) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 89695cc0cf..b748e81d20 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -455,7 +455,9 @@ class RoomMemberHandler(BaseHandler): yield self.forget(requester.user, room_id) @defer.inlineCallbacks - def send_membership_event(self, event, context, is_guest=False, room_hosts=None): + def send_membership_event( + self, event, context, is_guest=False, room_hosts=None, ratelimit=True + ): """ Change the membership status of a user in a room. Args: @@ -527,8 +529,17 @@ class RoomMemberHandler(BaseHandler): defer.returnValue({"room_id": room_id}) @defer.inlineCallbacks - def join_room_alias(self, requester, room_alias, content={}): - joinee = requester.user + def lookup_room_alias(self, room_alias): + """ + Get the room ID associated with a room alias. + + Args: + room_alias (RoomAlias): The alias to look up. + Returns: + The room ID as a RoomID object. + Raises: + SynapseError if room alias could not be found. + """ directory_handler = self.hs.get_handlers().directory_handler mapping = yield directory_handler.get_association(room_alias) @@ -540,28 +551,7 @@ class RoomMemberHandler(BaseHandler): if not hosts: raise SynapseError(404, "No known servers") - # If event doesn't include a display name, add one. 
- yield collect_presencelike_data(self.distributor, joinee, content) - - content.update({"membership": Membership.JOIN}) - builder = self.event_builder_factory.new({ - "type": EventTypes.Member, - "state_key": joinee.to_string(), - "room_id": room_id, - "sender": joinee.to_string(), - "membership": Membership.JOIN, - "content": content, - }) - event, context = yield self._create_new_client_event(builder) - - yield self.send_membership_event( - event, - context, - is_guest=requester.is_guest, - room_hosts=hosts - ) - - defer.returnValue({"room_id": room_id}) + defer.returnValue((RoomID.from_string(room_id), hosts)) @defer.inlineCallbacks def _do_join(self, event, context, room_hosts=None): diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 76025213dc..340c24635d 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -229,46 +229,40 @@ class JoinRoomAliasServlet(ClientV1RestServlet): allow_guest=True, ) - # the identifier could be a room alias or a room id. Try one then the - # other if it fails to parse, without swallowing other valid - # SynapseErrors. - - identifier = None - is_room_alias = False - try: - identifier = RoomAlias.from_string(room_identifier) - is_room_alias = True - except SynapseError: - identifier = RoomID.from_string(room_identifier) - - # TODO: Support for specifying the home server to join with? - - if is_room_alias: + if RoomID.is_valid(room_identifier): + room_id = room_identifier + room_hosts = None + elif RoomAlias.is_valid(room_identifier): handler = self.handlers.room_member_handler - ret_dict = yield handler.join_room_alias( - requester, - identifier, - ) - defer.returnValue((200, ret_dict)) - else: # room id - msg_handler = self.handlers.message_handler - content = {"membership": Membership.JOIN} - if requester.is_guest: - content["kind"] = "guest" - yield msg_handler.create_and_send_event( - { - "type": EventTypes.Member, - "content": content, - "room_id": identifier.to_string(), - "sender": requester.user.to_string(), - "state_key": requester.user.to_string(), - }, - token_id=requester.access_token_id, - txn_id=txn_id, - is_guest=requester.is_guest, - ) + room_alias = RoomAlias.from_string(room_identifier) + room_id, room_hosts = yield handler.lookup_room_alias(room_alias) + room_id = room_id.to_string() + else: + raise SynapseError(400, "%s was not legal room ID or room alias" % ( + room_identifier, + )) - defer.returnValue((200, {"room_id": identifier.to_string()})) + msg_handler = self.handlers.message_handler + content = {"membership": Membership.JOIN} + if requester.is_guest: + content["kind"] = "guest" + yield msg_handler.create_and_send_event( + { + "type": EventTypes.Member, + "content": content, + "room_id": room_id, + "sender": requester.user.to_string(), + "state_key": requester.user.to_string(), + + "membership": Membership.JOIN, # For backwards compatibility + }, + token_id=requester.access_token_id, + txn_id=txn_id, + is_guest=requester.is_guest, + room_hosts=room_hosts, + ) + + defer.returnValue((200, {"room_id": room_id})) @defer.inlineCallbacks def on_PUT(self, request, room_identifier, txn_id): diff --git a/synapse/types.py b/synapse/types.py index 2095837ba6..d5bd95cbd3 100644 --- a/synapse/types.py +++ b/synapse/types.py @@ -73,6 +73,14 @@ class DomainSpecificString( """Return a string encoding the fields of the structure object.""" return "%s%s:%s" % (self.SIGIL, self.localpart, self.domain) + @classmethod + def is_valid(cls, s): + try: + cls.from_string(s) + return True + 
except: + return False + __str__ = to_string @classmethod From f318d4f2a4b45aaab234a124d4db2bee986ba911 Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Mon, 15 Feb 2016 15:57:10 +0000 Subject: [PATCH 161/349] Inline _do_join as it now only has one caller Also, consistently apply rate limiting. Again, ugly, but a step in the right direction. --- synapse/handlers/room.py | 95 ++++++++++++++++++++-------------------- 1 file changed, 47 insertions(+), 48 deletions(-) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index b748e81d20..44bab19c51 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -492,7 +492,50 @@ class RoomMemberHandler(BaseHandler): if not is_guest_access_allowed: raise AuthError(403, "Guest access not allowed") - yield self._do_join(event, context, room_hosts=room_hosts) + room_id = event.room_id + + # XXX: We don't do an auth check if we are doing an invite + # join dance for now, since we're kinda implicitly checking + # that we are allowed to join when we decide whether or not we + # need to do the invite/join dance. + + is_host_in_room = yield self.is_host_in_room(room_id, context) + if is_host_in_room: + should_do_dance = False + elif room_hosts: # TODO: Shouldn't this be remote_room_host? + should_do_dance = True + else: + inviter = yield self.get_inviter(event) + if not inviter: + # return the same error as join_room_alias does + raise SynapseError(404, "No known servers") + should_do_dance = not self.hs.is_mine(inviter) + room_hosts = [inviter.domain] + + if should_do_dance: + handler = self.hs.get_handlers().federation_handler + yield handler.do_invite_join( + room_hosts, + room_id, + event.user_id, + event.content, + ) + else: + logger.debug("Doing normal join") + + yield self._do_local_membership_update( + event, + context=context, + ratelimit=ratelimit, + ) + + prev_state = context.current_state.get((event.type, event.state_key)) + if not prev_state or prev_state.membership != Membership.JOIN: + # Only fire user_joined_room if the user has acutally joined the + # room. Don't bother if the user is just changing their profile + # info. + user = UserID.from_string(event.user_id) + yield user_joined_room(self.distributor, user, room_id) else: if event.membership == Membership.LEAVE: is_host_in_room = yield self.is_host_in_room(room_id, context) @@ -520,6 +563,7 @@ class RoomMemberHandler(BaseHandler): yield self._do_local_membership_update( event, context=context, + ratelimit=ratelimit, ) if prev_state and prev_state.membership == Membership.JOIN: @@ -553,52 +597,6 @@ class RoomMemberHandler(BaseHandler): defer.returnValue((RoomID.from_string(room_id), hosts)) - @defer.inlineCallbacks - def _do_join(self, event, context, room_hosts=None): - room_id = event.room_id - - # XXX: We don't do an auth check if we are doing an invite - # join dance for now, since we're kinda implicitly checking - # that we are allowed to join when we decide whether or not we - # need to do the invite/join dance. - - is_host_in_room = yield self.is_host_in_room(room_id, context) - if is_host_in_room: - should_do_dance = False - elif room_hosts: # TODO: Shouldn't this be remote_room_host? 
- should_do_dance = True - else: - inviter = yield self.get_inviter(event) - if not inviter: - # return the same error as join_room_alias does - raise SynapseError(404, "No known servers") - should_do_dance = not self.hs.is_mine(inviter) - room_hosts = [inviter.domain] - - if should_do_dance: - handler = self.hs.get_handlers().federation_handler - yield handler.do_invite_join( - room_hosts, - room_id, - event.user_id, - event.content, - ) - else: - logger.debug("Doing normal join") - - yield self._do_local_membership_update( - event, - context=context, - ) - - prev_state = context.current_state.get((event.type, event.state_key)) - if not prev_state or prev_state.membership != Membership.JOIN: - # Only fire user_joined_room if the user has acutally joined the - # room. Don't bother if the user is just changing their profile - # info. - user = UserID.from_string(event.user_id) - yield user_joined_room(self.distributor, user, room_id) - @defer.inlineCallbacks def get_inviter(self, event): # TODO(markjh): get prev_state from snapshot @@ -653,7 +651,7 @@ class RoomMemberHandler(BaseHandler): defer.returnValue(room_ids) @defer.inlineCallbacks - def _do_local_membership_update(self, event, context): + def _do_local_membership_update(self, event, context, ratelimit=True): yield run_on_reactor() target_user = UserID.from_string(event.state_key) @@ -662,6 +660,7 @@ class RoomMemberHandler(BaseHandler): event, context, extra_users=[target_user], + ratelimit=ratelimit, ) @defer.inlineCallbacks From 73e616df2a4e0fb7bc83340f558dede32cc2efc3 Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Mon, 15 Feb 2016 16:02:22 +0000 Subject: [PATCH 162/349] Inline _do_local_membership_update --- synapse/handlers/room.py | 25 +++++++------------------ 1 file changed, 7 insertions(+), 18 deletions(-) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 44bab19c51..d17e5c1b7b 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -24,7 +24,6 @@ from synapse.api.constants import ( ) from synapse.api.errors import AuthError, StoreError, SynapseError, Codes from synapse.util import stringutils, unwrapFirstError -from synapse.util.async import run_on_reactor from synapse.util.logcontext import preserve_context_over_fn from signedjson.sign import verify_signed_json @@ -466,6 +465,7 @@ class RoomMemberHandler(BaseHandler): SynapseError if there was a problem changing the membership. 
""" target_user_id = event.state_key + target_user = UserID.from_string(event.state_key) prev_state = context.current_state.get( (EventTypes.Member, target_user_id), @@ -523,9 +523,10 @@ class RoomMemberHandler(BaseHandler): else: logger.debug("Doing normal join") - yield self._do_local_membership_update( + yield self.handle_new_client_event( event, - context=context, + context, + extra_users=[target_user], ratelimit=ratelimit, ) @@ -560,9 +561,10 @@ class RoomMemberHandler(BaseHandler): defer.returnValue({}) return - yield self._do_local_membership_update( + yield self.handle_new_client_event( event, - context=context, + context, + extra_users=[target_user], ratelimit=ratelimit, ) @@ -650,19 +652,6 @@ class RoomMemberHandler(BaseHandler): defer.returnValue(room_ids) - @defer.inlineCallbacks - def _do_local_membership_update(self, event, context, ratelimit=True): - yield run_on_reactor() - - target_user = UserID.from_string(event.state_key) - - yield self.handle_new_client_event( - event, - context, - extra_users=[target_user], - ratelimit=ratelimit, - ) - @defer.inlineCallbacks def do_3pid_invite( self, From 150fcde0dce02670c2180f9d4657783eb204daa8 Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Mon, 15 Feb 2016 16:16:03 +0000 Subject: [PATCH 163/349] Reuse update_membership from /join --- synapse/handlers/room.py | 12 +++++++++--- synapse/rest/client/v1/room.py | 21 +++++---------------- 2 files changed, 14 insertions(+), 19 deletions(-) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index d17e5c1b7b..04916d4e24 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -403,7 +403,9 @@ class RoomMemberHandler(BaseHandler): remotedomains.add(member.domain) @defer.inlineCallbacks - def update_membership(self, requester, target, room_id, action, txn_id=None): + def update_membership( + self, requester, target, room_id, action, txn_id=None, room_hosts=None + ): effective_membership_state = action if action in ["kick", "unban"]: effective_membership_state = "leave" @@ -412,7 +414,7 @@ class RoomMemberHandler(BaseHandler): msg_handler = self.hs.get_handlers().message_handler - content = {"membership": unicode(effective_membership_state)} + content = {"membership": effective_membership_state} if requester.is_guest: content["kind"] = "guest" @@ -423,6 +425,9 @@ class RoomMemberHandler(BaseHandler): "room_id": room_id, "sender": requester.user.to_string(), "state_key": target.to_string(), + + # For backwards compatibility: + "membership": effective_membership_state, }, token_id=requester.access_token_id, txn_id=txn_id, @@ -447,7 +452,8 @@ class RoomMemberHandler(BaseHandler): event, context, ratelimit=True, - is_guest=requester.is_guest + is_guest=requester.is_guest, + room_hosts=room_hosts, ) if action == "forget": diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 340c24635d..f8cd746a88 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -242,23 +242,12 @@ class JoinRoomAliasServlet(ClientV1RestServlet): room_identifier, )) - msg_handler = self.handlers.message_handler - content = {"membership": Membership.JOIN} - if requester.is_guest: - content["kind"] = "guest" - yield msg_handler.create_and_send_event( - { - "type": EventTypes.Member, - "content": content, - "room_id": room_id, - "sender": requester.user.to_string(), - "state_key": requester.user.to_string(), - - "membership": Membership.JOIN, # For backwards compatibility - }, - token_id=requester.access_token_id, + yield 
self.handlers.room_member_handler.update_membership( + requester=requester, + target=requester.user, + room_id=room_id, + action="join", txn_id=txn_id, - is_guest=requester.is_guest, room_hosts=room_hosts, ) From 1bbb67c452f33d14e1d4c5b51cc1a694de53bbb8 Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Mon, 15 Feb 2016 16:40:22 +0000 Subject: [PATCH 164/349] Use update_membership to kick guests --- synapse/handlers/_base.py | 24 ++++++++++-------------- synapse/handlers/room.py | 11 +++++++++-- 2 files changed, 19 insertions(+), 16 deletions(-) diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py index 8508ecdd49..cad37f50e7 100644 --- a/synapse/handlers/_base.py +++ b/synapse/handlers/_base.py @@ -18,7 +18,7 @@ from twisted.internet import defer from synapse.api.errors import LimitExceededError, SynapseError, AuthError from synapse.crypto.event_signing import add_hashes_and_signatures from synapse.api.constants import Membership, EventTypes -from synapse.types import UserID, RoomAlias +from synapse.types import UserID, RoomAlias, Requester from synapse.push.action_generator import ActionGenerator from synapse.util.logcontext import PreserveLoggingContext @@ -319,7 +319,8 @@ class BaseHandler(object): if member_event.type != EventTypes.Member: continue - if not self.hs.is_mine(UserID.from_string(member_event.state_key)): + target_user = UserID.from_string(member_event.state_key) + if not self.hs.is_mine(target_user): continue if member_event.content["membership"] not in { @@ -341,18 +342,13 @@ class BaseHandler(object): # and having homeservers have their own users leave keeps more # of that decision-making and control local to the guest-having # homeserver. - message_handler = self.hs.get_handlers().message_handler - yield message_handler.create_and_send_event( - { - "type": EventTypes.Member, - "state_key": member_event.state_key, - "content": { - "membership": Membership.LEAVE, - "kind": "guest" - }, - "room_id": member_event.room_id, - "sender": member_event.state_key - }, + requester = Requester(target_user, "", True) + handler = self.hs.get_handlers().room_member_handler + yield handler.update_membership( + requester, + target_user, + member_event.room_id, + "leave", ratelimit=False, ) except Exception as e: diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 04916d4e24..8c8bacf5dd 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -404,7 +404,14 @@ class RoomMemberHandler(BaseHandler): @defer.inlineCallbacks def update_membership( - self, requester, target, room_id, action, txn_id=None, room_hosts=None + self, + requester, + target, + room_id, + action, + txn_id=None, + room_hosts=None, + ratelimit=True, ): effective_membership_state = action if action in ["kick", "unban"]: @@ -451,7 +458,7 @@ class RoomMemberHandler(BaseHandler): yield msg_handler.send_event( event, context, - ratelimit=True, + ratelimit=ratelimit, is_guest=requester.is_guest, room_hosts=room_hosts, ) From 8168341e9bf8aa3c29b3fde9af12df0f1839baf1 Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Mon, 15 Feb 2016 17:14:34 +0000 Subject: [PATCH 165/349] Use update_membership for profile updates --- synapse/handlers/profile.py | 28 ++++++++++++---------------- synapse/handlers/room.py | 4 ---- 2 files changed, 12 insertions(+), 20 deletions(-) diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index 629e6e3594..2850db4a1f 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -16,8 +16,7 @@ from 
twisted.internet import defer from synapse.api.errors import SynapseError, AuthError, CodeMessageException -from synapse.api.constants import EventTypes, Membership -from synapse.types import UserID +from synapse.types import UserID, Requester from synapse.util import unwrapFirstError from ._base import BaseHandler @@ -208,21 +207,18 @@ class ProfileHandler(BaseHandler): ) for j in joins: - content = { - "membership": Membership.JOIN, - } - - yield collect_presencelike_data(self.distributor, user, content) - - msg_handler = self.hs.get_handlers().message_handler + handler = self.hs.get_handlers().room_member_handler try: - yield msg_handler.create_and_send_event({ - "type": EventTypes.Member, - "room_id": j.room_id, - "state_key": user.to_string(), - "content": content, - "sender": user.to_string() - }, ratelimit=False) + # Assume the user isn't a guest because we don't let guests set + # profile or avatar data. + requester = Requester(user, "", False) + yield handler.update_membership( + requester, + user, + j.room_id, + "join", # We treat a profile update like a join. + ratelimit=False, + ) except Exception as e: logger.warn( "Failed to update join event for room %s - %s", diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 8c8bacf5dd..505fb383ec 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -41,10 +41,6 @@ logger = logging.getLogger(__name__) id_server_scheme = "https://" -def collect_presencelike_data(distributor, user, content): - return distributor.fire("collect_presencelike_data", user, content) - - def user_left_room(distributor, user, room_id): return preserve_context_over_fn( distributor.fire, From e560045cfd73e2dbfa6a272fc298dab820e6e943 Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Mon, 15 Feb 2016 18:13:10 +0000 Subject: [PATCH 166/349] Simplify room creation code --- synapse/handlers/room.py | 68 ++++++++++++++-------------------- synapse/rest/client/v1/room.py | 18 ++------- 2 files changed, 31 insertions(+), 55 deletions(-) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index b2de2cd0c0..8e3c86d3a7 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -81,20 +81,20 @@ class RoomCreationHandler(BaseHandler): } @defer.inlineCallbacks - def create_room(self, user_id, room_id, config): + def create_room(self, requester, config): """ Creates a new room. Args: - user_id (str): The ID of the user creating the new room. - room_id (str): The proposed ID for the new room. Can be None, in - which case one will be created for you. + requester (Requester): The user who requested the room creation. config (dict) : A dict of configuration options. Returns: The new room ID. Raises: - SynapseError if the room ID was taken, couldn't be stored, or - something went horribly wrong. + SynapseError if the room ID couldn't be stored, or something went + horribly wrong. """ + user_id = requester.user.to_string() + self.ratelimit(user_id) if "room_alias_name" in config: @@ -126,40 +126,28 @@ class RoomCreationHandler(BaseHandler): is_public = config.get("visibility", None) == "public" - if room_id: - # Ensure room_id is the correct type - room_id_obj = RoomID.from_string(room_id) - if not self.hs.is_mine(room_id_obj): - raise SynapseError(400, "Room id must be local") - - yield self.store.store_room( - room_id=room_id, - room_creator_user_id=user_id, - is_public=is_public - ) - else: - # autogen room IDs and try to create it. 
We may clash, so just - # try a few times till one goes through, giving up eventually. - attempts = 0 - room_id = None - while attempts < 5: - try: - random_string = stringutils.random_string(18) - gen_room_id = RoomID.create( - random_string, - self.hs.hostname, - ) - yield self.store.store_room( - room_id=gen_room_id.to_string(), - room_creator_user_id=user_id, - is_public=is_public - ) - room_id = gen_room_id.to_string() - break - except StoreError: - attempts += 1 - if not room_id: - raise StoreError(500, "Couldn't generate a room ID.") + # autogen room IDs and try to create it. We may clash, so just + # try a few times till one goes through, giving up eventually. + attempts = 0 + room_id = None + while attempts < 5: + try: + random_string = stringutils.random_string(18) + gen_room_id = RoomID.create( + random_string, + self.hs.hostname, + ) + yield self.store.store_room( + room_id=gen_room_id.to_string(), + room_creator_user_id=user_id, + is_public=is_public + ) + room_id = gen_room_id.to_string() + break + except StoreError: + attempts += 1 + if not room_id: + raise StoreError(500, "Couldn't generate a room ID.") if room_alias: directory_handler = self.hs.get_handlers().directory_handler diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 81bfe377bd..d3c1b359a2 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -63,24 +63,12 @@ class RoomCreateRestServlet(ClientV1RestServlet): def on_POST(self, request): requester = yield self.auth.get_user_by_req(request) - room_config = self.get_room_config(request) - info = yield self.make_room( - room_config, - requester.user, - None, - ) - room_config.update(info) - defer.returnValue((200, info)) - - @defer.inlineCallbacks - def make_room(self, room_config, auth_user, room_id): handler = self.handlers.room_creation_handler info = yield handler.create_room( - user_id=auth_user.to_string(), - room_id=room_id, - config=room_config + requester, self.get_room_config(request) ) - defer.returnValue(info) + + defer.returnValue((200, info)) def get_room_config(self, request): try: From 1a2197d7bf62437208643f750ee757b8b85e2db6 Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Mon, 15 Feb 2016 18:13:10 +0000 Subject: [PATCH 167/349] Simplify room creation code --- synapse/handlers/room.py | 62 ++++++++++++++-------------------- synapse/rest/client/v1/room.py | 18 ++-------- 2 files changed, 28 insertions(+), 52 deletions(-) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 505fb383ec..bdaa05e0b6 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -76,13 +76,11 @@ class RoomCreationHandler(BaseHandler): } @defer.inlineCallbacks - def create_room(self, user_id, room_id, config): + def create_room(self, requester, config): """ Creates a new room. Args: user_id (str): The ID of the user creating the new room. - room_id (str): The proposed ID for the new room. Can be None, in - which case one will be created for you. config (dict) : A dict of configuration options. Returns: The new room ID. @@ -90,6 +88,8 @@ class RoomCreationHandler(BaseHandler): SynapseError if the room ID was taken, couldn't be stored, or something went horribly wrong. 
""" + user_id = requester.user.to_string() + self.ratelimit(user_id) if "room_alias_name" in config: @@ -121,40 +121,28 @@ class RoomCreationHandler(BaseHandler): is_public = config.get("visibility", None) == "public" - if room_id: - # Ensure room_id is the correct type - room_id_obj = RoomID.from_string(room_id) - if not self.hs.is_mine(room_id_obj): - raise SynapseError(400, "Room id must be local") - - yield self.store.store_room( - room_id=room_id, - room_creator_user_id=user_id, - is_public=is_public - ) - else: - # autogen room IDs and try to create it. We may clash, so just - # try a few times till one goes through, giving up eventually. - attempts = 0 - room_id = None - while attempts < 5: - try: - random_string = stringutils.random_string(18) - gen_room_id = RoomID.create( - random_string, - self.hs.hostname, - ) - yield self.store.store_room( - room_id=gen_room_id.to_string(), - room_creator_user_id=user_id, - is_public=is_public - ) - room_id = gen_room_id.to_string() - break - except StoreError: - attempts += 1 - if not room_id: - raise StoreError(500, "Couldn't generate a room ID.") + # autogen room IDs and try to create it. We may clash, so just + # try a few times till one goes through, giving up eventually. + attempts = 0 + room_id = None + while attempts < 5: + try: + random_string = stringutils.random_string(18) + gen_room_id = RoomID.create( + random_string, + self.hs.hostname, + ) + yield self.store.store_room( + room_id=gen_room_id.to_string(), + room_creator_user_id=user_id, + is_public=is_public + ) + room_id = gen_room_id.to_string() + break + except StoreError: + attempts += 1 + if not room_id: + raise StoreError(500, "Couldn't generate a room ID.") if room_alias: directory_handler = self.hs.get_handlers().directory_handler diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index f8cd746a88..5f5c26a91c 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -63,24 +63,12 @@ class RoomCreateRestServlet(ClientV1RestServlet): def on_POST(self, request): requester = yield self.auth.get_user_by_req(request) - room_config = self.get_room_config(request) - info = yield self.make_room( - room_config, - requester.user, - None, - ) - room_config.update(info) - defer.returnValue((200, info)) - - @defer.inlineCallbacks - def make_room(self, room_config, auth_user, room_id): handler = self.handlers.room_creation_handler info = yield handler.create_room( - user_id=auth_user.to_string(), - room_id=room_id, - config=room_config + requester, self.get_room_config(request) ) - defer.returnValue(info) + + defer.returnValue((200, info)) def get_room_config(self, request): try: From 4bfb32f685cff919141a3fc0cd9179447febc765 Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Mon, 15 Feb 2016 18:21:30 +0000 Subject: [PATCH 168/349] Branch off member and non member sends Unclean, needs tidy-up, but works --- synapse/handlers/directory.py | 2 +- synapse/handlers/federation.py | 4 +- synapse/handlers/message.py | 66 ++++++++++++++-------------- synapse/handlers/room.py | 80 +++++++++++++++++++--------------- synapse/rest/client/v1/room.py | 21 ++++++--- 5 files changed, 99 insertions(+), 74 deletions(-) diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 4efecb1ffd..e0a778e7ff 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -216,7 +216,7 @@ class DirectoryHandler(BaseHandler): aliases = yield self.store.get_aliases_for_room(room_id) msg_handler = 
self.hs.get_handlers().message_handler - yield msg_handler.create_and_send_event({ + yield msg_handler.create_and_send_nonmember_event({ "type": EventTypes.Aliases, "state_key": self.hs.hostname, "room_id": room_id, diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index da55d43541..ac15f9e5dd 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1658,7 +1658,7 @@ class FederationHandler(BaseHandler): self.auth.check(event, context.current_state) yield self._validate_keyserver(event, auth_events=context.current_state) member_handler = self.hs.get_handlers().room_member_handler - yield member_handler.send_membership_event(event, context) + yield member_handler.send_membership_event(event, context, from_client=False) else: destinations = set([x.split(":", 1)[-1] for x in (sender, room_id)]) yield self.replication_layer.forward_third_party_invite( @@ -1687,7 +1687,7 @@ class FederationHandler(BaseHandler): # TODO: Make sure the signatures actually are correct. event.signatures.update(returned_invite.signatures) member_handler = self.hs.get_handlers().room_member_handler - yield member_handler.send_membership_event(event, context) + yield member_handler.send_membership_event(event, context, from_client=False) @defer.inlineCallbacks def add_display_name_to_third_party_invite(self, event_dict, event, context): diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index a94fad1735..05dab172b8 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -16,7 +16,7 @@ from twisted.internet import defer from synapse.api.constants import EventTypes, Membership -from synapse.api.errors import AuthError, Codes +from synapse.api.errors import AuthError, Codes, SynapseError from synapse.streams.config import PaginationConfig from synapse.events.utils import serialize_event from synapse.events.validator import EventValidator @@ -216,7 +216,7 @@ class MessageHandler(BaseHandler): defer.returnValue((event, context)) @defer.inlineCallbacks - def send_event(self, event, context, ratelimit=True, is_guest=False, room_hosts=None): + def send_nonmember_event(self, event, context, ratelimit=True): """ Persists and notifies local clients and federation of an event. @@ -226,61 +226,63 @@ class MessageHandler(BaseHandler): ratelimit (bool): Whether to rate limit this send. is_guest (bool): Whether the sender is a guest. """ + if event.type == EventTypes.Member: + raise SynapseError( + 500, + "Tried to send member even through non-member codepath" + ) + user = UserID.from_string(event.sender) assert self.hs.is_mine(user), "User must be our own: %s" % (user,) if event.is_state(): - prev_state = context.current_state.get((event.type, event.state_key)) - if prev_state and event.user_id == prev_state.user_id: - prev_content = encode_canonical_json(prev_state.content) - next_content = encode_canonical_json(event.content) - if prev_content == next_content: - # Duplicate suppression for state updates with same sender - # and content. 
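The duplicate suppression above hinges on canonical JSON: two content dicts that differ only in key order encode to the same bytes, so a resent state event with identical content can be detected as a no-op. A standalone sketch of the same check, using plain dicts rather than event objects (the event shape here is an assumption for illustration only):

    from canonicaljson import encode_canonical_json

    def is_noop_state_update(prev_event, new_event):
        # A state update is a no-op if the same sender resends state whose
        # content canonicalises to exactly the same bytes.
        if prev_event is None or prev_event["sender"] != new_event["sender"]:
            return False
        return (
            encode_canonical_json(prev_event["content"])
            == encode_canonical_json(new_event["content"])
        )

    assert is_noop_state_update(
        {"sender": "@alice:example.com", "content": {"name": "Room", "topic": "x"}},
        {"sender": "@alice:example.com", "content": {"topic": "x", "name": "Room"}},
    )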
- defer.returnValue(prev_state) + prev_state = self.deduplicate_state_event(event, context) + if prev_state is not None: + defer.returnValue(prev_state) - if event.type == EventTypes.Member: - member_handler = self.hs.get_handlers().room_member_handler - yield member_handler.send_membership_event( - event, - context, - is_guest=is_guest, - ratelimit=ratelimit, - room_hosts=room_hosts - ) - else: - yield self.handle_new_client_event( - event=event, - context=context, - ratelimit=ratelimit, - ) + yield self.handle_new_client_event( + event=event, + context=context, + ratelimit=ratelimit, + ) if event.type == EventTypes.Message: presence = self.hs.get_handlers().presence_handler with PreserveLoggingContext(): presence.bump_presence_active_time(user) + def deduplicate_state_event(self, event, context): + prev_state = context.current_state.get((event.type, event.state_key)) + if prev_state and event.user_id == prev_state.user_id: + prev_content = encode_canonical_json(prev_state.content) + next_content = encode_canonical_json(event.content) + if prev_content == next_content: + return prev_state + return None + @defer.inlineCallbacks - def create_and_send_event(self, event_dict, ratelimit=True, - token_id=None, txn_id=None, is_guest=False, - room_hosts=None): + def create_and_send_nonmember_event( + self, + event_dict, + ratelimit=True, + token_id=None, + txn_id=None + ): """ Creates an event, then sends it. - See self.create_event and self.send_event. + See self.create_event and self.send_nonmember_event. """ event, context = yield self.create_event( event_dict, token_id=token_id, txn_id=txn_id ) - yield self.send_event( + yield self.send_nonmember_event( event, context, ratelimit=ratelimit, - is_guest=is_guest, - room_hosts=room_hosts, ) defer.returnValue(event) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index bdaa05e0b6..5d4e87b3b0 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -179,13 +179,24 @@ class RoomCreationHandler(BaseHandler): ) msg_handler = self.hs.get_handlers().message_handler + room_member_handler = self.hs.get_handlers().room_member_handler for event in creation_events: - yield msg_handler.create_and_send_event(event, ratelimit=False) + if event["type"] == EventTypes.Member: + # TODO(danielwh): This is hideous + yield room_member_handler.update_membership( + requester, + user, + room_id, + "join", + ratelimit=False, + ) + else: + yield msg_handler.create_and_send_nonmember_event(event, ratelimit=False) if "name" in config: name = config["name"] - yield msg_handler.create_and_send_event({ + yield msg_handler.create_and_send_nonmember_event({ "type": EventTypes.Name, "room_id": room_id, "sender": user_id, @@ -195,7 +206,7 @@ class RoomCreationHandler(BaseHandler): if "topic" in config: topic = config["topic"] - yield msg_handler.create_and_send_event({ + yield msg_handler.create_and_send_nonmember_event({ "type": EventTypes.Topic, "room_id": room_id, "sender": user_id, @@ -204,13 +215,13 @@ class RoomCreationHandler(BaseHandler): }, ratelimit=False) for invitee in invite_list: - yield msg_handler.create_and_send_event({ - "type": EventTypes.Member, - "state_key": invitee, - "room_id": room_id, - "sender": user_id, - "content": {"membership": Membership.INVITE}, - }, ratelimit=False) + room_member_handler.update_membership( + requester, + UserID.from_string(invitee), + room_id, + "invite", + ratelimit=False, + ) for invite_3pid in invite_3pid_list: id_server = invite_3pid["id_server"] @@ -222,7 +233,7 @@ class 
RoomCreationHandler(BaseHandler): medium, address, id_server, - token_id=None, + requester, txn_id=None, ) @@ -439,12 +450,14 @@ class RoomMemberHandler(BaseHandler): errcode=Codes.BAD_STATE ) - yield msg_handler.send_event( + member_handler = self.hs.get_handlers().room_member_handler + yield member_handler.send_membership_event( event, context, - ratelimit=ratelimit, is_guest=requester.is_guest, + ratelimit=ratelimit, room_hosts=room_hosts, + from_client=True, ) if action == "forget": @@ -452,7 +465,7 @@ class RoomMemberHandler(BaseHandler): @defer.inlineCallbacks def send_membership_event( - self, event, context, is_guest=False, room_hosts=None, ratelimit=True + self, event, context, is_guest=False, room_hosts=None, ratelimit=True, from_client=True, ): """ Change the membership status of a user in a room. @@ -461,6 +474,16 @@ class RoomMemberHandler(BaseHandler): Raises: SynapseError if there was a problem changing the membership. """ + if from_client: + user = UserID.from_string(event.sender) + + assert self.hs.is_mine(user), "User must be our own: %s" % (user,) + + if event.is_state(): + prev_state = self.hs.get_handlers().message_handler.deduplicate_state_event(event, context) + if prev_state is not None: + return + target_user_id = event.state_key target_user = UserID.from_string(event.state_key) @@ -549,13 +572,11 @@ class RoomMemberHandler(BaseHandler): room_id, event.user_id ) - defer.returnValue({"room_id": room_id}) return # FIXME: This isn't idempotency. if prev_state and prev_state.membership == event.membership: # double same action, treat this event as a NOOP. - defer.returnValue({}) return yield self.handle_new_client_event( @@ -569,8 +590,6 @@ class RoomMemberHandler(BaseHandler): user = UserID.from_string(event.user_id) user_left_room(self.distributor, user, event.room_id) - defer.returnValue({"room_id": room_id}) - @defer.inlineCallbacks def lookup_room_alias(self, room_alias): """ @@ -657,7 +676,7 @@ class RoomMemberHandler(BaseHandler): medium, address, id_server, - token_id, + requester, txn_id ): invitee = yield self._lookup_3pid( @@ -665,19 +684,12 @@ class RoomMemberHandler(BaseHandler): ) if invitee: - # make sure it looks like a user ID; it'll throw if it's invalid. 
- UserID.from_string(invitee) - yield self.hs.get_handlers().message_handler.create_and_send_event( - { - "type": EventTypes.Member, - "content": { - "membership": unicode("invite") - }, - "room_id": room_id, - "sender": inviter.to_string(), - "state_key": invitee, - }, - token_id=token_id, + handler = self.hs.get_handlers().room_member_handler + yield handler.update_membership( + requester, + UserID.from_string(invitee), + room_id, + "invite", txn_id=txn_id, ) else: @@ -687,7 +699,7 @@ class RoomMemberHandler(BaseHandler): address, room_id, inviter, - token_id, + requester.access_token_id, txn_id=txn_id ) @@ -798,7 +810,7 @@ class RoomMemberHandler(BaseHandler): ) ) msg_handler = self.hs.get_handlers().message_handler - yield msg_handler.create_and_send_event( + yield msg_handler.create_and_send_nonmember_event( { "type": EventTypes.ThirdPartyInvite, "content": { diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 5f5c26a91c..179fe9a010 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -150,10 +150,21 @@ class RoomStateEventRestServlet(ClientV1RestServlet): event_dict["state_key"] = state_key msg_handler = self.handlers.message_handler - yield msg_handler.create_and_send_event( - event_dict, token_id=requester.access_token_id, txn_id=txn_id, + event, context = yield msg_handler.create_event( + event_dict, + token_id=requester.access_token_id, + txn_id=txn_id, ) + if event_type == EventTypes.Member: + yield self.handlers.room_member_handler.send_membership_event( + event, + context, + is_guest=requester.is_guest, + ) + else: + yield msg_handler.send_nonmember_event(event, context) + defer.returnValue((200, {})) @@ -171,7 +182,7 @@ class RoomSendEventRestServlet(ClientV1RestServlet): content = _parse_json(request) msg_handler = self.handlers.message_handler - event = yield msg_handler.create_and_send_event( + event = yield msg_handler.create_and_send_nonmember_event( { "type": event_type, "content": content, @@ -434,7 +445,7 @@ class RoomMembershipRestServlet(ClientV1RestServlet): content["medium"], content["address"], content["id_server"], - requester.access_token_id, + requester, txn_id ) defer.returnValue((200, {})) @@ -490,7 +501,7 @@ class RoomRedactEventRestServlet(ClientV1RestServlet): content = _parse_json(request) msg_handler = self.handlers.message_handler - event = yield msg_handler.create_and_send_event( + event = yield msg_handler.create_and_send_nonmember_event( { "type": EventTypes.Redaction, "content": content, From 04686df17ae26f86484965365d12039161d8ee2d Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Tue, 16 Feb 2016 11:52:46 +0000 Subject: [PATCH 169/349] Add comment --- synapse/handlers/profile.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index 2850db4a1f..f3e73d926e 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -217,7 +217,7 @@ class ProfileHandler(BaseHandler): user, j.room_id, "join", # We treat a profile update like a join. - ratelimit=False, + ratelimit=False, # Try to hide that these events aren't atomic. ) except Exception as e: logger.warn( From 1f403325acfe355125eeb4e7d7f9f04bf9fb807e Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Tue, 16 Feb 2016 12:00:50 +0000 Subject: [PATCH 170/349] Tidy? 
up room creation event sending --- synapse/handlers/room.py | 135 +++++++++++++++++++++------------------ 1 file changed, 72 insertions(+), 63 deletions(-) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 722dadde77..e384370d7e 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -168,9 +168,14 @@ class RoomCreationHandler(BaseHandler): creation_content = config.get("creation_content", {}) - user = UserID.from_string(user_id) - creation_events = self._create_events_for_new_room( - user, room_id, + msg_handler = self.hs.get_handlers().message_handler + room_member_handler = self.hs.get_handlers().room_member_handler + + yield self._send_events_for_new_room( + requester, + room_id, + msg_handler, + room_member_handler, preset_config=preset_config, invite_list=invite_list, initial_state=initial_state, @@ -178,22 +183,6 @@ class RoomCreationHandler(BaseHandler): room_alias=room_alias, ) - msg_handler = self.hs.get_handlers().message_handler - room_member_handler = self.hs.get_handlers().room_member_handler - - for event in creation_events: - if event["type"] == EventTypes.Member: - # TODO(danielwh): This is hideous - yield room_member_handler.update_membership( - requester, - user, - room_id, - "join", - ratelimit=False, - ) - else: - yield msg_handler.create_and_send_nonmember_event(event, ratelimit=False) - if "name" in config: name = config["name"] yield msg_handler.create_and_send_nonmember_event({ @@ -229,7 +218,7 @@ class RoomCreationHandler(BaseHandler): medium = invite_3pid["medium"] yield self.hs.get_handlers().room_member_handler.do_3pid_invite( room_id, - user, + requester.user, medium, address, id_server, @@ -247,19 +236,19 @@ class RoomCreationHandler(BaseHandler): defer.returnValue(result) - def _create_events_for_new_room(self, creator, room_id, preset_config, - invite_list, initial_state, creation_content, - room_alias): - config = RoomCreationHandler.PRESETS_DICT[preset_config] - - creator_id = creator.to_string() - - event_keys = { - "room_id": room_id, - "sender": creator_id, - "state_key": "", - } - + @defer.inlineCallbacks + def _send_events_for_new_room( + self, + creator, # A Requester object. 
+ room_id, + msg_handler, + room_member_handler, + preset_config, + invite_list, + initial_state, + creation_content, + room_alias + ): def create(etype, content, **kwargs): e = { "type": etype, @@ -271,26 +260,39 @@ class RoomCreationHandler(BaseHandler): return e - creation_content.update({"creator": creator.to_string()}) - creation_event = create( + @defer.inlineCallbacks + def send(etype, content, **kwargs): + event = create(etype, content, **kwargs) + yield msg_handler.create_and_send_nonmember_event(event, ratelimit=False) + + config = RoomCreationHandler.PRESETS_DICT[preset_config] + + creator_id = creator.user.to_string() + + event_keys = { + "room_id": room_id, + "sender": creator_id, + "state_key": "", + } + + creation_content.update({"creator": creator_id}) + yield send( etype=EventTypes.Create, content=creation_content, ) - join_event = create( - etype=EventTypes.Member, - state_key=creator_id, - content={ - "membership": Membership.JOIN, - }, + yield room_member_handler.update_membership( + creator, + creator.user, + room_id, + "join", + ratelimit=False, ) - returned_events = [creation_event, join_event] - if (EventTypes.PowerLevels, '') not in initial_state: power_level_content = { "users": { - creator.to_string(): 100, + creator_id: 100, }, "users_default": 0, "events": { @@ -312,45 +314,35 @@ class RoomCreationHandler(BaseHandler): for invitee in invite_list: power_level_content["users"][invitee] = 100 - power_levels_event = create( + yield send( etype=EventTypes.PowerLevels, content=power_level_content, ) - returned_events.append(power_levels_event) - if room_alias and (EventTypes.CanonicalAlias, '') not in initial_state: - room_alias_event = create( + yield send( etype=EventTypes.CanonicalAlias, content={"alias": room_alias.to_string()}, ) - returned_events.append(room_alias_event) - if (EventTypes.JoinRules, '') not in initial_state: - join_rules_event = create( + yield send( etype=EventTypes.JoinRules, content={"join_rule": config["join_rules"]}, ) - returned_events.append(join_rules_event) - if (EventTypes.RoomHistoryVisibility, '') not in initial_state: - history_event = create( + yield send( etype=EventTypes.RoomHistoryVisibility, content={"history_visibility": config["history_visibility"]} ) - returned_events.append(history_event) - for (etype, state_key), content in initial_state.items(): - returned_events.append(create( + yield send( etype=etype, state_key=state_key, content=content, - )) - - return returned_events + ) class RoomMemberHandler(BaseHandler): @@ -465,12 +457,28 @@ class RoomMemberHandler(BaseHandler): @defer.inlineCallbacks def send_membership_event( - self, event, context, is_guest=False, room_hosts=None, ratelimit=True, from_client=True, + self, + event, + context, + is_guest=False, + room_hosts=None, + ratelimit=True, + from_client=True, ): """ Change the membership status of a user in a room. Args: - event (SynapseEvent): The membership event + event (SynapseEvent): The membership event. + context: The context of the event. + is_guest (bool): Whether the sender is a guest. + room_hosts ([str]): Homeservers which are likely to already be in + the room, and could be danced with in order to join this + homeserver for the first time. + ratelimit (bool): Whether to rate limit this request. + from_client (bool): Whether this request is the result of a local + client request (rather than over federation). If so, we will + perform extra checks, like that this homeserver can act as this + client. 
Raises: SynapseError if there was a problem changing the membership. """ @@ -480,7 +488,8 @@ class RoomMemberHandler(BaseHandler): assert self.hs.is_mine(user), "User must be our own: %s" % (user,) if event.is_state(): - prev_state = self.hs.get_handlers().message_handler.deduplicate_state_event(event, context) + message_handler = self.hs.get_handlers().message_handler + prev_state = message_handler.deduplicate_state_event(event, context) if prev_state is not None: return From d1fb790818e0b93019850ca5db3ffba2baddc745 Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Tue, 16 Feb 2016 14:25:23 +0000 Subject: [PATCH 171/349] Some cleanup --- synapse/handlers/message.py | 14 +++++++++---- synapse/handlers/room.py | 39 +++++++++++++++++++------------------ 2 files changed, 30 insertions(+), 23 deletions(-) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 05dab172b8..9c3471f2e3 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -253,12 +253,18 @@ class MessageHandler(BaseHandler): presence.bump_presence_active_time(user) def deduplicate_state_event(self, event, context): - prev_state = context.current_state.get((event.type, event.state_key)) - if prev_state and event.user_id == prev_state.user_id: - prev_content = encode_canonical_json(prev_state.content) + """ + Checks whether event is in the latest resolved state in context. + + If so, returns the version of the event in context. + Otherwise, returns None. + """ + prev_event = context.current_state.get((event.type, event.state_key)) + if prev_event and event.user_id == prev_event.user_id: + prev_content = encode_canonical_json(prev_event.content) next_content = encode_canonical_json(event.content) if prev_content == next_content: - return prev_state + return prev_event return None @defer.inlineCallbacks diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index e384370d7e..b7ea321a9b 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -493,35 +493,24 @@ class RoomMemberHandler(BaseHandler): if prev_state is not None: return - target_user_id = event.state_key target_user = UserID.from_string(event.state_key) prev_state = context.current_state.get( - (EventTypes.Member, target_user_id), + (EventTypes.Member, target_user.to_string()), None ) room_id = event.room_id - # If we're trying to join a room then we have to do this differently - # if this HS is not currently in the room, i.e. we have to do the - # invite/join dance. if event.membership == Membership.JOIN: - if is_guest: - guest_access = context.current_state.get( - (EventTypes.GuestAccess, ""), - None - ) - is_guest_access_allowed = ( - guest_access - and guest_access.content - and "guest_access" in guest_access.content - and guest_access.content["guest_access"] == "can_join" - ) - if not is_guest_access_allowed: - raise AuthError(403, "Guest access not allowed") + if is_guest and not self._can_guest_join(context.current_state): + # This should be an auth check, but guests are a local concept, + # so don't really fit into the general auth process. + raise AuthError(403, "Guest access not allowed") - room_id = event.room_id + # If we're trying to join a room then we have to do this differently + # if this HS is not currently in the room, i.e. we have to do the + # invite/join dance. 
# XXX: We don't do an auth check if we are doing an invite # join dance for now, since we're kinda implicitly checking @@ -599,6 +588,18 @@ class RoomMemberHandler(BaseHandler): user = UserID.from_string(event.user_id) user_left_room(self.distributor, user, event.room_id) + def _can_guest_join(self, current_state): + """ + Returns whether a guest can join a room based on its current state. + """ + guest_access = current_state.get((EventTypes.GuestAccess, ""), None) + return ( + guest_access + and guest_access.content + and "guest_access" in guest_access.content + and guest_access.content["guest_access"] == "can_join" + ) + @defer.inlineCallbacks def lookup_room_alias(self, room_alias): """ From a9c9868957277408c7ae3956d73ff87964692b73 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Tue, 16 Feb 2016 15:53:38 +0000 Subject: [PATCH 172/349] Make adding push rules idempotent Also remove the **kwargs from the add_push_rule method. Fixes https://matrix.org/jira/browse/SYN-391 --- synapse/storage/push_rule.py | 168 ++++++++++++++++++----------------- 1 file changed, 86 insertions(+), 82 deletions(-) diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py index f9a48171ba..e19a81e41f 100644 --- a/synapse/storage/push_rule.py +++ b/synapse/storage/push_rule.py @@ -99,38 +99,36 @@ class PushRuleStore(SQLBaseStore): results.setdefault(row['user_name'], {})[row['rule_id']] = row['enabled'] defer.returnValue(results) - @defer.inlineCallbacks - def add_push_rule(self, before, after, **kwargs): - vals = kwargs - if 'conditions' in vals: - vals['conditions'] = json.dumps(vals['conditions']) - if 'actions' in vals: - vals['actions'] = json.dumps(vals['actions']) - - # we could check the rest of the keys are valid column names - # but sqlite will do that anyway so I think it's just pointless. - vals.pop("id", None) + def add_push_rule( + self, user_id, rule_id, priority_class, conditions, actions, + before=None, after=None + ): + conditions_json = json.dumps(conditions) + actions_json = json.dumps(actions) if before or after: - ret = yield self.runInteraction( + return self.runInteraction( "_add_push_rule_relative_txn", self._add_push_rule_relative_txn, - before=before, - after=after, - **vals + user_id, rule_id, priority_class, + conditions_json, actions_json, before, after, ) - defer.returnValue(ret) else: - ret = yield self.runInteraction( + return self.runInteraction( "_add_push_rule_highest_priority_txn", self._add_push_rule_highest_priority_txn, - **vals + user_id, rule_id, priority_class, + conditions_json, actions_json, ) - defer.returnValue(ret) - def _add_push_rule_relative_txn(self, txn, user_id, **kwargs): - after = kwargs.pop("after", None) - before = kwargs.pop("before", None) + def _add_push_rule_relative_txn( + self, txn, user_id, rule_id, priority_class, + conditions_json, actions_json, before, after + ): + # Lock the table since otherwise we'll have annoying races between the + # SELECT here and the UPSERT below. 
+ self.database_engine.lock_table(txn, "push_rules") + relative_to_rule = before or after res = self._simple_select_one_txn( @@ -149,69 +147,45 @@ class PushRuleStore(SQLBaseStore): "before/after rule not found: %s" % (relative_to_rule,) ) - priority_class = res["priority_class"] + base_priority_class = res["priority_class"] base_rule_priority = res["priority"] - if 'priority_class' in kwargs and kwargs['priority_class'] != priority_class: + if base_priority_class != priority_class: raise InconsistentRuleException( "Given priority class does not match class of relative rule" ) - new_rule = kwargs - new_rule.pop("before", None) - new_rule.pop("after", None) - new_rule['priority_class'] = priority_class - new_rule['user_name'] = user_id - new_rule['id'] = self._push_rule_id_gen.get_next_txn(txn) - - # check if the priority before/after is free - new_rule_priority = base_rule_priority - if after: - new_rule_priority -= 1 + if before: + # Higher priority rules are executed first, So adding a rule before + # a rule means giving it a higher priority than that rule. + new_rule_priority = base_rule_priority + 1 else: - new_rule_priority += 1 - - new_rule['priority'] = new_rule_priority + # We increment the priority of the existing rules to make space for + # the new rule. Therefore if we want this rule to appear after + # an existing rule we give it the priority of the existing rule, + # and then increment the priority of the existing rule. + new_rule_priority = base_rule_priority sql = ( - "SELECT COUNT(*) FROM push_rules" - " WHERE user_name = ? AND priority_class = ? AND priority = ?" + "UPDATE push_rules SET priority = priority + 1" + " WHERE user_name = ? AND priority_class = ? AND priority >= ?" ) + txn.execute(sql, (user_id, priority_class, new_rule_priority)) - res = txn.fetchall() - num_conflicting = res[0][0] - # if there are conflicting rules, bump everything - if num_conflicting: - sql = "UPDATE push_rules SET priority = priority " - if after: - sql += "-1" - else: - sql += "+1" - sql += " WHERE user_name = ? AND priority_class = ? AND priority " - if after: - sql += "<= ?" - else: - sql += ">= ?" - - txn.execute(sql, (user_id, priority_class, new_rule_priority)) - - txn.call_after( - self.get_push_rules_for_user.invalidate, (user_id,) + self._upsert_push_rule_txn( + txn, user_id, rule_id, priority_class, new_rule_priority, + conditions_json, actions_json, ) - txn.call_after( - self.get_push_rules_enabled_for_user.invalidate, (user_id,) - ) + def _add_push_rule_highest_priority_txn( + self, txn, user_id, rule_id, priority_class, + conditions_json, actions_json + ): + # Lock the table since otherwise we'll have annoying races between the + # SELECT here and the UPSERT below. 
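To make the before/after placement above concrete, here is the priority arithmetic on its own, with the SQL replaced by a plain dict of rule_id -> priority (all names in this sketch are invented; higher priority runs first):

    def add_rule_relative(rules, new_rule_id, base_rule_id, before):
        base_priority = rules[base_rule_id]
        # "before" = run earlier = strictly higher priority than the base rule.
        # "after"  = take the base rule's priority, then push the base rule and
        # everything above it up by one to make room.
        new_priority = base_priority + 1 if before else base_priority
        for rule_id, priority in list(rules.items()):
            if priority >= new_priority:
                rules[rule_id] = priority + 1
        rules[new_rule_id] = new_priority

    rules = {"a": 3, "b": 2, "c": 1}
    add_rule_relative(rules, "d", base_rule_id="b", before=False)
    # Execution order is now a, b, d, c.
    assert sorted(rules, key=rules.get, reverse=True) == ["a", "b", "d", "c"]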
+ self.database_engine.lock_table(txn, "push_rules") - self._simple_insert_txn( - txn, - table="push_rules", - values=new_rule, - ) - - def _add_push_rule_highest_priority_txn(self, txn, user_id, - priority_class, **kwargs): # find the highest priority rule in that class sql = ( "SELECT COUNT(*), MAX(priority) FROM push_rules" @@ -225,12 +199,48 @@ class PushRuleStore(SQLBaseStore): if how_many > 0: new_prio = highest_prio + 1 - # and insert the new rule - new_rule = kwargs - new_rule['id'] = self._push_rule_id_gen.get_next_txn(txn) - new_rule['user_name'] = user_id - new_rule['priority_class'] = priority_class - new_rule['priority'] = new_prio + self._upsert_push_rule_txn( + txn, + user_id, rule_id, priority_class, new_prio, + conditions_json, actions_json, + ) + + def _upsert_push_rule_txn( + self, txn, user_id, rule_id, priority_class, + priority, conditions_json, actions_json + ): + """Specialised version of _simple_upsert_txn that picks a push_rule_id + using the _push_rule_id_gen if it needs to insert the rule. It assumes + that the "push_rules" table is locked""" + + sql = ( + "UPDATE push_rules" + " SET priority_class = ?, priority = ?, conditions = ?, actions = ?" + " WHERE user_name = ? AND rule_id = ?" + ) + + txn.execute(sql, ( + priority_class, priority, conditions_json, actions_json, + user_id, rule_id, + )) + + if txn.rowcount == 0: + # We didn't update a row with the given rule_id so insert one + push_rule_id = self._push_rule_id_gen.get_next_txn(txn) + + self._simple_insert_txn( + txn, + table="push_rules", + values={ + "id": push_rule_id, + "user_name": user_id, + "rule_id": rule_id, + "priority_class": priority_class, + "priority": priority, + "conditions": conditions_json, + "actions": actions_json, + }, + ) txn.call_after( self.get_push_rules_for_user.invalidate, (user_id,) @@ -239,12 +249,6 @@ class PushRuleStore(SQLBaseStore): self.get_push_rules_enabled_for_user.invalidate, (user_id,) ) - self._simple_insert_txn( - txn, - table="push_rules", - values=new_rule, - ) - @defer.inlineCallbacks def delete_push_rule(self, user_id, rule_id): """ From 458782bf67ef7c188af752b0f455d4a0f9f4cdd5 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Tue, 16 Feb 2016 18:00:30 +0000 Subject: [PATCH 173/349] Fix typo in request validation for adding push rules. 
--- synapse/rest/client/v1/push_rule.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py index 96633a176c..7766b8be1d 100644 --- a/synapse/rest/client/v1/push_rule.py +++ b/synapse/rest/client/v1/push_rule.py @@ -400,7 +400,7 @@ def _filter_ruleset_with_path(ruleset, path): def _priority_class_from_spec(spec): if spec['template'] not in PRIORITY_CLASS_MAP.keys(): - raise InvalidRuleException("Unknown template: %s" % (spec['kind'])) + raise InvalidRuleException("Unknown template: %s" % (spec['template'])) pc = PRIORITY_CLASS_MAP[spec['template']] if spec['scope'] == 'device': From 6605adf6699f6c00d219de763d9d14e5b38ddea2 Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Tue, 16 Feb 2016 19:05:02 +0000 Subject: [PATCH 174/349] Some cleanup, some TODOs, more to do --- synapse/handlers/room.py | 132 ++++++++++++++++++--------------------- 1 file changed, 62 insertions(+), 70 deletions(-) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index b7ea321a9b..d4bb21e69e 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -482,9 +482,9 @@ class RoomMemberHandler(BaseHandler): Raises: SynapseError if there was a problem changing the membership. """ - if from_client: - user = UserID.from_string(event.sender) + user = UserID.from_string(event.sender) + if from_client: assert self.hs.is_mine(user), "User must be our own: %s" % (user,) if event.is_state(): @@ -502,81 +502,59 @@ class RoomMemberHandler(BaseHandler): room_id = event.room_id + handled = False + if event.membership == Membership.JOIN: if is_guest and not self._can_guest_join(context.current_state): # This should be an auth check, but guests are a local concept, # so don't really fit into the general auth process. raise AuthError(403, "Guest access not allowed") - # If we're trying to join a room then we have to do this differently - # if this HS is not currently in the room, i.e. we have to do the - # invite/join dance. - - # XXX: We don't do an auth check if we are doing an invite - # join dance for now, since we're kinda implicitly checking - # that we are allowed to join when we decide whether or not we - # need to do the invite/join dance. - - is_host_in_room = yield self.is_host_in_room(room_id, context) - if is_host_in_room: - should_do_dance = False - elif room_hosts: # TODO: Shouldn't this be remote_room_host? - should_do_dance = True - else: - inviter = yield self.get_inviter(event) - if not inviter: - # return the same error as join_room_alias does - raise SynapseError(404, "No known servers") - should_do_dance = not self.hs.is_mine(inviter) - room_hosts = [inviter.domain] + should_do_dance, room_hosts = yield self._should_do_dance( + room_id, + context, + (yield self.get_inviter(target_user.to_string(), room_id)), + room_hosts, + ) if should_do_dance: - handler = self.hs.get_handlers().federation_handler - yield handler.do_invite_join( + if len(room_hosts) == 0: + # return the same error as join_room_alias does + raise SynapseError(404, "No known servers") + + # We don't do an auth check if we are doing an invite + # join dance for now, since we're kinda implicitly checking + # that we are allowed to join when we decide whether or not we + # need to do the invite/join dance. 
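The should_do_dance decision above (implemented by _should_do_dance further down in this patch) reduces to a little pure logic; a standalone paraphrase, with function and argument names invented for illustration:

    def needs_remote_join(is_host_in_room, candidate_hosts, inviter_domain, local_domain):
        hosts = list(candidate_hosts)
        # Already participating in the room locally: a plain local join will do.
        if is_host_in_room:
            return False, hosts
        # Otherwise a remote inviter's server is the best guess for a server to
        # dance with; a local inviter adds nothing we don't already have.
        if inviter_domain is not None and inviter_domain != local_domain:
            hosts.append(inviter_domain)
        # Returning True with an empty host list corresponds to the
        # 404 "No known servers" above.
        return True, hosts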
+ yield self.hs.get_handlers().federation_handler.do_invite_join( room_hosts, room_id, event.user_id, event.content, ) - else: - logger.debug("Doing normal join") - - yield self.handle_new_client_event( - event, - context, - extra_users=[target_user], - ratelimit=ratelimit, + handled = True + if event.membership == Membership.LEAVE: + is_host_in_room = yield self.is_host_in_room(room_id, context) + if not is_host_in_room: + # Rejecting an invite, rather than leaving a joined room + handler = self.hs.get_handlers().federation_handler + inviter = yield self.get_inviter(target_user.to_string(), room_id) + if not inviter: + # return the same error as join_room_alias does + raise SynapseError(404, "No known servers") + yield handler.do_remotely_reject_invite( + [inviter.domain], + room_id, + event.user_id ) - - prev_state = context.current_state.get((event.type, event.state_key)) - if not prev_state or prev_state.membership != Membership.JOIN: - # Only fire user_joined_room if the user has acutally joined the - # room. Don't bother if the user is just changing their profile - # info. - user = UserID.from_string(event.user_id) - yield user_joined_room(self.distributor, user, room_id) - else: - if event.membership == Membership.LEAVE: - is_host_in_room = yield self.is_host_in_room(room_id, context) - if not is_host_in_room: - # Rejecting an invite, rather than leaving a joined room - handler = self.hs.get_handlers().federation_handler - inviter = yield self.get_inviter(event) - if not inviter: - # return the same error as join_room_alias does - raise SynapseError(404, "No known servers") - yield handler.do_remotely_reject_invite( - [inviter.domain], - room_id, - event.user_id - ) - return + handled = True # FIXME: This isn't idempotency. if prev_state and prev_state.membership == event.membership: # double same action, treat this event as a NOOP. return + if not handled: yield self.handle_new_client_event( event, context, @@ -584,9 +562,15 @@ class RoomMemberHandler(BaseHandler): ratelimit=ratelimit, ) + if event.membership == Membership.JOIN: + if not prev_state or prev_state.membership != Membership.JOIN: + # Only fire user_joined_room if the user has acutally joined the + # room. Don't bother if the user is just changing their profile + # info. + yield user_joined_room(self.distributor, target_user, room_id) + elif event.membership == Membership.LEAVE: if prev_state and prev_state.membership == Membership.JOIN: - user = UserID.from_string(event.user_id) - user_left_room(self.distributor, user, event.room_id) + user_left_room(self.distributor, target_user, room_id) def _can_guest_join(self, current_state): """ @@ -600,6 +584,21 @@ class RoomMemberHandler(BaseHandler): and guest_access.content["guest_access"] == "can_join" ) + @defer.inlineCallbacks + def _should_do_dance(self, room_id, context, inviter, room_hosts=None): + # TODO: Shouldn't this be remote_room_host? + room_hosts = room_hosts or [] + + # TODO(danielwh): This shouldn't need to yield for this check, we have a context. 
+ is_host_in_room = yield self.is_host_in_room(room_id, context) + if is_host_in_room: + defer.returnValue((False, room_hosts)) + + if inviter and not self.hs.is_mine(inviter): + room_hosts.append(inviter.domain) + + defer.returnValue((True, room_hosts)) + @defer.inlineCallbacks def lookup_room_alias(self, room_alias): """ @@ -625,24 +624,17 @@ class RoomMemberHandler(BaseHandler): defer.returnValue((RoomID.from_string(room_id), hosts)) + # TODO(danielwh): This should use the context, rather than looking up the store. @defer.inlineCallbacks - def get_inviter(self, event): + def get_inviter(self, user_id, room_id): # TODO(markjh): get prev_state from snapshot prev_state = yield self.store.get_room_member( - event.user_id, event.room_id + user_id, room_id ) - if prev_state and prev_state.membership == Membership.INVITE: defer.returnValue(UserID.from_string(prev_state.user_id)) - return - elif "third_party_invite" in event.content: - if "sender" in event.content["third_party_invite"]: - inviter = UserID.from_string( - event.content["third_party_invite"]["sender"] - ) - defer.returnValue(inviter) - defer.returnValue(None) + # TODO(danielwh): This looks insane. Please make it not insane. @defer.inlineCallbacks def is_host_in_room(self, room_id, context): is_host_in_room = yield self.auth.check_host_in_room( From 71d5d2c669139305b829bdfdbd403a0b8a52b66f Mon Sep 17 00:00:00 2001 From: Patrik Oldsberg Date: Wed, 17 Feb 2016 11:52:30 +0100 Subject: [PATCH 175/349] client/v1/room: include event_id in response to state event PUT, in accordance with the spec Signed-off-by: Patrik Oldsberg --- synapse/rest/client/v1/room.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index d3c1b359a2..24706f9383 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -150,11 +150,11 @@ class RoomStateEventRestServlet(ClientV1RestServlet): event_dict["state_key"] = state_key msg_handler = self.handlers.message_handler - yield msg_handler.create_and_send_event( + event = yield msg_handler.create_and_send_event( event_dict, token_id=requester.access_token_id, txn_id=txn_id, ) - defer.returnValue((200, {})) + defer.returnValue((200, {"event_id": event.event_id})) # TODO: Needs unit testing for generic events + feedback From 536f949a1a0e531314e023436c22859b5114376d Mon Sep 17 00:00:00 2001 From: Patrik Oldsberg Date: Wed, 17 Feb 2016 10:53:06 +0100 Subject: [PATCH 176/349] api/filtering: don't assume that event content will always be a dict Signed-off-by: Patrik Oldsberg --- synapse/api/filtering.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py index 6eff83e5f8..cd699ef27f 100644 --- a/synapse/api/filtering.py +++ b/synapse/api/filtering.py @@ -198,7 +198,10 @@ class Filter(object): sender = event.get("sender", None) if not sender: # Presence events have their 'sender' in content.user_id - sender = event.get("content", {}).get("user_id", None) + content = event.get("content") + # account_data has been allowed to have non-dict content, so check type first + if isinstance(content, dict): + sender = content.get("user_id") return self.check_fields( event.get("room_id", None), From 9e7900da1ebe892104ec61e8d3d45c4d956bd323 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 12 Feb 2016 16:06:49 +0000 Subject: [PATCH 177/349] Add wheeltimer impl --- synapse/util/wheel_timer.py | 88 ++++++++++++++++++++++++++++++++++ 
tests/util/test_wheel_timer.py | 74 ++++++++++++++++++++++++++++ 2 files changed, 162 insertions(+) create mode 100644 synapse/util/wheel_timer.py create mode 100644 tests/util/test_wheel_timer.py diff --git a/synapse/util/wheel_timer.py b/synapse/util/wheel_timer.py new file mode 100644 index 0000000000..b447b2456c --- /dev/null +++ b/synapse/util/wheel_timer.py @@ -0,0 +1,88 @@ +# -*- coding: utf-8 -*- +# Copyright 2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +class _Entry(object): + __slots__ = ["end_key", "queue"] + + def __init__(self, end_key): + self.end_key = end_key + self.queue = [] + + +class WheelTimer(object): + """Stores arbitrary objects that will be returned after their timers have + expired. + """ + + def __init__(self, bucket_size=5000): + """ + Args: + bucket_size (int): Size of buckets in ms. Corresponds roughly to the + accuracy of the timer. + """ + self.bucket_size = bucket_size + self.entries = [] + self.current_tick = 0 + + def insert(self, now, obj, then): + """Inserts object into timer. + + Args: + now (int): Current time in msec + obj (object): Object to be inserted + then (int): When to return the object strictly after. + """ + then_key = int(then / self.bucket_size) + 1 + for entry in self.entries: + # Add to first bucket we find. This should gracefully handle inserts + # for times in the past. + if entry.end_key >= then_key: + entry.queue.append(obj) + return + + next_key = int(now / self.bucket_size) + 1 + if self.entries: + last_key = self.entries[-1].end_key + else: + last_key = next_key + + # Handle the case when `then` is in the past and `entries` is empty. + then_key = max(last_key, then_key) + + # Add empty entries between the end of the current list and when we want + # to insert. This ensures there are no gaps. + self.entries.extend( + _Entry(key) for key in xrange(last_key, then_key + 1) + ) + + self.entries[-1].queue.append(obj) + + def fetch(self, now): + """Fetch any objects that have timed out + + Args: + now (ms): Current time in msec + + Returns: + list: List of objects that have timed out + """ + now_key = int(now / self.bucket_size) + + ret = [] + while self.entries and self.entries[0].end_key <= now_key: + ret.extend(self.entries.pop(0).queue) + + return ret diff --git a/tests/util/test_wheel_timer.py b/tests/util/test_wheel_timer.py new file mode 100644 index 0000000000..c44567e52e --- /dev/null +++ b/tests/util/test_wheel_timer.py @@ -0,0 +1,74 @@ +# -*- coding: utf-8 -*- +# Copyright 2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from .. import unittest + +from synapse.util.wheel_timer import WheelTimer + + +class WheelTimerTestCase(unittest.TestCase): + def test_single_insert_fetch(self): + wheel = WheelTimer(bucket_size=5) + + obj = object() + wheel.insert(100, obj, 150) + + self.assertListEqual(wheel.fetch(101), []) + self.assertListEqual(wheel.fetch(110), []) + self.assertListEqual(wheel.fetch(120), []) + self.assertListEqual(wheel.fetch(130), []) + self.assertListEqual(wheel.fetch(149), []) + self.assertListEqual(wheel.fetch(156), [obj]) + self.assertListEqual(wheel.fetch(170), []) + + def test_mutli_insert(self): + wheel = WheelTimer(bucket_size=5) + + obj1 = object() + obj2 = object() + obj3 = object() + wheel.insert(100, obj1, 150) + wheel.insert(105, obj2, 130) + wheel.insert(106, obj3, 160) + + self.assertListEqual(wheel.fetch(110), []) + self.assertListEqual(wheel.fetch(135), [obj2]) + self.assertListEqual(wheel.fetch(149), []) + self.assertListEqual(wheel.fetch(158), [obj1]) + self.assertListEqual(wheel.fetch(160), []) + self.assertListEqual(wheel.fetch(200), [obj3]) + self.assertListEqual(wheel.fetch(210), []) + + def test_insert_past(self): + wheel = WheelTimer(bucket_size=5) + + obj = object() + wheel.insert(100, obj, 50) + self.assertListEqual(wheel.fetch(120), [obj]) + + def test_insert_past_mutli(self): + wheel = WheelTimer(bucket_size=5) + + obj1 = object() + obj2 = object() + obj3 = object() + wheel.insert(100, obj1, 150) + wheel.insert(100, obj2, 140) + wheel.insert(100, obj3, 50) + self.assertListEqual(wheel.fetch(110), [obj3]) + self.assertListEqual(wheel.fetch(120), []) + self.assertListEqual(wheel.fetch(147), [obj2]) + self.assertListEqual(wheel.fetch(200), [obj1]) + self.assertListEqual(wheel.fetch(240), []) From a4e278bfe7972b367e7782102461881c65720c08 Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Wed, 17 Feb 2016 15:25:12 +0000 Subject: [PATCH 178/349] Respond to federated invite with non-empty context Currently, we magically perform an extra database hit to find the inviter, and use this to guess where we should send the event. Instead, fill in a valid context, so that other callers relying on the context actually have one. --- synapse/handlers/_base.py | 51 ++++++++++++++++++++++++++-- synapse/handlers/room.py | 52 +++++++---------------------- synapse/storage/event_federation.py | 8 ++--- 3 files changed, 65 insertions(+), 46 deletions(-) diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py index cad37f50e7..41e153c934 100644 --- a/synapse/handlers/_base.py +++ b/synapse/handlers/_base.py @@ -147,7 +147,7 @@ class BaseHandler(object): @defer.inlineCallbacks def _create_new_client_event(self, builder): - latest_ret = yield self.store.get_latest_events_in_room( + latest_ret = yield self.store.get_latest_event_ids_and_hashes_in_room( builder.room_id, ) @@ -156,7 +156,10 @@ class BaseHandler(object): else: depth = 1 - prev_events = [(e, h) for e, h, _ in latest_ret] + prev_events = [ + (event_id, prev_hashes) + for event_id, prev_hashes, _ in latest_ret + ] builder.prev_events = prev_events builder.depth = depth @@ -165,6 +168,31 @@ class BaseHandler(object): context = yield state_handler.compute_event_context(builder) + # If we've received an invite over federation, there are no latest + # events in the room, because we don't know enough about the graph + # fragment we received to treat it like a graph, so the above returned + # no relevant events. 
It may have returned some events (if we have + # joined and left the room), but not useful ones, like the invite. So we + # forcibly set our context to the invite we received over federation. + if ( + not self.is_host_in_room(context.current_state) and + builder.type == EventTypes.Member + ): + prev_member_event = yield self.store.get_room_member( + builder.sender, builder.room_id + ) + if prev_member_event: + builder.prev_events = ( + prev_member_event.event_id, + prev_member_event.prev_events + ) + + context = yield state_handler.compute_event_context( + builder, + old_state=(prev_member_event,), + outlier=True + ) + if builder.is_state(): builder.prev_state = yield self.store.add_event_hashes( context.prev_state_events @@ -187,6 +215,25 @@ class BaseHandler(object): (event, context,) ) + def is_host_in_room(self, current_state): + room_members = [ + (state_key, event.membership) + for ((event_type, state_key), event) in current_state.items() + if event_type == EventTypes.Member + ] + if len(room_members) == 0: + # has the room been created so we can join it? + create_event = current_state.get(("m.room.create", "")) + if create_event: + return True + for (state_key, membership) in room_members: + if ( + UserID.from_string(state_key).domain == self.hs.hostname + and membership == Membership.JOIN + ): + return True + return False + @defer.inlineCallbacks def handle_new_client_event(self, event, context, ratelimit=True, extra_users=[]): # We now need to go and hit out to wherever we need to hit out to. diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index d4bb21e69e..f85a5f2677 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -510,10 +510,9 @@ class RoomMemberHandler(BaseHandler): # so don't really fit into the general auth process. raise AuthError(403, "Guest access not allowed") - should_do_dance, room_hosts = yield self._should_do_dance( - room_id, + should_do_dance, room_hosts = self._should_do_dance( context, - (yield self.get_inviter(target_user.to_string(), room_id)), + (self.get_inviter(target_user.to_string(), context.current_state)), room_hosts, ) @@ -534,11 +533,11 @@ class RoomMemberHandler(BaseHandler): ) handled = True if event.membership == Membership.LEAVE: - is_host_in_room = yield self.is_host_in_room(room_id, context) + is_host_in_room = self.is_host_in_room(context.current_state) if not is_host_in_room: # Rejecting an invite, rather than leaving a joined room handler = self.hs.get_handlers().federation_handler - inviter = yield self.get_inviter(target_user.to_string(), room_id) + inviter = self.get_inviter(target_user.to_string(), context.current_state) if not inviter: # return the same error as join_room_alias does raise SynapseError(404, "No known servers") @@ -584,20 +583,18 @@ class RoomMemberHandler(BaseHandler): and guest_access.content["guest_access"] == "can_join" ) - @defer.inlineCallbacks - def _should_do_dance(self, room_id, context, inviter, room_hosts=None): + def _should_do_dance(self, context, inviter, room_hosts=None): # TODO: Shouldn't this be remote_room_host? room_hosts = room_hosts or [] - # TODO(danielwh): This shouldn't need to yield for this check, we have a context. 
- is_host_in_room = yield self.is_host_in_room(room_id, context) + is_host_in_room = self.is_host_in_room(context.current_state) if is_host_in_room: - defer.returnValue((False, room_hosts)) + return False, room_hosts if inviter and not self.hs.is_mine(inviter): room_hosts.append(inviter.domain) - defer.returnValue((True, room_hosts)) + return True, room_hosts @defer.inlineCallbacks def lookup_room_alias(self, room_alias): @@ -624,36 +621,11 @@ class RoomMemberHandler(BaseHandler): defer.returnValue((RoomID.from_string(room_id), hosts)) - # TODO(danielwh): This should use the context, rather than looking up the store. - @defer.inlineCallbacks - def get_inviter(self, user_id, room_id): - # TODO(markjh): get prev_state from snapshot - prev_state = yield self.store.get_room_member( - user_id, room_id - ) + def get_inviter(self, user_id, current_state): + prev_state = current_state.get((EventTypes.Member, user_id)) if prev_state and prev_state.membership == Membership.INVITE: - defer.returnValue(UserID.from_string(prev_state.user_id)) - - # TODO(danielwh): This looks insane. Please make it not insane. - @defer.inlineCallbacks - def is_host_in_room(self, room_id, context): - is_host_in_room = yield self.auth.check_host_in_room( - room_id, - self.hs.hostname - ) - if not is_host_in_room: - # is *anyone* in the room? - room_member_keys = [ - v for (k, v) in context.current_state.keys() if ( - k == "m.room.member" - ) - ] - if len(room_member_keys) == 0: - # has the room been created so we can join it? - create_event = context.current_state.get(("m.room.create", "")) - if create_event: - is_host_in_room = True - defer.returnValue(is_host_in_room) + return UserID.from_string(prev_state.user_id) + return None @defer.inlineCallbacks def get_joined_rooms_for_user(self, user): diff --git a/synapse/storage/event_federation.py b/synapse/storage/event_federation.py index ce2c794025..3489315e0d 100644 --- a/synapse/storage/event_federation.py +++ b/synapse/storage/event_federation.py @@ -114,10 +114,10 @@ class EventFederationStore(SQLBaseStore): retcol="event_id", ) - def get_latest_events_in_room(self, room_id): + def get_latest_event_ids_and_hashes_in_room(self, room_id): return self.runInteraction( - "get_latest_events_in_room", - self._get_latest_events_in_room, + "get_latest_event_ids_and_hashes_in_room", + self._get_latest_event_ids_and_hashes_in_room, room_id, ) @@ -132,7 +132,7 @@ class EventFederationStore(SQLBaseStore): desc="get_latest_event_ids_in_room", ) - def _get_latest_events_in_room(self, txn, room_id): + def _get_latest_event_ids_and_hashes_in_room(self, txn, room_id): sql = ( "SELECT e.event_id, e.depth FROM events as e " "INNER JOIN event_forward_extremities as f " From e5999bfb1a4aab56acecb59ed6d068442f5b11a0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 15 Feb 2016 17:10:40 +0000 Subject: [PATCH 179/349] Initial cut --- synapse/handlers/events.py | 43 +- synapse/handlers/message.py | 14 +- synapse/handlers/presence.py | 1798 +++++++---------- synapse/handlers/profile.py | 3 + synapse/handlers/sync.py | 22 + synapse/rest/client/v1/presence.py | 26 +- synapse/rest/client/v1/room.py | 18 +- synapse/rest/client/v2_alpha/receipts.py | 3 + synapse/rest/client/v2_alpha/sync.py | 16 +- synapse/storage/__init__.py | 50 +- synapse/storage/prepare_database.py | 2 +- synapse/storage/presence.py | 176 +- .../schema/delta/30/presence_stream.sql | 30 + synapse/storage/util/id_generators.py | 4 +- synapse/util/__init__.py | 2 +- tests/utils.py | 4 +- 16 files changed, 1004 insertions(+), 
1207 deletions(-) create mode 100644 synapse/storage/schema/delta/30/presence_stream.sql diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py index 4933c31c19..72a31a9755 100644 --- a/synapse/handlers/events.py +++ b/synapse/handlers/events.py @@ -19,6 +19,8 @@ from synapse.util.logutils import log_function from synapse.types import UserID from synapse.events.utils import serialize_event from synapse.util.logcontext import preserve_context_over_fn +from synapse.api.constants import Membership, EventTypes +from synapse.events import EventBase from ._base import BaseHandler @@ -126,11 +128,12 @@ class EventStreamHandler(BaseHandler): If `only_keys` is not None, events from keys will be sent down. """ auth_user = UserID.from_string(auth_user_id) + presence_handler = self.hs.get_handlers().presence_handler - try: - if affect_presence: - yield self.started_stream(auth_user) - + context = yield presence_handler.user_syncing( + auth_user_id, affect_presence=affect_presence, + ) + with context: if timeout: # If they've set a timeout set a minimum limit. timeout = max(timeout, 500) @@ -145,6 +148,34 @@ class EventStreamHandler(BaseHandler): is_guest=is_guest, explicit_room_id=room_id ) + # When the user joins a new room, or another user joins a currently + # joined room, we need to send down presence for those users. + to_add = [] + for event in events: + if not isinstance(event, EventBase): + continue + if event.type == EventTypes.Member: + if event.membership != Membership.JOIN: + continue + # Send down presence. + if event.state_key == auth_user_id: + # Send down presence for everyone in the room. + users = yield self.store.get_users_in_room(event.room_id) + states = yield presence_handler.get_states( + users, + as_event=True, + ) + to_add.extend(states) + else: + + ev = yield presence_handler.get_state( + UserID.from_string(event.state_key), + as_event=True, + ) + to_add.append(ev) + + events.extend(to_add) + time_now = self.clock.time_msec() chunks = [ @@ -159,10 +190,6 @@ class EventStreamHandler(BaseHandler): defer.returnValue(chunk) - finally: - if affect_presence: - self.stopped_stream(auth_user) - class EventHandler(BaseHandler): diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 82c8cb5f0c..77894d9132 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -21,7 +21,6 @@ from synapse.streams.config import PaginationConfig from synapse.events.utils import serialize_event from synapse.events.validator import EventValidator from synapse.util import unwrapFirstError -from synapse.util.logcontext import PreserveLoggingContext from synapse.util.caches.snapshot_cache import SnapshotCache from synapse.types import UserID, RoomStreamToken, StreamToken @@ -254,8 +253,7 @@ class MessageHandler(BaseHandler): if event.type == EventTypes.Message: presence = self.hs.get_handlers().presence_handler - with PreserveLoggingContext(): - presence.bump_presence_active_time(user) + yield presence.bump_presence_active_time(user) @defer.inlineCallbacks def create_and_send_event(self, event_dict, ratelimit=True, @@ -660,10 +658,6 @@ class MessageHandler(BaseHandler): room_id=room_id, ) - # TODO(paul): I wish I was called with user objects not user_id - # strings... 
- auth_user = UserID.from_string(user_id) - # TODO: These concurrently time_now = self.clock.time_msec() state = [ @@ -688,13 +682,11 @@ class MessageHandler(BaseHandler): @defer.inlineCallbacks def get_presence(): states = yield presence_handler.get_states( - target_users=[UserID.from_string(m.user_id) for m in room_members], - auth_user=auth_user, + [m.user_id for m in room_members], as_event=True, - check_auth=False, ) - defer.returnValue(states.values()) + defer.returnValue(states) @defer.inlineCallbacks def get_receipts(): diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index b61394f2b5..26f2e669ce 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -13,13 +13,25 @@ # See the License for the specific language governing permissions and # limitations under the License. -from twisted.internet import defer +"""This module is responsible for keeping track of presence status of local +and remote users. -from synapse.api.errors import SynapseError, AuthError +The methods that define policy are: + - PresenceHandler._update_states + - PresenceHandler._handle_timeouts + - should_notify +""" + +from twisted.internet import defer, reactor +from contextlib import contextmanager + +from synapse.api.errors import SynapseError from synapse.api.constants import PresenceState +from synapse.storage.presence import UserPresenceState -from synapse.util.logcontext import PreserveLoggingContext +from synapse.util.logcontext import preserve_fn from synapse.util.logutils import log_function +from synapse.util.wheel_timer import WheelTimer from synapse.types import UserID import synapse.metrics @@ -33,33 +45,24 @@ logger = logging.getLogger(__name__) metrics = synapse.metrics.get_metrics_for(__name__) -# Don't bother bumping "last active" time if it differs by less than 60 seconds +# If a user was last active in the last LAST_ACTIVE_GRANULARITY, consider them +# "currently_active" LAST_ACTIVE_GRANULARITY = 60 * 1000 -# Keep no more than this number of offline serial revisions -MAX_OFFLINE_SERIALS = 1000 +# How long to wait until a new /events or /sync request before assuming +# the client has gone. +SYNC_ONLINE_TIMEOUT = 30 * 1000 +# How long to wait before marking the user as idle. Compared against last active +IDLE_TIMER = 5 * 60 * 1000 -# TODO(paul): Maybe there's one of these I can steal from somewhere -def partition(l, func): - """Partition the list by the result of func applied to each element.""" - ret = {} +# How often we expect remote servers to resend us presence. 
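+# If we hear nothing about a remote user for this long, their presence is
+# considered stale and they are timed out to offline in _handle_timeouts.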
+FEDERATION_TIMEOUT = 30 * 60 * 1000 - for x in l: - key = func(x) - if key not in ret: - ret[key] = [] - ret[key].append(x) +# How often to resend presence to remote servers +FEDERATION_PING_INTERVAL = 25 * 60 * 1000 - return ret - - -def partitionbool(l, func): - def boolfunc(x): - return bool(func(x)) - - ret = partition(l, boolfunc) - return ret.get(True, []), ret.get(False, []) +assert LAST_ACTIVE_GRANULARITY < IDLE_TIMER def user_presence_changed(distributor, user, statuscache): @@ -72,45 +75,13 @@ def collect_presencelike_data(distributor, user, content): class PresenceHandler(BaseHandler): - STATE_LEVELS = { - PresenceState.OFFLINE: 0, - PresenceState.UNAVAILABLE: 1, - PresenceState.ONLINE: 2, - PresenceState.FREE_FOR_CHAT: 3, - } - def __init__(self, hs): super(PresenceHandler, self).__init__(hs) - - self.homeserver = hs - + self.hs = hs self.clock = hs.get_clock() - - distributor = hs.get_distributor() - distributor.observe("registered_user", self.registered_user) - - distributor.observe( - "started_user_eventstream", self.started_user_eventstream - ) - distributor.observe( - "stopped_user_eventstream", self.stopped_user_eventstream - ) - - distributor.observe("user_joined_room", self.user_joined_room) - - distributor.declare("collect_presencelike_data") - - distributor.declare("changed_presencelike_data") - distributor.observe( - "changed_presencelike_data", self.changed_presencelike_data - ) - - # outbound signal from the presence module to advertise when a user's - # presence has changed - distributor.declare("user_presence_changed") - - self.distributor = distributor - + self.store = hs.get_datastore() + self.wheel_timer = WheelTimer() + self.notifier = hs.get_notifier() self.federation = hs.get_replication_layer() self.federation.register_edu_handler( @@ -138,348 +109,574 @@ class PresenceHandler(BaseHandler): ) ) - # IN-MEMORY store, mapping local userparts to sets of local users to - # be informed of state changes. - self._local_pushmap = {} - # map local users to sets of remote /domain names/ who are interested - # in them - self._remote_sendmap = {} - # map remote users to sets of local users who're interested in them - self._remote_recvmap = {} - # list of (serial, set of(userids)) tuples, ordered by serial, latest - # first - self._remote_offline_serials = [] + distributor = hs.get_distributor() + distributor.observe("user_joined_room", self.user_joined_room) - # map any user to a UserPresenceCache - self._user_cachemap = {} - self._user_cachemap_latest_serial = 0 + active_presence = self.store.take_presence_startup_info() - # map room_ids to the latest presence serial for a member of that - # room - self._room_serials = {} + # A dictionary of the current state of users. This is prefilled with + # non-offline presence from the DB. We should fetch from the DB if + # we can't find a users presence in here. 
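+ # Illustrative example of the shape (user id is made up):
+ # {"@alice:example.org": UserPresenceState(state="online", ...)}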
+ self.user_to_current_state = { + state.user_id: state + for state in active_presence + } - metrics.register_callback( - "userCachemap:size", - lambda: len(self._user_cachemap), - ) - - def _get_or_make_usercache(self, user): - """If the cache entry doesn't exist, initialise a new one.""" - if user not in self._user_cachemap: - self._user_cachemap[user] = UserPresenceCache() - return self._user_cachemap[user] - - def _get_or_offline_usercache(self, user): - """If the cache entry doesn't exist, return an OFFLINE one but do not - store it into the cache.""" - if user in self._user_cachemap: - return self._user_cachemap[user] - else: - return UserPresenceCache() - - def registered_user(self, user): - return self.store.create_presence(user.localpart) - - @defer.inlineCallbacks - def is_presence_visible(self, observer_user, observed_user): - assert(self.hs.is_mine(observed_user)) - - if observer_user == observed_user: - defer.returnValue(True) - - if (yield self.store.user_rooms_intersect( - [u.to_string() for u in observer_user, observed_user])): - defer.returnValue(True) - - if (yield self.store.is_presence_visible( - observed_localpart=observed_user.localpart, - observer_userid=observer_user.to_string())): - defer.returnValue(True) - - defer.returnValue(False) - - @defer.inlineCallbacks - def get_state(self, target_user, auth_user, as_event=False, check_auth=True): - """Get the current presence state of the given user. - - Args: - target_user (UserID): The user whose presence we want - auth_user (UserID): The user requesting the presence, used for - checking if said user is allowed to see the persence of the - `target_user` - as_event (bool): Format the return as an event or not? - check_auth (bool): Perform the auth checks or not? - - Returns: - dict: The presence state of the `target_user`, whose format depends - on the `as_event` argument. 
- """ - if self.hs.is_mine(target_user): - if check_auth: - visible = yield self.is_presence_visible( - observer_user=auth_user, - observed_user=target_user + now = self.clock.time_msec() + for state in active_presence: + self.wheel_timer.insert( + now=now, + obj=state.user_id, + then=state.last_active + IDLE_TIMER, + ) + self.wheel_timer.insert( + now=now, + obj=state.user_id, + then=state.last_user_sync + SYNC_ONLINE_TIMEOUT, + ) + if self.hs.is_mine_id(state.user_id): + self.wheel_timer.insert( + now=now, + obj=state.user_id, + then=state.last_federation_update + FEDERATION_PING_INTERVAL, ) - - if not visible: - raise SynapseError(404, "Presence information not visible") - - if target_user in self._user_cachemap: - state = self._user_cachemap[target_user].get_state() else: - state = yield self.store.get_presence_state(target_user.localpart) - if "mtime" in state: - del state["mtime"] - state["presence"] = state.pop("state") - else: - # TODO(paul): Have remote server send us permissions set - state = self._get_or_offline_usercache(target_user).get_state() - - if "last_active" in state: - state["last_active_ago"] = int( - self.clock.time_msec() - state.pop("last_active") - ) - - if as_event: - content = state - - content["user_id"] = target_user.to_string() - - if "last_active" in content: - content["last_active_ago"] = int( - self._clock.time_msec() - content.pop("last_active") + self.wheel_timer.insert( + now=now, + obj=state.user_id, + then=state.last_federation_update + FEDERATION_TIMEOUT, ) - defer.returnValue({"type": "m.presence", "content": content}) - else: - defer.returnValue(state) + # Set of users who have presence in the `user_to_current_state` that + # have not yet been persisted + self.unpersisted_users_changes = set() - @defer.inlineCallbacks - def get_states(self, target_users, auth_user, as_event=False, check_auth=True): - """A batched version of the `get_state` method that accepts a list of - `target_users` + reactor.addSystemEventTrigger("before", "shutdown", self._on_shutdown) - Args: - target_users (list): The list of UserID's whose presence we want - auth_user (UserID): The user requesting the presence, used for - checking if said user is allowed to see the persence of the - `target_users` - as_event (bool): Format the return as an event or not? - check_auth (bool): Perform the auth checks or not? + self.serial_to_user = {} + self._next_serial = 1 - Returns: - dict: A mapping from user -> presence_state - """ - local_users, remote_users = partitionbool( - target_users, - lambda u: self.hs.is_mine(u) + # Keeps track of the number of *ongoing* syncs. While this is non zero + # a user will never go offline. + self.user_to_num_current_syncs = {} + + # Start a LoopingCall in 30s that fires every 5s. + # The initial delay is to allow disconnected clients a chance to + # reconnect before we treat them as offline. + self.clock.call_later( + 0 * 1000, + self.clock.looping_call, + self._handle_timeouts, + 5000, ) - if check_auth: - for user in local_users: - visible = yield self.is_presence_visible( - observer_user=auth_user, - observed_user=user - ) + @defer.inlineCallbacks + def _on_shutdown(self): + """Gets called when shutting down. This lets us persist any updates that + we haven't yet persisted, e.g. updates that only changes some internal + timers. This allows changes to persist across startup without having to + persist every single change. 
- if not visible: - raise SynapseError(404, "Presence information not visible") + If this does not run it simply means that some of the timers will fire + earlier than they should when synapse is restarted. This affect of this + is some spurious presence changes that will self-correct. + """ + logger.info( + "Performing _on_shutdown. Persiting %d unpersisted changes", + len(self.user_to_current_state) + ) - results = {} - if local_users: - for user in local_users: - if user in self._user_cachemap: - results[user] = self._user_cachemap[user].get_state() + if self.unpersisted_users_changes: + yield self.store.update_presence([ + self.user_to_current_state[user_id] + for user_id in self.unpersisted_users_changes + ]) + logger.info("Finished _on_shutdown") - local_to_user = {u.localpart: u for u in local_users} + @defer.inlineCallbacks + def _update_states(self, new_states): + """Updates presence of users. Sets the appropriate timeouts. Pokes + the notifier and federation if and only if the changed presence state + should be sent to clients/servers. + """ + now = self.clock.time_msec() - states = yield self.store.get_presence_states( - [u.localpart for u in local_users if u not in results] + # NOTE: We purposefully don't yield between now and when we've + # calculated what we want to do with the new states, to avoid races. + + to_notify = {} # Changes we want to notify everyone about + to_federation_ping = {} # These need sending keep-alives + for new_state in new_states: + user_id = new_state.user_id + prev_state = self.user_to_current_state.get( + user_id, UserPresenceState.default(user_id) ) - for local_part, state in states.items(): - if state is None: - continue - res = {"presence": state["state"]} - if "status_msg" in state and state["status_msg"]: - res["status_msg"] = state["status_msg"] - results[local_to_user[local_part]] = res - - for user in remote_users: - # TODO(paul): Have remote server send us permissions set - results[user] = self._get_or_offline_usercache(user).get_state() - - for state in results.values(): - if "last_active" in state: - state["last_active_ago"] = int( - self.clock.time_msec() - state.pop("last_active") - ) - - if as_event: - for user, state in results.items(): - content = state - content["user_id"] = user.to_string() - - if "last_active" in content: - content["last_active_ago"] = int( - self._clock.time_msec() - content.pop("last_active") + # If the users are ours then we want to set up a bunch of timers + # to time things out. 
+ if self.hs.is_mine_id(user_id): + if new_state.state == PresenceState.ONLINE: + # Idle timer + self.wheel_timer.insert( + now=now, + obj=user_id, + then=new_state.last_active + IDLE_TIMER ) - results[user] = {"type": "m.presence", "content": content} + if new_state.state != PresenceState.OFFLINE: + # User has stopped syncing + self.wheel_timer.insert( + now=now, + obj=user_id, + then=new_state.last_user_sync + SYNC_ONLINE_TIMEOUT + ) + + last_federate = new_state.last_federation_update + if now - last_federate > FEDERATION_PING_INTERVAL: + # Been a while since we've poked remote servers + new_state = new_state.copy_and_replace( + last_federation_update=now, + ) + to_federation_ping[user_id] = new_state + + else: + self.wheel_timer.insert( + now=now, + obj=user_id, + then=new_state.last_federation_update + FEDERATION_TIMEOUT + ) + + if new_state.state == PresenceState.ONLINE: + currently_active = now - new_state.last_active < LAST_ACTIVE_GRANULARITY + new_state = new_state.copy_and_replace( + currently_active=currently_active, + ) + + # Check whether the change was something worth notifying about + if should_notify(prev_state, new_state): + new_state.copy_and_replace( + last_federation_update=now, + ) + to_notify[user_id] = new_state + + self.user_to_current_state[user_id] = new_state + + # TODO: We should probably ensure there are no races hereafter + + if to_notify: + yield self._persist_and_notify(to_notify.values()) + + self.unpersisted_users_changes |= set(s.user_id for s in new_states) + self.unpersisted_users_changes -= set(to_notify.keys()) + + to_federation_ping = { + user_id: state for user_id, state in to_federation_ping.items() + if user_id not in to_notify + } + if to_federation_ping: + _, _, hosts_to_states = yield self._get_interested_parties( + to_federation_ping.values() + ) + + self._push_to_remotes(hosts_to_states) + + def _handle_timeouts(self): + """Checks the presence of users that have timed out and updates as + appropriate. + """ + now = self.clock.time_msec() + + # Fetch the list of users that *may* have timed out. Things may have + # changed since the timeout was set, so we won't necessarily have to + # take any action. + users_to_check = self.wheel_timer.fetch(now) + + changes = {} # Actual changes we need to notify people about + + for user_id in set(users_to_check): + state = self.user_to_current_state.get(user_id, None) + if not state: + continue + + if self.hs.is_mine_id(user_id): + if state.state == PresenceState.OFFLINE: + continue + + if state.state == PresenceState.ONLINE: + if now - state.last_active > IDLE_TIMER: + # Currently online, but last activity ages ago so auto + # idle + changes[user_id] = state.copy_and_replace( + state=PresenceState.UNAVAILABLE, + ) + elif now - state.last_active > LAST_ACTIVE_GRANULARITY: + # So that we send down a notification that we've + # stopped updating. + changes[user_id] = state + + if now - state.last_federation_update > FEDERATION_PING_INTERVAL: + # Need to send ping to other servers to ensure they don't + # timeout and set us to offline + changes[user_id] = state + + # If there are have been no sync for a while (and none ongoing), + # set presence to offline + if not self.user_to_num_current_syncs.get(user_id, 0): + if now - state.last_user_sync > SYNC_ONLINE_TIMEOUT: + changes[user_id] = state.copy_and_replace( + state=PresenceState.OFFLINE, + ) + else: + # We expect to be poked occaisonally by the other side. + # This is to protect against forgetful/buggy servers, so that + # no one gets stuck online forever. 
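+ # FEDERATION_TIMEOUT is a little longer than FEDERATION_PING_INTERVAL,
+ # so a healthy remote server should always have pinged us again before
+ # this check trips.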
+ if now - state.last_federation_update > FEDERATION_TIMEOUT: + if state.state != PresenceState.OFFLINE: + # The other side seems to have disappeared. + changes[user_id] = state.copy_and_replace( + state=PresenceState.OFFLINE, + ) + + preserve_fn(self._update_states)(changes.values()) + + @defer.inlineCallbacks + def bump_presence_active_time(self, user): + """We've seen the user do something that indicates they're interacting + with the app. + """ + user_id = user.to_string() + + prev_state = yield self.current_state_for_user(user_id) + + yield self._update_states([prev_state.copy_and_replace( + state=PresenceState.ONLINE, + last_active=self.clock.time_msec(), + )]) + + @defer.inlineCallbacks + def user_syncing(self, user_id, affect_presence=True): + """Returns a context manager that should surround any stream requests + from the user. + + This allows us to keep track of who is currently streaming and who isn't + without having to have timers outside of this module to avoid flickering + when users disconnect/reconnect. + + Args: + user_id (str) + affect_presence (bool): If false this function will be a no-op. + Useful for streams that are not associated with an actual + client that is being used by a user. + """ + if affect_presence: + curr_sync = self.user_to_num_current_syncs.get(user_id, 0) + self.user_to_num_current_syncs[user_id] = curr_sync + 1 + + prev_state = yield self.current_state_for_user(user_id) + if prev_state.state == PresenceState.OFFLINE: + # If they're currently offline then bring them online, otherwise + # just update the last sync times. + yield self._update_states([prev_state.copy_and_replace( + state=PresenceState.ONLINE, + last_active=self.clock.time_msec(), + last_user_sync=self.clock.time_msec(), + )]) + else: + yield self._update_states([prev_state.copy_and_replace( + last_user_sync=self.clock.time_msec(), + )]) + + @defer.inlineCallbacks + def _end(): + if affect_presence: + self.user_to_num_current_syncs[user_id] -= 1 + + prev_state = yield self.current_state_for_user(user_id) + yield self._update_states([prev_state.copy_and_replace( + last_user_sync=self.clock.time_msec(), + )]) + + @contextmanager + def _user_syncing(): + try: + yield + finally: + preserve_fn(_end)() + + defer.returnValue(_user_syncing()) + + @defer.inlineCallbacks + def current_state_for_user(self, user_id): + """Get the current presence state for a user. + """ + res = yield self.current_state_for_users([user_id]) + defer.returnValue(res[user_id]) + + @defer.inlineCallbacks + def current_state_for_users(self, user_ids): + """Get the current presence state for multiple users. + + Returns: + dict: `user_id` -> `UserPresenceState` + """ + states = { + user_id: self.user_to_current_state.get(user_id, None) + for user_id in user_ids + } + + missing = [user_id for user_id, state in states.items() if not state] + if missing: + # There are things not in our in memory cache. Lets pull them out of + # the database. + res = yield self.store.get_presence_for_users(missing) + states.update({state.user_id: state for state in res}) + + missing = [user_id for user_id, state in states.items() if not state] + if missing: + states.update({ + user_id: UserPresenceState.default(user_id) + for user_id in missing + }) + + defer.returnValue(states) + + @defer.inlineCallbacks + def _get_interested_parties(self, states): + """Given a list of states return which entities (rooms, users, servers) + are interested in the given states. 
+ + Returns: + 3-tuple: `(room_ids_to_states, users_to_states, hosts_to_states)`, + with each item being a dict of `entity_name` -> `[UserPresenceState]` + """ + room_ids_to_states = {} + users_to_states = {} + for state in states: + events = yield self.store.get_rooms_for_user(state.user_id) + for e in events: + room_ids_to_states.setdefault(e.room_id, []).append(state) + + plist = yield self.store.get_presence_list_observers_accepted(state.user_id) + for u in plist: + users_to_states.setdefault(u, []).append(state) + + # Always notify self + users_to_states.setdefault(state.user_id, []).append(state) + + hosts_to_states = {} + for room_id, states in room_ids_to_states.items(): + hosts = yield self.store.get_joined_hosts_for_room(room_id) + for host in hosts: + hosts_to_states.setdefault(host, []).extend(states) + + for user_id, states in users_to_states.items(): + host = UserID.from_string(user_id).domain + hosts_to_states.setdefault(host, []).extend(states) + + # TODO: de-dup hosts_to_states, as a single host might have multiple + # of same presence + + defer.returnValue((room_ids_to_states, users_to_states, hosts_to_states)) + + @defer.inlineCallbacks + def _persist_and_notify(self, states): + """Persist states in the database, poke the notifier and send to + interested remote servers + """ + stream_id, max_token = yield self.store.update_presence(states) + + parties = yield self._get_interested_parties(states) + room_ids_to_states, users_to_states, hosts_to_states = parties + + self.notifier.on_new_event( + "presence_key", stream_id, rooms=room_ids_to_states.keys(), + users=[UserID.from_string(u) for u in users_to_states.keys()] + ) + + self._push_to_remotes(hosts_to_states) + + def _push_to_remotes(self, hosts_to_states): + """Sends state updates to remote servers. + + Args: + hosts_to_states (dict): Mapping `server_name` -> `[UserPresenceState]` + """ + now = self.clock.time_msec() + for host, states in hosts_to_states.items(): + self.federation.send_edu( + destination=host, + edu_type="m.presence", + content={ + "push": [ + _format_user_presence_state(state, now) + for state in states + ] + } + ) + + @defer.inlineCallbacks + def incoming_presence(self, origin, content): + """Called when we receive a `m.presence` EDU from a remote server. + """ + now = self.clock.time_msec() + updates = [] + for push in content.get("push", []): + # A "push" contains a list of presence that we are probably interested + # in. + # TODO: Actually check if we're interested, rather than blindly + # accepting presence updates. 
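+ # Roughly the shape each push entry takes (values here are illustrative):
+ # {"user_id": "@bob:remote.example", "presence": "online",
+ # "last_active_ago": 5000, "status_msg": "afk"}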
+ user_id = push.get("user_id", None) + if not user_id: + logger.info( + "Got presence update from %r with no 'user_id': %r", + origin, push, + ) + continue + + presence_state = push.get("presence", None) + if not presence_state: + logger.info( + "Got presence update from %r with no 'presence_state': %r", + origin, push, + ) + continue + + new_fields = { + "state": presence_state, + "last_federation_update": now, + } + + last_active_ago = push.get("last_active_ago", None) + if last_active_ago is not None: + new_fields["last_active"] = now - last_active_ago + + new_fields["status_msg"] = push.get("status_msg", None) + + prev_state = yield self.current_state_for_user(user_id) + updates.append(prev_state.copy_and_replace(**new_fields)) + + if updates: + yield self._update_states(updates) + + @defer.inlineCallbacks + def get_state(self, target_user, as_event=False): + results = yield self.get_states( + [target_user.to_string()], + as_event=as_event, + ) + + defer.returnValue(results[0]) + + @defer.inlineCallbacks + def get_states(self, target_user_ids, as_event=False): + """Get the presence state for users. + + Args: + target_user_ids (list) + as_event (bool): Whether to format it as a client event or not. + + Returns: + list + """ + + updates = yield self.current_state_for_users(target_user_ids) + updates = updates.values() + + for user_id in set(target_user_ids) - set(u.user_id for u in updates): + updates.append(UserPresenceState.default(user_id)) + + now = self.clock.time_msec() + if as_event: + defer.returnValue([ + { + "type": "m.presence", + "content": _format_user_presence_state(state, now), + } + for state in updates + ]) + else: + defer.returnValue([ + _format_user_presence_state(state, now) for state in updates + ]) + + @defer.inlineCallbacks + def set_state(self, target_user, state): + """Set the presence state of the user. + """ + status_msg = state.get("status_msg", None) + presence = state["presence"] + + user_id = target_user.to_string() + + prev_state = yield self.current_state_for_user(user_id) + + new_fields = { + "state": presence, + "status_msg": status_msg + } + + if presence == PresenceState.ONLINE: + new_fields["last_active"] = self.clock.time_msec() + + yield self._update_states([prev_state.copy_and_replace(**new_fields)]) + + @defer.inlineCallbacks + def user_joined_room(self, user, room_id): + """Called (via the distributor) when a user joins a room. This funciton + sends presence updates to servers, either: + 1. the joining user is a local user and we send their presence to + all servers in the room. + 2. the joining user is a remote user and so we send presence for all + local users in the room. + """ + # We only need to send presence to servers that don't have it yet. We + # don't need to send to local clients here, as that is done as part + # of the event stream/sync. + # TODO: Only send to servers not already in the room. + if self.hs.is_mine(user): + state = yield self.current_state_for_user(user.to_string()) + + hosts = yield self.store.get_joined_hosts_for_room(room_id) + self._push_to_remotes({host: (state,) for host in hosts}) + else: + user_ids = yield self.store.get_users_in_room(room_id) + user_ids = filter(self.hs.is_mine_id, user_ids) + + states = yield self.current_state_for_users(user_ids) + + self._push_to_remotes({user.domain: states.values()}) + + @defer.inlineCallbacks + def get_presence_list(self, observer_user, accepted=None): + """Returns the presence for all users in their presence list. 
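+ Args:
+ observer_user(UserID): The local user whose presence list to fetch.
+ accepted(bool or None): If not None, only include users who have
+ (or have not) accepted the presence invite.
+ Returns:
+ A list of presence state dicts, each updated with an "accepted" entry.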
+ """ + if not self.hs.is_mine(observer_user): + raise SynapseError(400, "User is not hosted on this Home Server") + + presence_list = yield self.store.get_presence_list( + observer_user.localpart, accepted=accepted + ) + + results = yield self.get_states( + target_user_ids=[row["observed_user_id"] for row in presence_list], + as_event=False, + ) + + is_accepted = { + row["observed_user_id"]: row["accepted"] for row in presence_list + } + + for result in results: + result.update({ + "accepted": is_accepted, + }) defer.returnValue(results) - @defer.inlineCallbacks - @log_function - def set_state(self, target_user, auth_user, state): - # return - # TODO (erikj): Turn this back on. Why did we end up sending EDUs - # everywhere? - - if not self.hs.is_mine(target_user): - raise SynapseError(400, "User is not hosted on this Home Server") - - if target_user != auth_user: - raise AuthError(400, "Cannot set another user's presence") - - if "status_msg" not in state: - state["status_msg"] = None - - for k in state.keys(): - if k not in ("presence", "status_msg"): - raise SynapseError( - 400, "Unexpected presence state key '%s'" % (k,) - ) - - if state["presence"] not in self.STATE_LEVELS: - raise SynapseError(400, "'%s' is not a valid presence state" % ( - state["presence"], - )) - - logger.debug("Updating presence state of %s to %s", - target_user.localpart, state["presence"]) - - state_to_store = dict(state) - state_to_store["state"] = state_to_store.pop("presence") - - statuscache = self._get_or_offline_usercache(target_user) - was_level = self.STATE_LEVELS[statuscache.get_state()["presence"]] - now_level = self.STATE_LEVELS[state["presence"]] - - yield self.store.set_presence_state( - target_user.localpart, state_to_store - ) - yield collect_presencelike_data(self.distributor, target_user, state) - - if now_level > was_level: - state["last_active"] = self.clock.time_msec() - - now_online = state["presence"] != PresenceState.OFFLINE - was_polling = target_user in self._user_cachemap - - if now_online and not was_polling: - yield self.start_polling_presence(target_user, state=state) - elif not now_online and was_polling: - yield self.stop_polling_presence(target_user) - - # TODO(paul): perform a presence push as part of start/stop poll so - # we don't have to do this all the time - yield self.changed_presencelike_data(target_user, state) - - def bump_presence_active_time(self, user, now=None): - if now is None: - now = self.clock.time_msec() - - prev_state = self._get_or_make_usercache(user) - if now - prev_state.state.get("last_active", 0) < LAST_ACTIVE_GRANULARITY: - return - - with PreserveLoggingContext(): - self.changed_presencelike_data(user, {"last_active": now}) - - def get_joined_rooms_for_user(self, user): - """Get the list of rooms a user is joined to. - - Args: - user(UserID): The user. - Returns: - A Deferred of a list of room id strings. - """ - rm_handler = self.homeserver.get_handlers().room_member_handler - return rm_handler.get_joined_rooms_for_user(user) - - def get_joined_users_for_room_id(self, room_id): - rm_handler = self.homeserver.get_handlers().room_member_handler - return rm_handler.get_room_members(room_id) - - @defer.inlineCallbacks - def changed_presencelike_data(self, user, state): - """Updates the presence state of a local user. - - Args: - user(UserID): The user being updated. - state(dict): The new presence state for the user. 
- Returns: - A Deferred - """ - self._user_cachemap_latest_serial += 1 - statuscache = yield self.update_presence_cache(user, state) - yield self.push_presence(user, statuscache=statuscache) - - @log_function - def started_user_eventstream(self, user): - # TODO(paul): Use "last online" state - return self.set_state(user, user, {"presence": PresenceState.ONLINE}) - - @log_function - def stopped_user_eventstream(self, user): - # TODO(paul): Save current state as "last online" state - return self.set_state(user, user, {"presence": PresenceState.OFFLINE}) - - @defer.inlineCallbacks - def user_joined_room(self, user, room_id): - """Called via the distributor whenever a user joins a room. - Notifies the new member of the presence of the current members. - Notifies the current members of the room of the new member's presence. - - Args: - user(UserID): The user who joined the room. - room_id(str): The room id the user joined. - """ - if self.hs.is_mine(user): - # No actual update but we need to bump the serial anyway for the - # event source - self._user_cachemap_latest_serial += 1 - statuscache = yield self.update_presence_cache( - user, room_ids=[room_id] - ) - self.push_update_to_local_and_remote( - observed_user=user, - room_ids=[room_id], - statuscache=statuscache, - ) - - # We also want to tell them about current presence of people. - curr_users = yield self.get_joined_users_for_room_id(room_id) - - for local_user in [c for c in curr_users if self.hs.is_mine(c)]: - statuscache = yield self.update_presence_cache( - local_user, room_ids=[room_id], add_to_cache=False - ) - - with PreserveLoggingContext(): - self.push_update_to_local_and_remote( - observed_user=local_user, - users_to_push=[user], - statuscache=statuscache, - ) - @defer.inlineCallbacks def send_presence_invite(self, observer_user, observed_user): - """Request the presence of a local or remote user for a local user""" - if not self.hs.is_mine(observer_user): - raise SynapseError(400, "User is not hosted on this Home Server") - + """Sends a presence invite. + """ yield self.store.add_presence_list_pending( observer_user.localpart, observed_user.to_string() ) @@ -496,60 +693,41 @@ class PresenceHandler(BaseHandler): } ) - @defer.inlineCallbacks - def _should_accept_invite(self, observed_user, observer_user): - if not self.hs.is_mine(observed_user): - defer.returnValue(False) - - row = yield self.store.has_presence_state(observed_user.localpart) - if not row: - defer.returnValue(False) - - # TODO(paul): Eventually we'll ask the user's permission for this - # before accepting. For now just accept any invite request - defer.returnValue(True) - @defer.inlineCallbacks def invite_presence(self, observed_user, observer_user): - """Handles a m.presence_invite EDU. A remote or local user has - requested presence updates for a local user. If the invite is accepted - then allow the local or remote user to see the presence of the local - user. - - Args: - observed_user(UserID): The local user whose presence is requested. - observer_user(UserID): The remote or local user requesting presence. + """Handles new presence invites. 
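+ Args:
+ observed_user(UserID): The local user whose presence is requested.
+ observer_user(UserID): The local or remote user asking to see it.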
""" - accept = yield self._should_accept_invite(observed_user, observer_user) - - if accept: - yield self.store.allow_presence_visible( - observed_user.localpart, observer_user.to_string() - ) + if not self.hs.is_mine(observed_user): + raise SynapseError(400, "User is not hosted on this Home Server") + # TODO: Don't auto accept if self.hs.is_mine(observer_user): - if accept: - yield self.accept_presence(observed_user, observer_user) - else: - yield self.deny_presence(observed_user, observer_user) + yield self.accept_presence(observed_user, observer_user) else: - edu_type = "m.presence_accept" if accept else "m.presence_deny" - - yield self.federation.send_edu( + self.federation.send_edu( destination=observer_user.domain, - edu_type=edu_type, + edu_type="m.presence_accept", content={ "observed_user": observed_user.to_string(), "observer_user": observer_user.to_string(), } ) + state_dict = yield self.get_state(observed_user, as_event=False) + + self.federation.send_edu( + destination=observer_user.domain, + edu_type="m.presence", + content={ + "push": [state_dict] + } + ) + @defer.inlineCallbacks def accept_presence(self, observed_user, observer_user): """Handles a m.presence_accept EDU. Mark a presence invite from a local or remote user as accepted in a local user's presence list. Starts polling for presence updates from the local or remote user. - Args: observed_user(UserID): The user to update in the presence list. observer_user(UserID): The owner of the presence list to update. @@ -558,15 +736,10 @@ class PresenceHandler(BaseHandler): observer_user.localpart, observed_user.to_string() ) - yield self.start_polling_presence( - observer_user, target_user=observed_user - ) - @defer.inlineCallbacks def deny_presence(self, observed_user, observer_user): """Handle a m.presence_deny EDU. Removes a local or remote user from a local user's presence list. - Args: observed_user(UserID): The local or remote user to remove from the list. @@ -584,7 +757,6 @@ class PresenceHandler(BaseHandler): def drop(self, observed_user, observer_user): """Remove a local or remote user from a local user's presence list and unsubscribe the local user from updates that user. - Args: observed_user(UserId): The local or remote user to remove from the list. @@ -599,710 +771,138 @@ class PresenceHandler(BaseHandler): observer_user.localpart, observed_user.to_string() ) - self.stop_polling_presence( - observer_user, target_user=observed_user - ) + # TODO: Inform the remote that we've dropped the presence list. @defer.inlineCallbacks - def get_presence_list(self, observer_user, accepted=None): - """Get the presence list for a local user. The retured list includes - the current presence state for each user listed. + def is_visible(self, observed_user, observer_user): + observer_rooms = yield self.store.get_rooms_for_user(observer_user.to_string()) + observed_rooms = yield self.store.get_rooms_for_user(observed_user.to_string()) - Args: - observer_user(UserID): The local user whose presence list to fetch. - accepted(bool or None): If not none then only include users who - have or have not accepted the presence invite request. - Returns: - A Deferred list of presence state events. 
- """ - if not self.hs.is_mine(observer_user): - raise SynapseError(400, "User is not hosted on this Home Server") + observer_room_ids = set(r.room_id for r in observer_rooms) + observed_room_ids = set(r.room_id for r in observed_rooms) - presence_list = yield self.store.get_presence_list( - observer_user.localpart, accepted=accepted + if observer_room_ids & observed_room_ids: + defer.returnValue(True) + + accepted_observers = yield self.store.get_presence_list_observers_accepted( + observed_user.to_string() ) - results = [] - for row in presence_list: - observed_user = UserID.from_string(row["observed_user_id"]) - result = { - "observed_user": observed_user, "accepted": row["accepted"] - } - result.update( - self._get_or_offline_usercache(observed_user).get_state() - ) - if "last_active" in result: - result["last_active_ago"] = int( - self.clock.time_msec() - result.pop("last_active") - ) - results.append(result) - - defer.returnValue(results) - - @defer.inlineCallbacks - @log_function - def start_polling_presence(self, user, target_user=None, state=None): - """Subscribe a local user to presence updates from a local or remote - user. If no target_user is supplied then subscribe to all users stored - in the presence list for the local user. - - Additonally this pushes the current presence state of this user to all - target_users. That state can be provided directly or will be read from - the stored state for the local user. - - Also this attempts to notify the local user of the current state of - any local target users. - - Args: - user(UserID): The local user that whishes for presence updates. - target_user(UserID): The local or remote user whose updates are - wanted. - state(dict): Optional presence state for the local user. - """ - logger.debug("Start polling for presence from %s", user) - - if target_user: - target_users = set([target_user]) - room_ids = [] - else: - presence = yield self.store.get_presence_list( - user.localpart, accepted=True - ) - target_users = set([ - UserID.from_string(x["observed_user_id"]) for x in presence - ]) - - # Also include people in all my rooms - - room_ids = yield self.get_joined_rooms_for_user(user) - - if state is None: - state = yield self.store.get_presence_state(user.localpart) - else: - # statuscache = self._get_or_make_usercache(user) - # self._user_cachemap_latest_serial += 1 - # statuscache.update(state, self._user_cachemap_latest_serial) - pass - - yield self.push_update_to_local_and_remote( - observed_user=user, - users_to_push=target_users, - room_ids=room_ids, - statuscache=self._get_or_make_usercache(user), - ) - - for target_user in target_users: - if self.hs.is_mine(target_user): - self._start_polling_local(user, target_user) - - # We want to tell the person that just came online - # presence state of people they are interested in? - self.push_update_to_clients( - users_to_push=[user], - ) - - deferreds = [] - remote_users = [u for u in target_users if not self.hs.is_mine(u)] - remoteusers_by_domain = partition(remote_users, lambda u: u.domain) - # Only poll for people in our get_presence_list - for domain in remoteusers_by_domain: - remoteusers = remoteusers_by_domain[domain] - - deferreds.append(self._start_polling_remote( - user, domain, remoteusers - )) - - yield defer.DeferredList(deferreds, consumeErrors=True) - - def _start_polling_local(self, user, target_user): - """Subscribe a local user to presence updates for a local user - - Args: - user(UserId): The local user that wishes for updates. 
- target_user(UserId): The local users whose updates are wanted. - """ - target_localpart = target_user.localpart - - if target_localpart not in self._local_pushmap: - self._local_pushmap[target_localpart] = set() - - self._local_pushmap[target_localpart].add(user) - - def _start_polling_remote(self, user, domain, remoteusers): - """Subscribe a local user to presence updates for remote users on a - given remote domain. - - Args: - user(UserID): The local user that wishes for updates. - domain(str): The remote server the local user wants updates from. - remoteusers(UserID): The remote users that local user wants to be - told about. - Returns: - A Deferred. - """ - to_poll = set() - - for u in remoteusers: - if u not in self._remote_recvmap: - self._remote_recvmap[u] = set() - to_poll.add(u) - - self._remote_recvmap[u].add(user) - - if not to_poll: - return defer.succeed(None) - - return self.federation.send_edu( - destination=domain, - edu_type="m.presence", - content={"poll": [u.to_string() for u in to_poll]} - ) - - @log_function - def stop_polling_presence(self, user, target_user=None): - """Unsubscribe a local user from presence updates from a local or - remote user. If no target user is supplied then unsubscribe the user - from all presence updates that the user had subscribed to. - - Args: - user(UserID): The local user that no longer wishes for updates. - target_user(UserID or None): The user whose updates are no longer - wanted. - Returns: - A Deferred. - """ - logger.debug("Stop polling for presence from %s", user) - - if not target_user or self.hs.is_mine(target_user): - self._stop_polling_local(user, target_user=target_user) - - deferreds = [] - - if target_user: - if target_user not in self._remote_recvmap: - return - target_users = set([target_user]) - else: - target_users = self._remote_recvmap.keys() - - remoteusers = [u for u in target_users - if user in self._remote_recvmap[u]] - remoteusers_by_domain = partition(remoteusers, lambda u: u.domain) - - for domain in remoteusers_by_domain: - remoteusers = remoteusers_by_domain[domain] - - deferreds.append( - self._stop_polling_remote(user, domain, remoteusers) - ) - - return defer.DeferredList(deferreds, consumeErrors=True) - - def _stop_polling_local(self, user, target_user): - """Unsubscribe a local user from presence updates from a local user on - this server. - - Args: - user(UserID): The local user that no longer wishes for updates. - target_user(UserID): The user whose updates are no longer wanted. - """ - for localpart in self._local_pushmap.keys(): - if target_user and localpart != target_user.localpart: - continue - - if user in self._local_pushmap[localpart]: - self._local_pushmap[localpart].remove(user) - - if not self._local_pushmap[localpart]: - del self._local_pushmap[localpart] - - @log_function - def _stop_polling_remote(self, user, domain, remoteusers): - """Unsubscribe a local user from presence updates from remote users on - a given domain. - - Args: - user(UserID): The local user that no longer wishes for updates. - domain(str): The remote server to unsubscribe from. - remoteusers([UserID]): The users on that remote server that the - local user no longer wishes to be updated about. - Returns: - A Deferred. 
- """ - to_unpoll = set() - - for u in remoteusers: - self._remote_recvmap[u].remove(user) - - if not self._remote_recvmap[u]: - del self._remote_recvmap[u] - to_unpoll.add(u) - - if not to_unpoll: - return defer.succeed(None) - - return self.federation.send_edu( - destination=domain, - edu_type="m.presence", - content={"unpoll": [u.to_string() for u in to_unpoll]} - ) - - @defer.inlineCallbacks - @log_function - def push_presence(self, user, statuscache): - """ - Notify local and remote users of a change in presence of a local user. - Pushes the update to local clients and remote domains that are directly - subscribed to the presence of the local user. - Also pushes that update to any local user or remote domain that shares - a room with the local user. - - Args: - user(UserID): The local user whose presence was updated. - statuscache(UserPresenceCache): Cache of the user's presence state - Returns: - A Deferred. - """ - assert(self.hs.is_mine(user)) - - logger.debug("Pushing presence update from %s", user) - - localusers = set(self._local_pushmap.get(user.localpart, set())) - remotedomains = set(self._remote_sendmap.get(user.localpart, set())) - - # Reflect users' status changes back to themselves, so UIs look nice - # and also user is informed of server-forced pushes - localusers.add(user) - - room_ids = yield self.get_joined_rooms_for_user(user) - - if not localusers and not room_ids: - defer.returnValue(None) - - yield self.push_update_to_local_and_remote( - observed_user=user, - users_to_push=localusers, - remote_domains=remotedomains, - room_ids=room_ids, - statuscache=statuscache, - ) - yield user_presence_changed(self.distributor, user, statuscache) - - @defer.inlineCallbacks - def incoming_presence(self, origin, content): - """Handle an incoming m.presence EDU. - For each presence update in the "push" list update our local cache and - notify the appropriate local clients. Only clients that share a room - or are directly subscribed to the presence for a user should be - notified of the update. - For each subscription request in the "poll" list start pushing presence - updates to the remote server. - For unsubscribe request in the "unpoll" list stop pushing presence - updates to the remote server. - - Args: - orgin(str): The source of this m.presence EDU. - content(dict): The content of this m.presence EDU. - Returns: - A Deferred. 
- """ - deferreds = [] - - for push in content.get("push", []): - user = UserID.from_string(push["user_id"]) - - logger.debug("Incoming presence update from %s", user) - - observers = set(self._remote_recvmap.get(user, set())) - if observers: - logger.debug( - " | %d interested local observers %r", len(observers), observers - ) - - room_ids = yield self.get_joined_rooms_for_user(user) - if room_ids: - logger.debug(" | %d interested room IDs %r", len(room_ids), room_ids) - - state = dict(push) - del state["user_id"] - - if "presence" not in state: - logger.warning( - "Received a presence 'push' EDU from %s without a" - " 'presence' key", origin - ) - continue - - if "last_active_ago" in state: - state["last_active"] = int( - self.clock.time_msec() - state.pop("last_active_ago") - ) - - self._user_cachemap_latest_serial += 1 - yield self.update_presence_cache(user, state, room_ids=room_ids) - - if not observers and not room_ids: - logger.debug(" | no interested observers or room IDs") - continue - - self.push_update_to_clients( - users_to_push=observers, room_ids=room_ids - ) - - user_id = user.to_string() - - if state["presence"] == PresenceState.OFFLINE: - self._remote_offline_serials.insert( - 0, - (self._user_cachemap_latest_serial, set([user_id])) - ) - while len(self._remote_offline_serials) > MAX_OFFLINE_SERIALS: - self._remote_offline_serials.pop() # remove the oldest - if user in self._user_cachemap: - del self._user_cachemap[user] - else: - # Remove the user from remote_offline_serials now that they're - # no longer offline - for idx, elem in enumerate(self._remote_offline_serials): - (_, user_ids) = elem - user_ids.discard(user_id) - if not user_ids: - self._remote_offline_serials.pop(idx) - - for poll in content.get("poll", []): - user = UserID.from_string(poll) - - if not self.hs.is_mine(user): - continue - - # TODO(paul) permissions checks - - if user not in self._remote_sendmap: - self._remote_sendmap[user] = set() - - self._remote_sendmap[user].add(origin) - - deferreds.append(self._push_presence_remote(user, origin)) - - for unpoll in content.get("unpoll", []): - user = UserID.from_string(unpoll) - - if not self.hs.is_mine(user): - continue - - if user in self._remote_sendmap: - self._remote_sendmap[user].remove(origin) - - if not self._remote_sendmap[user]: - del self._remote_sendmap[user] - - yield defer.DeferredList(deferreds, consumeErrors=True) - - @defer.inlineCallbacks - def update_presence_cache(self, user, state={}, room_ids=None, - add_to_cache=True): - """Update the presence cache for a user with a new state and bump the - serial to the latest value. - - Args: - user(UserID): The user being updated - state(dict): The presence state being updated - room_ids(None or list of str): A list of room_ids to update. If - room_ids is None then fetch the list of room_ids the user is - joined to. - add_to_cache: Whether to add an entry to the presence cache if the - user isn't already in the cache. - Returns: - A Deferred UserPresenceCache for the user being updated. 
- """ - if room_ids is None: - room_ids = yield self.get_joined_rooms_for_user(user) - - for room_id in room_ids: - self._room_serials[room_id] = self._user_cachemap_latest_serial - if add_to_cache: - statuscache = self._get_or_make_usercache(user) - else: - statuscache = self._get_or_offline_usercache(user) - statuscache.update(state, serial=self._user_cachemap_latest_serial) - defer.returnValue(statuscache) - - @defer.inlineCallbacks - def push_update_to_local_and_remote(self, observed_user, statuscache, - users_to_push=[], room_ids=[], - remote_domains=[]): - """Notify local clients and remote servers of a change in the presence - of a user. - - Args: - observed_user(UserID): The user to push the presence state for. - statuscache(UserPresenceCache): The cache for the presence state to - push. - users_to_push([UserID]): A list of local and remote users to - notify. - room_ids([str]): Notify the local and remote occupants of these - rooms. - remote_domains([str]): A list of remote servers to notify in - addition to those implied by the users_to_push and the - room_ids. - Returns: - A Deferred. - """ - - localusers, remoteusers = partitionbool( - users_to_push, - lambda u: self.hs.is_mine(u) - ) - - localusers = set(localusers) - - self.push_update_to_clients( - users_to_push=localusers, room_ids=room_ids - ) - - remote_domains = set(remote_domains) - remote_domains |= set([r.domain for r in remoteusers]) - for room_id in room_ids: - remote_domains.update( - (yield self.store.get_joined_hosts_for_room(room_id)) - ) - - remote_domains.discard(self.hs.hostname) - - deferreds = [] - for domain in remote_domains: - logger.debug(" | push to remote domain %s", domain) - deferreds.append( - self._push_presence_remote( - observed_user, domain, state=statuscache.get_state() - ) - ) - - yield defer.DeferredList(deferreds, consumeErrors=True) - - defer.returnValue((localusers, remote_domains)) - - def push_update_to_clients(self, users_to_push=[], room_ids=[]): - """Notify clients of a new presence event. - - Args: - users_to_push([UserID]): List of users to notify. - room_ids([str]): List of room_ids to notify. - """ - with PreserveLoggingContext(): - self.notifier.on_new_event( - "presence_key", - self._user_cachemap_latest_serial, - users_to_push, - room_ids, - ) - - @defer.inlineCallbacks - def _push_presence_remote(self, user, destination, state=None): - """Push a user's presence to a remote server. If a presence state event - that event is sent. Otherwise a new state event is constructed from the - stored presence state. - The last_active is replaced with last_active_ago in case the wallclock - time on the remote server is different to the time on this server. - Sends an EDU to the remote server with the current presence state. - - Args: - user(UserID): The user to push the presence state for. - destination(str): The remote server to send state to. - state(dict): The state to push, or None to use the current stored - state. - Returns: - A Deferred. 
- """ - if state is None: - state = yield self.store.get_presence_state(user.localpart) - del state["mtime"] - state["presence"] = state.pop("state") - - if user in self._user_cachemap: - state["last_active"] = ( - self._user_cachemap[user].get_state()["last_active"] - ) - - yield collect_presencelike_data(self.distributor, user, state) - - if "last_active" in state: - state = dict(state) - state["last_active_ago"] = int( - self.clock.time_msec() - state.pop("last_active") - ) - - user_state = {"user_id": user.to_string(), } - user_state.update(state) - - yield self.federation.send_edu( - destination=destination, - edu_type="m.presence", - content={"push": [user_state, ], } - ) + defer.returnValue(observer_user.to_string() in accepted_observers) + + +def should_notify(old_state, new_state): + """Decides if a presence state change should be sent to interested parties. + """ + if old_state.status_msg != new_state.status_msg: + return True + + if old_state.state == PresenceState.ONLINE: + if new_state.state != PresenceState.ONLINE: + # Always notify for online -> anything + return True + + if new_state.currently_active != old_state.currently_active: + return True + + if new_state.last_active - old_state.last_active > LAST_ACTIVE_GRANULARITY: + # Always notify for a transition where last active gets bumped. + return True + + if old_state.state != new_state.state: + # Nothing to report. + return True + + return False + + +def _format_user_presence_state(state, now): + """Convert UserPresenceState to a format that can be sent down to clients + and to other servers. + """ + content = { + "presence": state.state, + "user_id": state.user_id, + } + if state.last_active: + content["last_active_ago"] = now - state.last_active + if state.status_msg and state.state != PresenceState.OFFLINE: + content["status_msg"] = state.status_msg + if state.state == PresenceState.ONLINE: + content["currently_active"] = state.currently_active + + return content class PresenceEventSource(object): def __init__(self, hs): self.hs = hs self.clock = hs.get_clock() + self.store = hs.get_datastore() @defer.inlineCallbacks @log_function - def get_new_events(self, user, from_key, room_ids=None, **kwargs): - from_key = int(from_key) + def get_new_events(self, user, from_key, room_ids=None, include_offline=True, + **kwargs): + # The process for getting presence events are: + # 1. Get the rooms the user is in. + # 2. Get the list of user in the rooms. + # 3. Get the list of users that are in the user's presence list. + # 4. If there is a from_key set, cross reference the list of users + # with the `presence_stream_cache` to see which ones we actually + # need to check. + # 5. Load current state for the users. + # + # We don't try and limit the presence updates by the current token, as + # sending down the rare duplicate is not a concern. 
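The numbered comment above walks through how the set of candidate users is collected. Below is a minimal stand-alone sketch of that flow, using plain dicts in place of the room store, the presence list and the stream-change cache; the names are illustrative only, not Synapse APIs.

    # Sketch of steps 1-5: collect candidates, then narrow by stream position.
    def users_needing_presence(user_id, rooms_by_user, room_members,
                               presence_list, last_changed, from_key):
        """Return the user ids whose current presence state should be loaded."""
        # Steps 1 and 2: everyone sharing a room with the syncing user.
        candidates = set()
        for room_id in rooms_by_user.get(user_id, ()):
            candidates.update(room_members.get(room_id, ()))

        # Step 3: users on the syncing user's accepted presence list.
        candidates.update(presence_list.get(user_id, ()))

        # Always include the user themselves.
        candidates.add(user_id)

        # Step 4: with a from_key, keep only users whose presence changed
        # after that token; sending a rare duplicate is fine, missing one is not.
        if from_key is None:
            return candidates
        return set(u for u in candidates if last_changed.get(u, 0) > from_key)

    # Step 5 would then bulk-load the current state for the returned ids.
    print(users_needing_presence(
        "@alice:example.com",
        rooms_by_user={"@alice:example.com": ["!a"]},
        room_members={"!a": ["@alice:example.com", "@bob:example.com"]},
        presence_list={"@alice:example.com": ["@carol:example.com"]},
        last_changed={"@bob:example.com": 7},
        from_key=5,
    ))  # only Bob's presence changed after token 5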
+ + user_id = user.to_string() + if from_key is not None: + from_key = int(from_key) room_ids = room_ids or [] presence = self.hs.get_handlers().presence_handler - cachemap = presence._user_cachemap - max_serial = presence._user_cachemap_latest_serial + if not room_ids: + rooms = yield self.store.get_rooms_for_user(user_id) + room_ids = set(e.room_id for e in rooms) - clock = self.clock - latest_serial = 0 + user_ids_to_check = set() + for room_id in room_ids: + users = yield self.store.get_users_in_room(room_id) + user_ids_to_check.update(users) - user_ids_to_check = {user} - presence_list = yield presence.store.get_presence_list( - user.localpart, accepted=True - ) - if presence_list is not None: - user_ids_to_check |= set( - UserID.from_string(p["observed_user_id"]) for p in presence_list + plist = yield self.store.get_presence_list_accepted(user.localpart) + user_ids_to_check.update([row["observed_user_id"] for row in plist]) + + # Always include yourself. Only really matters for when the user is + # not in any rooms, but still. + user_ids_to_check.add(user_id) + + max_token = self.store.get_current_presence_token() + + if from_key: + user_ids_changed = self.store.presence_stream_cache.get_entities_changed( + user_ids_to_check, from_key, ) - for room_id in set(room_ids) & set(presence._room_serials): - if presence._room_serials[room_id] > from_key: - joined = yield presence.get_joined_users_for_room_id(room_id) - user_ids_to_check |= set(joined) - - updates = [] - for observed_user in user_ids_to_check & set(cachemap): - cached = cachemap[observed_user] - - if cached.serial <= from_key or cached.serial > max_serial: - continue - - latest_serial = max(cached.serial, latest_serial) - updates.append(cached.make_event(user=observed_user, clock=clock)) - - # TODO(paul): limit - - for serial, user_ids in presence._remote_offline_serials: - if serial <= from_key: - break - - if serial > max_serial: - continue - - latest_serial = max(latest_serial, serial) - for u in user_ids: - updates.append({ - "type": "m.presence", - "content": {"user_id": u, "presence": PresenceState.OFFLINE}, - }) - # TODO(paul): For the v2 API we want to tell the client their from_key - # is too old if we fell off the end of the _remote_offline_serials - # list, and get them to invalidate+resync. In v1 we have no such - # concept so this is a best-effort result. - - if updates: - defer.returnValue((updates, latest_serial)) else: - defer.returnValue(([], presence._user_cachemap_latest_serial)) + user_ids_changed = user_ids_to_check + + updates = yield presence.current_state_for_users(user_ids_changed) + + now = self.clock.time_msec() + + defer.returnValue(([ + { + "type": "m.presence", + "content": _format_user_presence_state(s, now), + } + for s in updates.values() + if include_offline or s.state != PresenceState.OFFLINE + ], max_token)) def get_current_key(self): - presence = self.hs.get_handlers().presence_handler - return presence._user_cachemap_latest_serial + return self.store.get_current_presence_token() - @defer.inlineCallbacks def get_pagination_rows(self, user, pagination_config, key): - # TODO (erikj): Does this make sense? Ordering? 
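For reference, the m.presence events assembled just above from _format_user_presence_state come out roughly like this; the values are invented for illustration.

    now = 1455816000000              # clock.time_msec()
    last_active_ts = now - 15000     # last activity 15 seconds ago

    event = {
        "type": "m.presence",
        "content": {
            "user_id": "@alice:example.com",
            "presence": "online",                    # PresenceState.ONLINE
            "last_active_ago": now - last_active_ts,
            "status_msg": "at lunch",                # omitted when offline
            "currently_active": True,                # only present while online
        },
    }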
- - from_key = int(pagination_config.from_key) - - if pagination_config.to_key: - to_key = int(pagination_config.to_key) - else: - to_key = -1 - - presence = self.hs.get_handlers().presence_handler - cachemap = presence._user_cachemap - - user_ids_to_check = {user} - presence_list = yield presence.store.get_presence_list( - user.localpart, accepted=True - ) - if presence_list is not None: - user_ids_to_check |= set( - UserID.from_string(p["observed_user_id"]) for p in presence_list - ) - room_ids = yield presence.get_joined_rooms_for_user(user) - for room_id in set(room_ids) & set(presence._room_serials): - if presence._room_serials[room_id] >= from_key: - joined = yield presence.get_joined_users_for_room_id(room_id) - user_ids_to_check |= set(joined) - - updates = [] - for observed_user in user_ids_to_check & set(cachemap): - if not (to_key < cachemap[observed_user].serial <= from_key): - continue - - updates.append((observed_user, cachemap[observed_user])) - - # TODO(paul): limit - - if updates: - clock = self.clock - - earliest_serial = max([x[1].serial for x in updates]) - data = [x[1].make_event(user=x[0], clock=clock) for x in updates] - - defer.returnValue((data, earliest_serial)) - else: - defer.returnValue(([], 0)) - - -class UserPresenceCache(object): - """Store an observed user's state and status message. - - Includes the update timestamp. - """ - def __init__(self): - self.state = {"presence": PresenceState.OFFLINE} - self.serial = None - - def __repr__(self): - return "UserPresenceCache(state=%r, serial=%r)" % ( - self.state, self.serial - ) - - def update(self, state, serial): - assert("mtime_age" not in state) - - self.state.update(state) - # Delete keys that are now 'None' - for k in self.state.keys(): - if self.state[k] is None: - del self.state[k] - - self.serial = serial - - if "status_msg" in state: - self.status_msg = state["status_msg"] - else: - self.status_msg = None - - def get_state(self): - # clone it so caller can't break our cache - state = dict(self.state) - return state - - def make_event(self, user, clock): - content = self.get_state() - content["user_id"] = user.to_string() - - if "last_active" in content: - content["last_active_ago"] = int( - clock.time_msec() - content.pop("last_active") - ) - - return {"type": "m.presence", "content": content} + return self.get_new_events(user, from_key=None, include_offline=False) diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index 629e6e3594..7084a7396f 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -49,6 +49,9 @@ class ProfileHandler(BaseHandler): distributor = hs.get_distributor() self.distributor = distributor + distributor.declare("collect_presencelike_data") + distributor.declare("changed_presencelike_data") + distributor.observe("registered_user", self.registered_user) distributor.observe( diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 1d0f0058a2..c5c13e085b 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -582,6 +582,28 @@ class SyncHandler(BaseHandler): if room_sync: joined.append(room_sync) + # For each newly joined room, we want to send down presence of + # existing users. + presence_handler = self.hs.get_handlers().presence_handler + extra_presence_users = set() + for room_id in newly_joined_rooms: + users = yield self.store.get_users_in_room(event.room_id) + extra_presence_users.update(users) + + # For each new member, send down presence. 
+ for joined_sync in joined: + it = itertools.chain(joined_sync.timeline.events, joined_sync.state.values()) + for event in it: + if event.type == EventTypes.Member: + if event.membership == Membership.JOIN: + extra_presence_users.add(event.state_key) + + states = yield presence_handler.get_states( + [u for u in extra_presence_users if u != user_id], + as_event=True, + ) + presence.extend(states) + account_data_for_user = sync_config.filter_collection.filter_account_data( self.account_data_for_user(account_data) ) diff --git a/synapse/rest/client/v1/presence.py b/synapse/rest/client/v1/presence.py index a6f8754e32..27ea5f2a43 100644 --- a/synapse/rest/client/v1/presence.py +++ b/synapse/rest/client/v1/presence.py @@ -17,7 +17,7 @@ """ from twisted.internet import defer -from synapse.api.errors import SynapseError +from synapse.api.errors import SynapseError, AuthError from synapse.types import UserID from .base import ClientV1RestServlet, client_path_patterns @@ -35,8 +35,15 @@ class PresenceStatusRestServlet(ClientV1RestServlet): requester = yield self.auth.get_user_by_req(request) user = UserID.from_string(user_id) - state = yield self.handlers.presence_handler.get_state( - target_user=user, auth_user=requester.user) + if requester.user != user: + allowed = yield self.handlers.presence_handler.is_visible( + observed_user=user, observer_user=requester.user, + ) + + if not allowed: + raise AuthError(403, "You are allowed to see their presence.") + + state = yield self.handlers.presence_handler.get_state(target_user=user) defer.returnValue((200, state)) @@ -45,6 +52,9 @@ class PresenceStatusRestServlet(ClientV1RestServlet): requester = yield self.auth.get_user_by_req(request) user = UserID.from_string(user_id) + if requester.user != user: + raise AuthError(403, "Can only set your own presence state") + state = {} try: content = json.loads(request.content.read()) @@ -63,8 +73,7 @@ class PresenceStatusRestServlet(ClientV1RestServlet): except: raise SynapseError(400, "Unable to parse state") - yield self.handlers.presence_handler.set_state( - target_user=user, auth_user=requester.user, state=state) + yield self.handlers.presence_handler.set_state(user, state) defer.returnValue((200, {})) @@ -87,11 +96,8 @@ class PresenceListRestServlet(ClientV1RestServlet): raise SynapseError(400, "Cannot get another user's presence list") presence = yield self.handlers.presence_handler.get_presence_list( - observer_user=user, accepted=True) - - for p in presence: - observed_user = p.pop("observed_user") - p["user_id"] = observed_user.to_string() + observer_user=user, accepted=True + ) defer.returnValue((200, presence)) diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 24706f9383..a8e89c7fe9 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -304,18 +304,6 @@ class RoomMemberListRestServlet(ClientV1RestServlet): if event["type"] != EventTypes.Member: continue chunk.append(event) - # FIXME: should probably be state_key here, not user_id - target_user = UserID.from_string(event["user_id"]) - # Presence is an optional cache; don't fail if we can't fetch it - try: - presence_handler = self.handlers.presence_handler - presence_state = yield presence_handler.get_state( - target_user=target_user, - auth_user=requester.user, - ) - event["content"].update(presence_state) - except: - pass defer.returnValue((200, { "chunk": chunk @@ -541,6 +529,10 @@ class RoomTypingRestServlet(ClientV1RestServlet): "/rooms/(?P[^/]*)/typing/(?P[^/]*)$" ) + def 
__init__(self, hs): + super(RoomTypingRestServlet, self).__init__(hs) + self.presence_handler = hs.get_handlers().presence_handler + @defer.inlineCallbacks def on_PUT(self, request, room_id, user_id): requester = yield self.auth.get_user_by_req(request) @@ -552,6 +544,8 @@ class RoomTypingRestServlet(ClientV1RestServlet): typing_handler = self.handlers.typing_notification_handler + yield self.presence_handler.bump_presence_active_time(requester.user) + if content["typing"]: yield typing_handler.started_typing( target_user=target_user, diff --git a/synapse/rest/client/v2_alpha/receipts.py b/synapse/rest/client/v2_alpha/receipts.py index eb4b369a3d..b831d8c95e 100644 --- a/synapse/rest/client/v2_alpha/receipts.py +++ b/synapse/rest/client/v2_alpha/receipts.py @@ -37,6 +37,7 @@ class ReceiptRestServlet(RestServlet): self.hs = hs self.auth = hs.get_auth() self.receipts_handler = hs.get_handlers().receipts_handler + self.presence_handler = hs.get_handlers().presence_handler @defer.inlineCallbacks def on_POST(self, request, room_id, receipt_type, event_id): @@ -45,6 +46,8 @@ class ReceiptRestServlet(RestServlet): if receipt_type != "m.read": raise SynapseError(400, "Receipt type must be 'm.read'") + yield self.presence_handler.bump_presence_active_time(requester.user) + yield self.receipts_handler.received_client_receipt( room_id, receipt_type, diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py index accbc6cfac..de4a020ad4 100644 --- a/synapse/rest/client/v2_alpha/sync.py +++ b/synapse/rest/client/v2_alpha/sync.py @@ -25,6 +25,7 @@ from synapse.events.utils import ( ) from synapse.api.filtering import FilterCollection, DEFAULT_FILTER_COLLECTION from synapse.api.errors import SynapseError +from synapse.api.constants import PresenceState from ._base import client_v2_patterns import copy @@ -82,6 +83,7 @@ class SyncRestServlet(RestServlet): self.sync_handler = hs.get_handlers().sync_handler self.clock = hs.get_clock() self.filtering = hs.get_filtering() + self.presence_handler = hs.get_handlers().presence_handler @defer.inlineCallbacks def on_GET(self, request): @@ -139,17 +141,19 @@ class SyncRestServlet(RestServlet): else: since_token = None - if set_presence == "online": - yield self.event_stream_handler.started_stream(user) + affect_presence = set_presence != PresenceState.OFFLINE - try: + if affect_presence: + yield self.presence_handler.set_state(user, {"presence": set_presence}) + + context = yield self.presence_handler.user_syncing( + user.to_string(), affect_presence=affect_presence, + ) + with context: sync_result = yield self.sync_handler.wait_for_sync_for_user( sync_config, since_token=since_token, timeout=timeout, full_state=full_state ) - finally: - if set_presence == "online": - self.event_stream_handler.stopped_stream(user) time_now = self.clock.time_msec() diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 5a9e7720d9..8c3cf9e801 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -20,7 +20,7 @@ from .appservice import ( from ._base import Cache from .directory import DirectoryStore from .events import EventsStore -from .presence import PresenceStore +from .presence import PresenceStore, UserPresenceState from .profile import ProfileStore from .registration import RegistrationStore from .room import RoomStore @@ -47,6 +47,7 @@ from .account_data import AccountDataStore from util.id_generators import IdGenerator, StreamIdGenerator +from synapse.api.constants import PresenceState from 
synapse.util.caches.stream_change_cache import StreamChangeCache @@ -110,6 +111,9 @@ class DataStore(RoomMemberStore, RoomStore, self._account_data_id_gen = StreamIdGenerator( db_conn, "account_data_max_stream_id", "stream_id" ) + self._presence_id_gen = StreamIdGenerator( + db_conn, "presence_stream", "stream_id" + ) self._transaction_id_gen = IdGenerator("sent_transactions", "id", self) self._state_groups_id_gen = IdGenerator("state_groups", "id", self) @@ -119,7 +123,7 @@ class DataStore(RoomMemberStore, RoomStore, self._push_rule_id_gen = IdGenerator("push_rules", "id", self) self._push_rules_enable_id_gen = IdGenerator("push_rules_enable", "id", self) - events_max = self._stream_id_gen.get_max_token(None) + events_max = self._stream_id_gen.get_max_token() event_cache_prefill, min_event_val = self._get_cache_dict( db_conn, "events", entity_column="room_id", @@ -135,13 +139,31 @@ class DataStore(RoomMemberStore, RoomStore, "MembershipStreamChangeCache", events_max, ) - account_max = self._account_data_id_gen.get_max_token(None) + account_max = self._account_data_id_gen.get_max_token() self._account_data_stream_cache = StreamChangeCache( "AccountDataAndTagsChangeCache", account_max, ) + self.__presence_on_startup = self._get_active_presence(db_conn) + + presence_cache_prefill, min_presence_val = self._get_cache_dict( + db_conn, "presence_stream", + entity_column="user_id", + stream_column="stream_id", + max_value=self._presence_id_gen.get_max_token(), + ) + self.presence_stream_cache = StreamChangeCache( + "PresenceStreamChangeCache", min_presence_val, + prefilled_cache=presence_cache_prefill + ) + super(DataStore, self).__init__(hs) + def take_presence_startup_info(self): + active_on_startup = self.__presence_on_startup + self.__presence_on_startup = None + return active_on_startup + def _get_cache_dict(self, db_conn, table, entity_column, stream_column, max_value): # Fetch a mapping of room_id -> max stream position for "recent" rooms. # It doesn't really matter how many we get, the StreamChangeCache will @@ -161,6 +183,7 @@ class DataStore(RoomMemberStore, RoomStore, txn = db_conn.cursor() txn.execute(sql, (int(max_value),)) rows = txn.fetchall() + txn.close() cache = { row[0]: int(row[1]) @@ -174,6 +197,27 @@ class DataStore(RoomMemberStore, RoomStore, return cache, min_val + def _get_active_presence(self, db_conn): + """Fetch non-offline presence from the database so that we can register + the appropriate time outs. + """ + + sql = ( + "SELECT user_id, state, last_active, last_federation_update," + " last_user_sync, status_msg, currently_active FROM presence_stream" + " WHERE state != ?" + ) + sql = self.database_engine.convert_param_style(sql) + + txn = db_conn.cursor() + txn.execute(sql, (PresenceState.OFFLINE,)) + rows = self.cursor_to_dict(txn) + + for row in rows: + row["currently_active"] = bool(row["currently_active"]) + + return [UserPresenceState(**row) for row in rows] + @defer.inlineCallbacks def insert_client_ip(self, user, access_token, ip, user_agent): now = int(self._clock.time_msec()) diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index 850736c85e..0fd5d497ab 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -25,7 +25,7 @@ logger = logging.getLogger(__name__) # Remember to update this number every time a change is made to database # schema files, so the users will be informed on server restarts. 
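The prefilled presence_stream_cache set up above lets an incremental sync ask "which of these users' presence changed after token X" without touching the database for users that have not changed. A toy model of that idea follows; it is not the real StreamChangeCache API, just the shape of it.

    class ToyChangeCache(object):
        """Remembers the last stream position at which each user changed."""

        def __init__(self, earliest_known_stream_id, prefilled=None):
            self._earliest = earliest_known_stream_id
            self._last_changed = dict(prefilled or {})  # user_id -> stream_id

        def entity_has_changed(self, user_id, stream_id):
            # Called whenever a new presence row is written for user_id.
            if stream_id > self._last_changed.get(user_id, 0):
                self._last_changed[user_id] = stream_id

        def get_entities_changed(self, user_ids, from_token):
            if from_token < self._earliest:
                # Token predates our knowledge: conservatively report
                # everything as possibly changed.
                return set(user_ids)
            return set(
                u for u in user_ids
                if self._last_changed.get(u, self._earliest) > from_token
            )

    cache = ToyChangeCache(earliest_known_stream_id=100,
                           prefilled={"@bob:example.com": 120})
    print(cache.get_entities_changed(
        ["@alice:example.com", "@bob:example.com"], from_token=110,
    ))  # only Bob has changed since token 110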
-SCHEMA_VERSION = 29 +SCHEMA_VERSION = 30 dir_path = os.path.abspath(os.path.dirname(__file__)) diff --git a/synapse/storage/presence.py b/synapse/storage/presence.py index ef525f34c5..b133979102 100644 --- a/synapse/storage/presence.py +++ b/synapse/storage/presence.py @@ -14,73 +14,128 @@ # limitations under the License. from ._base import SQLBaseStore -from synapse.util.caches.descriptors import cached, cachedList +from synapse.api.constants import PresenceState +from synapse.util.caches.descriptors import cached, cachedInlineCallbacks +from collections import namedtuple from twisted.internet import defer +class UserPresenceState(namedtuple("UserPresenceState", + ("user_id", "state", "last_active", "last_federation_update", + "last_user_sync", "status_msg", "currently_active"))): + """Represents the current presence state of the user. + + user_id (str) + last_active (int): Time in msec that the user last interacted with server. + last_federation_update (int): Time in msec since either a) we sent a presence + update to other servers or b) we received a presence update, depending + on if is a local user or not. + last_user_sync (int): Time in msec that the user last *completed* a sync + (or event stream). + status_msg (str): User set status message. + """ + + def copy_and_replace(self, **kwargs): + return self._replace(**kwargs) + + @classmethod + def default(cls, user_id): + """Returns a default presence state. + """ + return cls( + user_id=user_id, + state=PresenceState.OFFLINE, + last_active=0, + last_federation_update=0, + last_user_sync=0, + status_msg=None, + currently_active=False, + ) + + class PresenceStore(SQLBaseStore): - def create_presence(self, user_localpart): - res = self._simple_insert( - table="presence", - values={"user_id": user_localpart}, - desc="create_presence", + @defer.inlineCallbacks + def update_presence(self, presence_states): + stream_id_manager = yield self._presence_id_gen.get_next(self) + with stream_id_manager as stream_id: + yield self.runInteraction( + "update_presence", + self._update_presence_txn, stream_id, presence_states, + ) + + defer.returnValue((stream_id, self._presence_id_gen.get_max_token())) + + def _update_presence_txn(self, txn, stream_id, presence_states): + for state in presence_states: + txn.call_after( + self.presence_stream_cache.entity_has_changed, + state.user_id, stream_id, + ) + + # Actually insert new rows + self._simple_insert_many_txn( + txn, + table="presence_stream", + values=[ + { + "stream_id": stream_id, + "user_id": state.user_id, + "state": state.state, + "last_active": state.last_active, + "last_federation_update": state.last_federation_update, + "last_user_sync": state.last_user_sync, + "status_msg": state.status_msg, + "currently_active": state.currently_active, + } + for state in presence_states + ], ) - self.get_presence_state.invalidate((user_localpart,)) - return res - - def has_presence_state(self, user_localpart): - return self._simple_select_one( - table="presence", - keyvalues={"user_id": user_localpart}, - retcols=["user_id"], - allow_none=True, - desc="has_presence_state", + # Delete old rows to stop database from getting really big + sql = ( + "DELETE FROM presence_stream WHERE" + " stream_id < ?" 
+ " AND user_id IN (%s)" ) - @cached(max_entries=2000) - def get_presence_state(self, user_localpart): - return self._simple_select_one( - table="presence", - keyvalues={"user_id": user_localpart}, - retcols=["state", "status_msg", "mtime"], - desc="get_presence_state", + batches = ( + presence_states[i:i + 50] + for i in xrange(0, len(presence_states), 50) ) - - @cachedList(get_presence_state.cache, list_name="user_localparts", - inlineCallbacks=True) - def get_presence_states(self, user_localparts): - rows = yield self._simple_select_many_batch( - table="presence", - column="user_id", - iterable=user_localparts, - retcols=("user_id", "state", "status_msg", "mtime",), - desc="get_presence_states", - ) - - defer.returnValue({ - row["user_id"]: { - "state": row["state"], - "status_msg": row["status_msg"], - "mtime": row["mtime"], - } - for row in rows - }) + for states in batches: + args = [stream_id] + args.extend(s.user_id for s in states) + txn.execute( + sql % (",".join("?" for _ in states),), + args + ) @defer.inlineCallbacks - def set_presence_state(self, user_localpart, new_state): - res = yield self._simple_update_one( - table="presence", - keyvalues={"user_id": user_localpart}, - updatevalues={"state": new_state["state"], - "status_msg": new_state["status_msg"], - "mtime": self._clock.time_msec()}, - desc="set_presence_state", + def get_presence_for_users(self, user_ids): + rows = yield self._simple_select_many_batch( + table="presence_stream", + column="user_id", + iterable=user_ids, + keyvalues={}, + retcols=( + "user_id", + "state", + "last_active", + "last_federation_update", + "last_user_sync", + "status_msg", + "currently_active", + ), ) - self.get_presence_state.invalidate((user_localpart,)) - defer.returnValue(res) + for row in rows: + row["currently_active"] = bool(row["currently_active"]) + + defer.returnValue([UserPresenceState(**row) for row in rows]) + + def get_current_presence_token(self): + return self._presence_id_gen.get_max_token() def allow_presence_visible(self, observed_localpart, observer_userid): return self._simple_insert( @@ -128,6 +183,7 @@ class PresenceStore(SQLBaseStore): desc="set_presence_list_accepted", ) self.get_presence_list_accepted.invalidate((observer_localpart,)) + self.get_presence_list_observers_accepted.invalidate((observed_userid,)) defer.returnValue(result) def get_presence_list(self, observer_localpart, accepted=None): @@ -154,6 +210,19 @@ class PresenceStore(SQLBaseStore): desc="get_presence_list_accepted", ) + @cachedInlineCallbacks() + def get_presence_list_observers_accepted(self, observed_userid): + user_localparts = yield self._simple_select_onecol( + table="presence_list", + keyvalues={"observed_user_id": observed_userid, "accepted": True}, + retcol="user_id", + desc="get_presence_list_accepted", + ) + + defer.returnValue([ + "@%s:%s" % (u, self.hs.hostname,) for u in user_localparts + ]) + @defer.inlineCallbacks def del_presence_list(self, observer_localpart, observed_userid): yield self._simple_delete_one( @@ -163,3 +232,4 @@ class PresenceStore(SQLBaseStore): desc="del_presence_list", ) self.get_presence_list_accepted.invalidate((observer_localpart,)) + self.get_presence_list_observers_accepted.invalidate((observed_userid,)) diff --git a/synapse/storage/schema/delta/30/presence_stream.sql b/synapse/storage/schema/delta/30/presence_stream.sql new file mode 100644 index 0000000000..14f5e3d30a --- /dev/null +++ b/synapse/storage/schema/delta/30/presence_stream.sql @@ -0,0 +1,30 @@ +/* Copyright 2016 OpenMarket Ltd + * + * 
Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + + CREATE TABLE presence_stream( + stream_id BIGINT, + user_id TEXT, + state TEXT, + last_active BIGINT, + last_federation_update BIGINT, + last_user_sync BIGINT, + status_msg TEXT, + currently_active BOOLEAN + ); + + CREATE INDEX presence_stream_id ON presence_stream(stream_id, user_id); + CREATE INDEX presence_stream_user_id ON presence_stream(user_id); + CREATE INDEX presence_stream_state ON presence_stream(state); diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index 5c522f4ab9..5ce54f76de 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -130,9 +130,11 @@ class StreamIdGenerator(object): return manager() - def get_max_token(self, store): + def get_max_token(self, *args): """Returns the maximum stream id such that all stream ids less than or equal to it have been successfully persisted. + + Used to take a DataStore param, which is no longer needed. """ with self._lock: if self._unfinished_ids: diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index 133671e238..3b9da5b34a 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -42,7 +42,7 @@ class Clock(object): def time_msec(self): """Returns the current system time in miliseconds since epoch.""" - return self.time() * 1000 + return int(self.time() * 1000) def looping_call(self, f, msec): l = task.LoopingCall(f) diff --git a/tests/utils.py b/tests/utils.py index 3b1eb50d8d..f71125042b 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -224,12 +224,12 @@ class MockClock(object): def time_msec(self): return self.time() * 1000 - def call_later(self, delay, callback): + def call_later(self, delay, callback, *args, **kwargs): current_context = LoggingContext.current_context() def wrapped_callback(): LoggingContext.thread_local.current_context = current_context - callback() + callback(*args, **kwargs) t = [self.now + delay, wrapped_callback, False] self.timers.append(t) From c229c87398963ef03a82fb674e9725a70b684aa7 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 17 Feb 2016 15:48:29 +0000 Subject: [PATCH 180/349] Remove spurious comment --- synapse/handlers/presence.py | 1 - 1 file changed, 1 deletion(-) diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 26f2e669ce..38d2ecc43a 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -810,7 +810,6 @@ def should_notify(old_state, new_state): return True if old_state.state != new_state.state: - # Nothing to report. return True return False From 591af2d074044a70a48b033c4dfc322f58189d3e Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Wed, 17 Feb 2016 15:50:13 +0000 Subject: [PATCH 181/349] Some cleanup I'm not particularly happy with the "action" switching, but there's no convenient way to defer the work that needs to happen after it, so... 
:( --- synapse/handlers/room.py | 120 +++++++++++++++------------------ synapse/rest/client/v1/room.py | 6 +- 2 files changed, 59 insertions(+), 67 deletions(-) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index f85a5f2677..cd04ac09fa 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -397,7 +397,7 @@ class RoomMemberHandler(BaseHandler): room_id, action, txn_id=None, - room_hosts=None, + remote_room_hosts=None, ratelimit=True, ): effective_membership_state = action @@ -448,7 +448,7 @@ class RoomMemberHandler(BaseHandler): context, is_guest=requester.is_guest, ratelimit=ratelimit, - room_hosts=room_hosts, + remote_room_hosts=remote_room_hosts, from_client=True, ) @@ -461,11 +461,12 @@ class RoomMemberHandler(BaseHandler): event, context, is_guest=False, - room_hosts=None, + remote_room_hosts=None, ratelimit=True, from_client=True, ): - """ Change the membership status of a user in a room. + """ + Change the membership status of a user in a room. Args: event (SynapseEvent): The membership event. @@ -482,78 +483,64 @@ class RoomMemberHandler(BaseHandler): Raises: SynapseError if there was a problem changing the membership. """ - user = UserID.from_string(event.sender) + target_user = UserID.from_string(event.state_key) + room_id = event.room_id if from_client: - assert self.hs.is_mine(user), "User must be our own: %s" % (user,) + sender = UserID.from_string(event.sender) + assert self.hs.is_mine(sender), "Sender must be our own: %s" % (sender,) if event.is_state(): message_handler = self.hs.get_handlers().message_handler - prev_state = message_handler.deduplicate_state_event(event, context) - if prev_state is not None: + prev_event = message_handler.deduplicate_state_event(event, context) + if prev_event is not None: return - target_user = UserID.from_string(event.state_key) - - prev_state = context.current_state.get( - (EventTypes.Member, target_user.to_string()), - None - ) - - room_id = event.room_id - - handled = False + action = "send" if event.membership == Membership.JOIN: if is_guest and not self._can_guest_join(context.current_state): # This should be an auth check, but guests are a local concept, # so don't really fit into the general auth process. raise AuthError(403, "Guest access not allowed") - - should_do_dance, room_hosts = self._should_do_dance( + do_remote_join_dance, remote_room_hosts = self._should_do_dance( context, - (self.get_inviter(target_user.to_string(), context.current_state)), - room_hosts, + (self.get_inviter(event.state_key, context.current_state)), + remote_room_hosts, ) - - if should_do_dance: - if len(room_hosts) == 0: - # return the same error as join_room_alias does - raise SynapseError(404, "No known servers") - - # We don't do an auth check if we are doing an invite - # join dance for now, since we're kinda implicitly checking - # that we are allowed to join when we decide whether or not we - # need to do the invite/join dance. 
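The "action" switch introduced here reduces to a small decision table: ordinary sends go through the local event path, while joins and invite rejections may need help from a remote homeserver. A condensed sketch of just that decision (guest access, auth checks and event deduplication omitted):

    def choose_action(membership, host_in_room, needs_remote_join_dance):
        if membership == "join" and needs_remote_join_dance:
            return "remote_join"    # join the room via another homeserver
        if membership == "leave" and not host_in_room:
            return "remote_reject"  # reject an invite held only remotely
        return "send"               # normal local membership event

    assert choose_action("join", False, True) == "remote_join"
    assert choose_action("leave", False, False) == "remote_reject"
    assert choose_action("join", True, False) == "send"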
- yield self.hs.get_handlers().federation_handler.do_invite_join( - room_hosts, - room_id, - event.user_id, - event.content, - ) - handled = True - if event.membership == Membership.LEAVE: + if do_remote_join_dance: + action = "remote_join" + elif event.membership == Membership.LEAVE: is_host_in_room = self.is_host_in_room(context.current_state) if not is_host_in_room: - # Rejecting an invite, rather than leaving a joined room - handler = self.hs.get_handlers().federation_handler - inviter = self.get_inviter(target_user.to_string(), context.current_state) - if not inviter: - # return the same error as join_room_alias does - raise SynapseError(404, "No known servers") - yield handler.do_remotely_reject_invite( - [inviter.domain], - room_id, - event.user_id - ) - handled = True + action = "remote_reject" - # FIXME: This isn't idempotency. - if prev_state and prev_state.membership == event.membership: - # double same action, treat this event as a NOOP. - return + federation_handler = self.hs.get_handlers().federation_handler - if not handled: + if action == "remote_join": + if len(remote_room_hosts) == 0: + raise SynapseError(404, "No known servers") + + # We don't do an auth check if we are doing an invite + # join dance for now, since we're kinda implicitly checking + # that we are allowed to join when we decide whether or not we + # need to do the invite/join dance. + yield federation_handler.do_invite_join( + remote_room_hosts, + event.room_id, + event.user_id, + event.content, + ) + elif action == "remote_reject": + inviter = self.get_inviter(target_user.to_string(), context.current_state) + if not inviter: + raise SynapseError(404, "No known servers") + yield federation_handler.do_remotely_reject_invite( + [inviter.domain], + room_id, + event.user_id + ) + else: yield self.handle_new_client_event( event, context, @@ -561,14 +548,19 @@ class RoomMemberHandler(BaseHandler): ratelimit=ratelimit, ) + prev_member_event = context.current_state.get( + (EventTypes.Member, target_user.to_string()), + None + ) + if event.membership == Membership.JOIN: - if not prev_state or prev_state.membership != Membership.JOIN: + if not prev_member_event or prev_member_event.membership != Membership.JOIN: # Only fire user_joined_room if the user has acutally joined the # room. Don't bother if the user is just changing their profile # info. yield user_joined_room(self.distributor, target_user, room_id) elif event.membership == Membership.LEAVE: - if prev_state and prev_state.membership == Membership.JOIN: + if prev_member_event and prev_member_event.membership == Membership.JOIN: user_left_room(self.distributor, target_user, room_id) def _can_guest_join(self, current_state): @@ -604,7 +596,9 @@ class RoomMemberHandler(BaseHandler): Args: room_alias (RoomAlias): The alias to look up. Returns: - The room ID as a RoomID object. + A tuple of: + The room ID as a RoomID object. + Hosts likely to be participating in the room ([str]). Raises: SynapseError if room alias could not be found. 
""" @@ -615,11 +609,9 @@ class RoomMemberHandler(BaseHandler): raise SynapseError(404, "No such room alias") room_id = mapping["room_id"] - hosts = mapping["servers"] - if not hosts: - raise SynapseError(404, "No known servers") + servers = mapping["servers"] - defer.returnValue((RoomID.from_string(room_id), hosts)) + defer.returnValue((RoomID.from_string(room_id), servers)) def get_inviter(self, user_id, current_state): prev_state = current_state.get((EventTypes.Member, user_id)) diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 179fe9a010..1f5ee09dcc 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -230,11 +230,11 @@ class JoinRoomAliasServlet(ClientV1RestServlet): if RoomID.is_valid(room_identifier): room_id = room_identifier - room_hosts = None + remote_room_hosts = None elif RoomAlias.is_valid(room_identifier): handler = self.handlers.room_member_handler room_alias = RoomAlias.from_string(room_identifier) - room_id, room_hosts = yield handler.lookup_room_alias(room_alias) + room_id, remote_room_hosts = yield handler.lookup_room_alias(room_alias) room_id = room_id.to_string() else: raise SynapseError(400, "%s was not legal room ID or room alias" % ( @@ -247,7 +247,7 @@ class JoinRoomAliasServlet(ClientV1RestServlet): room_id=room_id, action="join", txn_id=txn_id, - room_hosts=room_hosts, + remote_room_hosts=remote_room_hosts, ) defer.returnValue((200, {"room_id": room_id})) From 58371fa263ce6faad3a96c202b226ced32e8e84e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 18 Feb 2016 09:09:50 +0000 Subject: [PATCH 182/349] Comment --- synapse/handlers/presence.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 38d2ecc43a..3b01e9a072 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -775,6 +775,8 @@ class PresenceHandler(BaseHandler): @defer.inlineCallbacks def is_visible(self, observed_user, observer_user): + """Returns whether a user can see another user's presence. + """ observer_rooms = yield self.store.get_rooms_for_user(observer_user.to_string()) observed_rooms = yield self.store.get_rooms_for_user(observed_user.to_string()) From ddca9c56fccfb03653986b36fe6cd11cf57328dc Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 18 Feb 2016 09:11:42 +0000 Subject: [PATCH 183/349] Move if statement --- synapse/handlers/presence.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 3b01e9a072..1942e2ad30 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -298,10 +298,11 @@ class PresenceHandler(BaseHandler): if not state: continue - if self.hs.is_mine_id(user_id): - if state.state == PresenceState.OFFLINE: - continue + if state.state == PresenceState.OFFLINE: + # No timeouts are associated with offline states. + continue + if self.hs.is_mine_id(user_id): if state.state == PresenceState.ONLINE: if now - state.last_active > IDLE_TIMER: # Currently online, but last activity ages ago so auto @@ -331,11 +332,10 @@ class PresenceHandler(BaseHandler): # This is to protect against forgetful/buggy servers, so that # no one gets stuck online forever. if now - state.last_federation_update > FEDERATION_TIMEOUT: - if state.state != PresenceState.OFFLINE: - # The other side seems to have disappeared. 
- changes[user_id] = state.copy_and_replace( - state=PresenceState.OFFLINE, - ) + # The other side seems to have disappeared. + changes[user_id] = state.copy_and_replace( + state=PresenceState.OFFLINE, + ) preserve_fn(self._update_states)(changes.values()) From 114b929f8bdd3dcc165fd488c087aaca3dc8bd91 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 18 Feb 2016 09:16:32 +0000 Subject: [PATCH 184/349] Check presence state is a valid one --- synapse/api/constants.py | 1 - synapse/handlers/presence.py | 6 ++++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/synapse/api/constants.py b/synapse/api/constants.py index 84cbe710b3..8cf4d6169c 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -32,7 +32,6 @@ class PresenceState(object): OFFLINE = u"offline" UNAVAILABLE = u"unavailable" ONLINE = u"online" - FREE_FOR_CHAT = u"free_for_chat" class JoinRules(object): diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 1942e2ad30..439bfe5919 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -606,6 +606,12 @@ class PresenceHandler(BaseHandler): status_msg = state.get("status_msg", None) presence = state["presence"] + valid_presence = ( + PresenceState.ONLINE, PresenceState.UNAVAILABLE, PresenceState.OFFLINE + ) + if presence not in valid_presence: + raise SynapseError(400, "Invalid presence state") + user_id = target_user.to_string() prev_state = yield self.current_state_for_user(user_id) From b31ec214a5c2bd674814b2d052200963c5e5cbd7 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 18 Feb 2016 09:54:08 +0000 Subject: [PATCH 185/349] Remove status_msg when going offline. Don't offline -> online if you send a message --- synapse/handlers/presence.py | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 439bfe5919..3137c23509 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -208,6 +208,11 @@ class PresenceHandler(BaseHandler): to_federation_ping = {} # These need sending keep-alives for new_state in new_states: user_id = new_state.user_id + + # Its fine to not hit the database here, as the only thing not in + # the current state cache are OFFLINE states, where the only field + # of interest is last_active which is safe enough to assume is 0 + # here. prev_state = self.user_to_current_state.get( user_id, UserPresenceState.default(user_id) ) @@ -326,6 +331,7 @@ class PresenceHandler(BaseHandler): if now - state.last_user_sync > SYNC_ONLINE_TIMEOUT: changes[user_id] = state.copy_and_replace( state=PresenceState.OFFLINE, + status_msg=None, ) else: # We expect to be poked occaisonally by the other side. @@ -335,6 +341,7 @@ class PresenceHandler(BaseHandler): # The other side seems to have disappeared. 
changes[user_id] = state.copy_and_replace( state=PresenceState.OFFLINE, + status_msg=None, ) preserve_fn(self._update_states)(changes.values()) @@ -348,10 +355,13 @@ class PresenceHandler(BaseHandler): prev_state = yield self.current_state_for_user(user_id) - yield self._update_states([prev_state.copy_and_replace( - state=PresenceState.ONLINE, - last_active=self.clock.time_msec(), - )]) + new_fields = { + "last_active": self.clock.time_msec(), + } + if prev_state.state == PresenceState.UNAVAILABLE: + new_fields["state"] = PresenceState.ONLINE + + yield self._update_states([prev_state.copy_and_replace(**new_fields)]) @defer.inlineCallbacks def user_syncing(self, user_id, affect_presence=True): @@ -618,7 +628,7 @@ class PresenceHandler(BaseHandler): new_fields = { "state": presence, - "status_msg": status_msg + "status_msg": status_msg if presence != PresenceState.OFFLINE else None } if presence == PresenceState.ONLINE: From 112283e23005bdaa17b6184cb55fd786facff47d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 18 Feb 2016 10:11:43 +0000 Subject: [PATCH 186/349] Prefix TS fields with _ts --- synapse/handlers/presence.py | 54 +++++++++---------- synapse/storage/__init__.py | 4 +- synapse/storage/presence.py | 23 ++++---- .../schema/delta/30/presence_stream.sql | 6 +-- 4 files changed, 44 insertions(+), 43 deletions(-) diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 3137c23509..d296953651 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -127,24 +127,24 @@ class PresenceHandler(BaseHandler): self.wheel_timer.insert( now=now, obj=state.user_id, - then=state.last_active + IDLE_TIMER, + then=state.last_active_ts + IDLE_TIMER, ) self.wheel_timer.insert( now=now, obj=state.user_id, - then=state.last_user_sync + SYNC_ONLINE_TIMEOUT, + then=state.last_user_sync_ts + SYNC_ONLINE_TIMEOUT, ) if self.hs.is_mine_id(state.user_id): self.wheel_timer.insert( now=now, obj=state.user_id, - then=state.last_federation_update + FEDERATION_PING_INTERVAL, + then=state.last_federation_update_ts + FEDERATION_PING_INTERVAL, ) else: self.wheel_timer.insert( now=now, obj=state.user_id, - then=state.last_federation_update + FEDERATION_TIMEOUT, + then=state.last_federation_update_ts + FEDERATION_TIMEOUT, ) # Set of users who have presence in the `user_to_current_state` that @@ -225,7 +225,7 @@ class PresenceHandler(BaseHandler): self.wheel_timer.insert( now=now, obj=user_id, - then=new_state.last_active + IDLE_TIMER + then=new_state.last_active_ts + IDLE_TIMER ) if new_state.state != PresenceState.OFFLINE: @@ -233,14 +233,14 @@ class PresenceHandler(BaseHandler): self.wheel_timer.insert( now=now, obj=user_id, - then=new_state.last_user_sync + SYNC_ONLINE_TIMEOUT + then=new_state.last_user_sync_ts + SYNC_ONLINE_TIMEOUT ) - last_federate = new_state.last_federation_update + last_federate = new_state.last_federation_update_ts if now - last_federate > FEDERATION_PING_INTERVAL: # Been a while since we've poked remote servers new_state = new_state.copy_and_replace( - last_federation_update=now, + last_federation_update_ts=now, ) to_federation_ping[user_id] = new_state @@ -248,11 +248,11 @@ class PresenceHandler(BaseHandler): self.wheel_timer.insert( now=now, obj=user_id, - then=new_state.last_federation_update + FEDERATION_TIMEOUT + then=new_state.last_federation_update_ts + FEDERATION_TIMEOUT ) if new_state.state == PresenceState.ONLINE: - currently_active = now - new_state.last_active < LAST_ACTIVE_GRANULARITY + currently_active = now - 
new_state.last_active_ts < LAST_ACTIVE_GRANULARITY new_state = new_state.copy_and_replace( currently_active=currently_active, ) @@ -260,7 +260,7 @@ class PresenceHandler(BaseHandler): # Check whether the change was something worth notifying about if should_notify(prev_state, new_state): new_state.copy_and_replace( - last_federation_update=now, + last_federation_update_ts=now, ) to_notify[user_id] = new_state @@ -309,18 +309,18 @@ class PresenceHandler(BaseHandler): if self.hs.is_mine_id(user_id): if state.state == PresenceState.ONLINE: - if now - state.last_active > IDLE_TIMER: + if now - state.last_active_ts > IDLE_TIMER: # Currently online, but last activity ages ago so auto # idle changes[user_id] = state.copy_and_replace( state=PresenceState.UNAVAILABLE, ) - elif now - state.last_active > LAST_ACTIVE_GRANULARITY: + elif now - state.last_active_ts > LAST_ACTIVE_GRANULARITY: # So that we send down a notification that we've # stopped updating. changes[user_id] = state - if now - state.last_federation_update > FEDERATION_PING_INTERVAL: + if now - state.last_federation_update_ts > FEDERATION_PING_INTERVAL: # Need to send ping to other servers to ensure they don't # timeout and set us to offline changes[user_id] = state @@ -328,7 +328,7 @@ class PresenceHandler(BaseHandler): # If there are have been no sync for a while (and none ongoing), # set presence to offline if not self.user_to_num_current_syncs.get(user_id, 0): - if now - state.last_user_sync > SYNC_ONLINE_TIMEOUT: + if now - state.last_user_sync_ts > SYNC_ONLINE_TIMEOUT: changes[user_id] = state.copy_and_replace( state=PresenceState.OFFLINE, status_msg=None, @@ -337,7 +337,7 @@ class PresenceHandler(BaseHandler): # We expect to be poked occaisonally by the other side. # This is to protect against forgetful/buggy servers, so that # no one gets stuck online forever. - if now - state.last_federation_update > FEDERATION_TIMEOUT: + if now - state.last_federation_update_ts > FEDERATION_TIMEOUT: # The other side seems to have disappeared. changes[user_id] = state.copy_and_replace( state=PresenceState.OFFLINE, @@ -356,7 +356,7 @@ class PresenceHandler(BaseHandler): prev_state = yield self.current_state_for_user(user_id) new_fields = { - "last_active": self.clock.time_msec(), + "last_active_ts": self.clock.time_msec(), } if prev_state.state == PresenceState.UNAVAILABLE: new_fields["state"] = PresenceState.ONLINE @@ -388,12 +388,12 @@ class PresenceHandler(BaseHandler): # just update the last sync times. 
yield self._update_states([prev_state.copy_and_replace( state=PresenceState.ONLINE, - last_active=self.clock.time_msec(), - last_user_sync=self.clock.time_msec(), + last_active_ts=self.clock.time_msec(), + last_user_sync_ts=self.clock.time_msec(), )]) else: yield self._update_states([prev_state.copy_and_replace( - last_user_sync=self.clock.time_msec(), + last_user_sync_ts=self.clock.time_msec(), )]) @defer.inlineCallbacks @@ -403,7 +403,7 @@ class PresenceHandler(BaseHandler): prev_state = yield self.current_state_for_user(user_id) yield self._update_states([prev_state.copy_and_replace( - last_user_sync=self.clock.time_msec(), + last_user_sync_ts=self.clock.time_msec(), )]) @contextmanager @@ -553,12 +553,12 @@ class PresenceHandler(BaseHandler): new_fields = { "state": presence_state, - "last_federation_update": now, + "last_federation_update_ts": now, } last_active_ago = push.get("last_active_ago", None) if last_active_ago is not None: - new_fields["last_active"] = now - last_active_ago + new_fields["last_active_ts"] = now - last_active_ago new_fields["status_msg"] = push.get("status_msg", None) @@ -632,7 +632,7 @@ class PresenceHandler(BaseHandler): } if presence == PresenceState.ONLINE: - new_fields["last_active"] = self.clock.time_msec() + new_fields["last_active_ts"] = self.clock.time_msec() yield self._update_states([prev_state.copy_and_replace(**new_fields)]) @@ -823,7 +823,7 @@ def should_notify(old_state, new_state): if new_state.currently_active != old_state.currently_active: return True - if new_state.last_active - old_state.last_active > LAST_ACTIVE_GRANULARITY: + if new_state.last_active_ts - old_state.last_active_ts > LAST_ACTIVE_GRANULARITY: # Always notify for a transition where last active gets bumped. return True @@ -841,8 +841,8 @@ def _format_user_presence_state(state, now): "presence": state.state, "user_id": state.user_id, } - if state.last_active: - content["last_active_ago"] = now - state.last_active + if state.last_active_ts: + content["last_active_ago"] = now - state.last_active_ts if state.status_msg and state.state != PresenceState.OFFLINE: content["status_msg"] = state.status_msg if state.state == PresenceState.ONLINE: diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 8c3cf9e801..fcb968e8f4 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -203,8 +203,8 @@ class DataStore(RoomMemberStore, RoomStore, """ sql = ( - "SELECT user_id, state, last_active, last_federation_update," - " last_user_sync, status_msg, currently_active FROM presence_stream" + "SELECT user_id, state, last_active_ts, last_federation_update_ts," + " last_user_sync_ts, status_msg, currently_active FROM presence_stream" " WHERE state != ?" ) sql = self.database_engine.convert_param_style(sql) diff --git a/synapse/storage/presence.py b/synapse/storage/presence.py index b133979102..70ece56548 100644 --- a/synapse/storage/presence.py +++ b/synapse/storage/presence.py @@ -22,8 +22,9 @@ from twisted.internet import defer class UserPresenceState(namedtuple("UserPresenceState", - ("user_id", "state", "last_active", "last_federation_update", - "last_user_sync", "status_msg", "currently_active"))): + ("user_id", "state", "last_active_ts", + "last_federation_update_ts", "last_user_sync_ts", + "status_msg", "currently_active"))): """Represents the current presence state of the user. 
user_id (str) @@ -46,9 +47,9 @@ class UserPresenceState(namedtuple("UserPresenceState", return cls( user_id=user_id, state=PresenceState.OFFLINE, - last_active=0, - last_federation_update=0, - last_user_sync=0, + last_active_ts=0, + last_federation_update_ts=0, + last_user_sync_ts=0, status_msg=None, currently_active=False, ) @@ -82,9 +83,9 @@ class PresenceStore(SQLBaseStore): "stream_id": stream_id, "user_id": state.user_id, "state": state.state, - "last_active": state.last_active, - "last_federation_update": state.last_federation_update, - "last_user_sync": state.last_user_sync, + "last_active_ts": state.last_active_ts, + "last_federation_update_ts": state.last_federation_update_ts, + "last_user_sync_ts": state.last_user_sync_ts, "status_msg": state.status_msg, "currently_active": state.currently_active, } @@ -121,9 +122,9 @@ class PresenceStore(SQLBaseStore): retcols=( "user_id", "state", - "last_active", - "last_federation_update", - "last_user_sync", + "last_active_ts", + "last_federation_update_ts", + "last_user_sync_ts", "status_msg", "currently_active", ), diff --git a/synapse/storage/schema/delta/30/presence_stream.sql b/synapse/storage/schema/delta/30/presence_stream.sql index 14f5e3d30a..606bbb037d 100644 --- a/synapse/storage/schema/delta/30/presence_stream.sql +++ b/synapse/storage/schema/delta/30/presence_stream.sql @@ -18,9 +18,9 @@ stream_id BIGINT, user_id TEXT, state TEXT, - last_active BIGINT, - last_federation_update BIGINT, - last_user_sync BIGINT, + last_active_ts BIGINT, + last_federation_update_ts BIGINT, + last_user_sync_ts BIGINT, status_msg TEXT, currently_active BOOLEAN ); From 8351538873ff9f43122756c83beeff0bee5375be Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 18 Feb 2016 10:12:12 +0000 Subject: [PATCH 187/349] PEP8 --- synapse/handlers/presence.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index d296953651..4b3d037453 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -252,9 +252,9 @@ class PresenceHandler(BaseHandler): ) if new_state.state == PresenceState.ONLINE: - currently_active = now - new_state.last_active_ts < LAST_ACTIVE_GRANULARITY + active = now - new_state.last_active_ts < LAST_ACTIVE_GRANULARITY new_state = new_state.copy_and_replace( - currently_active=currently_active, + currently_active=active, ) # Check whether the change was something worth notifying about From fe95f2217cba3390c367d2bc429abd8f829987d2 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 18 Feb 2016 10:26:24 +0000 Subject: [PATCH 188/349] Add stuff pulled from the DB to the cache --- synapse/handlers/presence.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 4b3d037453..8831d83c56 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -443,10 +443,12 @@ class PresenceHandler(BaseHandler): missing = [user_id for user_id, state in states.items() if not state] if missing: - states.update({ + new = { user_id: UserPresenceState.default(user_id) for user_id in missing - }) + } + states.update(new) + self.user_to_current_state.update(new) defer.returnValue(states) From 9da9826b858bc4fdaf6c6f7f8382043ce151cdf3 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 18 Feb 2016 10:46:16 +0000 Subject: [PATCH 189/349] Remove old tests. 
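The test suites deleted below were written against the old push/poll presence
implementation (UserPresenceCache, _local_pushmap, _remote_recvmap and the
associated datastore mocks) and no longer reflect the stream-based handler
built up in the preceding patches. Purely as an illustrative sketch, and not
part of this patch, the new model can be poked at directly; the snippet
assumes only the UserPresenceState namedtuple, its copy_and_replace() helper,
and the module-level should_notify() and LAST_ACTIVE_GRANULARITY, all as they
appear in the diffs above.

# Illustrative sketch only; relies on the helpers shown in the diffs above.
from synapse.api.constants import PresenceState
from synapse.storage.presence import UserPresenceState
from synapse.handlers.presence import should_notify, LAST_ACTIVE_GRANULARITY

prev = UserPresenceState.default("@apple:test")   # OFFLINE, all *_ts fields 0
curr = prev.copy_and_replace(
    state=PresenceState.ONLINE,
    currently_active=True,
    last_active_ts=LAST_ACTIVE_GRANULARITY + 1,
)

# currently_active flipped and last_active_ts was bumped past
# LAST_ACTIVE_GRANULARITY, so per the checks in should_notify() watchers
# should be told about this change.
assert should_notify(prev, curr)

Because UserPresenceState is a plain namedtuple and should_notify() is a pure
function, a check like this needs none of the homeserver, datastore or clock
mocks that the files removed below had to set up.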
--- tests/handlers/test_presence.py | 1340 ------------------------- tests/handlers/test_presencelike.py | 311 ------ tests/handlers/test_profile.py | 3 - tests/rest/client/v1/test_presence.py | 412 -------- tests/rest/client/v1/test_rooms.py | 6 - tests/storage/test_presence.py | 26 - 6 files changed, 2098 deletions(-) delete mode 100644 tests/handlers/test_presence.py delete mode 100644 tests/handlers/test_presencelike.py delete mode 100644 tests/rest/client/v1/test_presence.py diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py deleted file mode 100644 index 447a22b5fc..0000000000 --- a/tests/handlers/test_presence.py +++ /dev/null @@ -1,1340 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2014-2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from tests import unittest -from twisted.internet import defer, reactor - -from mock import Mock, call, ANY, NonCallableMock -import json - -from tests.utils import ( - MockHttpResource, MockClock, DeferredMockCallable, setup_test_homeserver -) - -from synapse.api.constants import PresenceState -from synapse.api.errors import SynapseError -from synapse.handlers.presence import PresenceHandler, UserPresenceCache -from synapse.streams.config import SourcePaginationConfig -from synapse.types import UserID - -OFFLINE = PresenceState.OFFLINE -UNAVAILABLE = PresenceState.UNAVAILABLE -ONLINE = PresenceState.ONLINE - - -def _expect_edu(destination, edu_type, content, origin="test"): - return { - "origin": origin, - "origin_server_ts": 1000000, - "pdus": [], - "edus": [ - { - "edu_type": edu_type, - "content": content, - } - ], - "pdu_failures": [], - } - -def _make_edu_json(origin, edu_type, content): - return json.dumps(_expect_edu("test", edu_type, content, origin=origin)) - - -class JustPresenceHandlers(object): - def __init__(self, hs): - self.presence_handler = PresenceHandler(hs) - - -class PresenceTestCase(unittest.TestCase): - @defer.inlineCallbacks - def setUp(self): - self.clock = MockClock() - - self.mock_federation_resource = MockHttpResource() - - self.mock_http_client = Mock(spec=[]) - self.mock_http_client.put_json = DeferredMockCallable() - - hs_kwargs = {} - if hasattr(self, "make_datastore_mock"): - hs_kwargs["datastore"] = self.make_datastore_mock() - - hs = yield setup_test_homeserver( - clock=self.clock, - handlers=None, - resource_for_federation=self.mock_federation_resource, - http_client=self.mock_http_client, - keyring=Mock(), - **hs_kwargs - ) - hs.handlers = JustPresenceHandlers(hs) - - self.datastore = hs.get_datastore() - - self.setUp_roommemberhandler_mocks(hs.handlers) - - self.handler = hs.get_handlers().presence_handler - self.event_source = hs.get_event_sources().sources["presence"] - - self.distributor = hs.get_distributor() - self.distributor.declare("user_joined_room") - - yield self.setUp_users(hs) - - def setUp_roommemberhandler_mocks(self, handlers): - self.room_id = "a-room" - self.room_members = [] - - room_member_handler = handlers.room_member_handler = Mock(spec=[ - 
"get_joined_rooms_for_user", - "get_room_members", - "fetch_room_distributions_into", - ]) - self.room_member_handler = room_member_handler - - def get_rooms_for_user(user): - if user in self.room_members: - return defer.succeed([self.room_id]) - else: - return defer.succeed([]) - room_member_handler.get_joined_rooms_for_user = get_rooms_for_user - - def get_room_members(room_id): - if room_id == self.room_id: - return defer.succeed(self.room_members) - else: - return defer.succeed([]) - room_member_handler.get_room_members = get_room_members - - @defer.inlineCallbacks - def fetch_room_distributions_into(room_id, localusers=None, - remotedomains=None, ignore_user=None): - - members = yield get_room_members(room_id) - for member in members: - if ignore_user is not None and member == ignore_user: - continue - - if member.is_mine: - if localusers is not None: - localusers.add(member) - else: - if remotedomains is not None: - remotedomains.add(member.domain) - room_member_handler.fetch_room_distributions_into = ( - fetch_room_distributions_into) - - self.setUp_datastore_room_mocks(self.datastore) - - def setUp_datastore_room_mocks(self, datastore): - def get_room_hosts(room_id): - if room_id == self.room_id: - hosts = set([u.domain for u in self.room_members]) - return defer.succeed(hosts) - else: - return defer.succeed([]) - datastore.get_joined_hosts_for_room = get_room_hosts - - def user_rooms_intersect(userlist): - room_member_ids = map(lambda u: u.to_string(), self.room_members) - - shared = all(map(lambda i: i in room_member_ids, userlist)) - return defer.succeed(shared) - datastore.user_rooms_intersect = user_rooms_intersect - - @defer.inlineCallbacks - def setUp_users(self, hs): - # Some local users to test with - self.u_apple = UserID.from_string("@apple:test") - self.u_banana = UserID.from_string("@banana:test") - self.u_clementine = UserID.from_string("@clementine:test") - - for u in self.u_apple, self.u_banana, self.u_clementine: - yield self.datastore.create_presence(u.localpart) - - yield self.datastore.set_presence_state( - self.u_apple.localpart, {"state": ONLINE, "status_msg": "Online"} - ) - - # ID of a local user that does not exist - self.u_durian = UserID.from_string("@durian:test") - - # A remote user - self.u_cabbage = UserID.from_string("@cabbage:elsewhere") - - -class MockedDatastorePresenceTestCase(PresenceTestCase): - def make_datastore_mock(self): - datastore = Mock(spec=[ - # Bits that Federation needs - "prep_send_transaction", - "delivered_txn", - "get_received_txn_response", - "set_received_txn_response", - "get_destination_retry_timings", - ]) - - self.setUp_datastore_federation_mocks(datastore) - self.setUp_datastore_presence_mocks(datastore) - - return datastore - - def setUp_datastore_federation_mocks(self, datastore): - retry_timings_res = { - "destination": "", - "retry_last_ts": 0, - "retry_interval": 0, - } - datastore.get_destination_retry_timings.return_value = ( - defer.succeed(retry_timings_res) - ) - - def get_received_txn_response(*args): - return defer.succeed(None) - datastore.get_received_txn_response = get_received_txn_response - - def setUp_datastore_presence_mocks(self, datastore): - self.current_user_state = { - "apple": OFFLINE, - "banana": OFFLINE, - "clementine": OFFLINE, - "fig": OFFLINE, - } - - def get_presence_state(user_localpart): - return defer.succeed( - {"state": self.current_user_state[user_localpart], - "status_msg": None, - "mtime": 123456000} - ) - datastore.get_presence_state = get_presence_state - - def 
set_presence_state(user_localpart, new_state): - was = self.current_user_state[user_localpart] - self.current_user_state[user_localpart] = new_state["state"] - return defer.succeed({"state": was}) - datastore.set_presence_state = set_presence_state - - def get_presence_list(user_localpart, accepted): - if not user_localpart in self.PRESENCE_LIST: - return defer.succeed([]) - return defer.succeed([ - {"observed_user_id": u, "accepted": accepted} for u in - self.PRESENCE_LIST[user_localpart]]) - datastore.get_presence_list = get_presence_list - - def is_presence_visible(observed_localpart, observer_userid): - return True - datastore.is_presence_visible = is_presence_visible - - @defer.inlineCallbacks - def setUp_users(self, hs): - # Some local users to test with - self.u_apple = UserID.from_string("@apple:test") - self.u_banana = UserID.from_string("@banana:test") - self.u_clementine = UserID.from_string("@clementine:test") - self.u_durian = UserID.from_string("@durian:test") - self.u_elderberry = UserID.from_string("@elderberry:test") - self.u_fig = UserID.from_string("@fig:test") - - # Remote user - self.u_onion = UserID.from_string("@onion:farm") - self.u_potato = UserID.from_string("@potato:remote") - - yield - - -class PresenceStateTestCase(PresenceTestCase): - """ Tests presence management. """ - @defer.inlineCallbacks - def setUp(self): - yield super(PresenceStateTestCase, self).setUp() - - self.mock_start = Mock() - self.mock_stop = Mock() - - self.handler.start_polling_presence = self.mock_start - self.handler.stop_polling_presence = self.mock_stop - - @defer.inlineCallbacks - def test_get_my_state(self): - state = yield self.handler.get_state( - target_user=self.u_apple, auth_user=self.u_apple - ) - - self.assertEquals( - {"presence": ONLINE, "status_msg": "Online"}, - state - ) - - @defer.inlineCallbacks - def test_get_allowed_state(self): - yield self.datastore.allow_presence_visible( - observed_localpart=self.u_apple.localpart, - observer_userid=self.u_banana.to_string(), - ) - - state = yield self.handler.get_state( - target_user=self.u_apple, auth_user=self.u_banana - ) - - self.assertEquals( - {"presence": ONLINE, "status_msg": "Online"}, - state - ) - - @defer.inlineCallbacks - def test_get_same_room_state(self): - self.room_members = [self.u_apple, self.u_clementine] - - state = yield self.handler.get_state( - target_user=self.u_apple, auth_user=self.u_clementine - ) - - self.assertEquals( - {"presence": ONLINE, "status_msg": "Online"}, - state - ) - - @defer.inlineCallbacks - def test_get_disallowed_state(self): - self.room_members = [] - - yield self.assertFailure( - self.handler.get_state( - target_user=self.u_apple, auth_user=self.u_clementine - ), - SynapseError - ) - - @defer.inlineCallbacks - def test_set_my_state(self): - yield self.handler.set_state( - target_user=self.u_apple, auth_user=self.u_apple, - state={"presence": UNAVAILABLE, "status_msg": "Away"}) - - self.assertEquals( - {"state": UNAVAILABLE, - "status_msg": "Away", - "mtime": 1000000}, - (yield self.datastore.get_presence_state(self.u_apple.localpart)) - ) - - self.mock_start.assert_called_with(self.u_apple, - state={ - "presence": UNAVAILABLE, - "status_msg": "Away", - "last_active": 1000000, # MockClock - }) - - yield self.handler.set_state( - target_user=self.u_apple, auth_user=self.u_apple, - state={"presence": OFFLINE}) - - self.mock_stop.assert_called_with(self.u_apple) - - -class PresenceInvitesTestCase(PresenceTestCase): - """ Tests presence management. 
""" - @defer.inlineCallbacks - def setUp(self): - yield super(PresenceInvitesTestCase, self).setUp() - - self.mock_start = Mock() - self.mock_stop = Mock() - - self.handler.start_polling_presence = self.mock_start - self.handler.stop_polling_presence = self.mock_stop - - @defer.inlineCallbacks - def test_invite_local(self): - # TODO(paul): This test will likely break if/when real auth permissions - # are added; for now the HS will always accept any invite - - yield self.handler.send_presence_invite( - observer_user=self.u_apple, observed_user=self.u_banana) - - self.assertEquals( - [{"observed_user_id": "@banana:test", "accepted": 1}], - (yield self.datastore.get_presence_list(self.u_apple.localpart)) - ) - self.assertTrue( - (yield self.datastore.is_presence_visible( - observed_localpart=self.u_banana.localpart, - observer_userid=self.u_apple.to_string(), - )) - ) - - self.mock_start.assert_called_with( - self.u_apple, target_user=self.u_banana) - - @defer.inlineCallbacks - def test_invite_local_nonexistant(self): - yield self.handler.send_presence_invite( - observer_user=self.u_apple, observed_user=self.u_durian) - - self.assertEquals( - [], - (yield self.datastore.get_presence_list(self.u_apple.localpart)) - ) - - @defer.inlineCallbacks - def test_invite_remote(self): - # Use a different destination, otherwise retry logic might fail the - # request - u_rocket = UserID.from_string("@rocket:there") - - put_json = self.mock_http_client.put_json - put_json.expect_call_and_return( - call("there", - path="/_matrix/federation/v1/send/1000000/", - data=_expect_edu("there", "m.presence_invite", - content={ - "observer_user": "@apple:test", - "observed_user": "@rocket:there", - } - ), - json_data_callback=ANY, - long_retries=True, - ), - defer.succeed((200, "OK")) - ) - - yield self.handler.send_presence_invite( - observer_user=self.u_apple, observed_user=u_rocket) - - self.assertEquals( - [{"observed_user_id": "@rocket:there", "accepted": 0}], - (yield self.datastore.get_presence_list(self.u_apple.localpart)) - ) - - yield put_json.await_calls() - - @defer.inlineCallbacks - def test_accept_remote(self): - # TODO(paul): This test will likely break if/when real auth permissions - # are added; for now the HS will always accept any invite - - # Use a different destination, otherwise retry logic might fail the - # request - u_rocket = UserID.from_string("@rocket:moon") - - put_json = self.mock_http_client.put_json - put_json.expect_call_and_return( - call("moon", - path="/_matrix/federation/v1/send/1000000/", - data=_expect_edu("moon", "m.presence_accept", - content={ - "observer_user": "@rocket:moon", - "observed_user": "@apple:test", - } - ), - json_data_callback=ANY, - long_retries=True, - ), - defer.succeed((200, "OK")) - ) - - yield self.mock_federation_resource.trigger("PUT", - "/_matrix/federation/v1/send/1000000/", - _make_edu_json("elsewhere", "m.presence_invite", - content={ - "observer_user": "@rocket:moon", - "observed_user": "@apple:test", - } - ) - ) - - self.assertTrue( - (yield self.datastore.is_presence_visible( - observed_localpart=self.u_apple.localpart, - observer_userid=u_rocket.to_string(), - )) - ) - - yield put_json.await_calls() - - @defer.inlineCallbacks - def test_invited_remote_nonexistant(self): - # Use a different destination, otherwise retry logic might fail the - # request - u_rocket = UserID.from_string("@rocket:sun") - - put_json = self.mock_http_client.put_json - put_json.expect_call_and_return( - call("sun", - path="/_matrix/federation/v1/send/1000000/", - 
data=_expect_edu("sun", "m.presence_deny", - content={ - "observer_user": "@rocket:sun", - "observed_user": "@durian:test", - } - ), - json_data_callback=ANY, - long_retries=True, - ), - defer.succeed((200, "OK")) - ) - - yield self.mock_federation_resource.trigger("PUT", - "/_matrix/federation/v1/send/1000000/", - _make_edu_json("sun", "m.presence_invite", - content={ - "observer_user": "@rocket:sun", - "observed_user": "@durian:test", - } - ) - ) - - yield put_json.await_calls() - - @defer.inlineCallbacks - def test_accepted_remote(self): - yield self.datastore.add_presence_list_pending( - observer_localpart=self.u_apple.localpart, - observed_userid=self.u_cabbage.to_string(), - ) - - yield self.mock_federation_resource.trigger("PUT", - "/_matrix/federation/v1/send/1000000/", - _make_edu_json("elsewhere", "m.presence_accept", - content={ - "observer_user": "@apple:test", - "observed_user": "@cabbage:elsewhere", - } - ) - ) - - self.assertEquals( - [{"observed_user_id": "@cabbage:elsewhere", "accepted": 1}], - (yield self.datastore.get_presence_list(self.u_apple.localpart)) - ) - - self.mock_start.assert_called_with( - self.u_apple, target_user=self.u_cabbage) - - @defer.inlineCallbacks - def test_denied_remote(self): - yield self.datastore.add_presence_list_pending( - observer_localpart=self.u_apple.localpart, - observed_userid="@eggplant:elsewhere", - ) - - yield self.mock_federation_resource.trigger("PUT", - "/_matrix/federation/v1/send/1000000/", - _make_edu_json("elsewhere", "m.presence_deny", - content={ - "observer_user": "@apple:test", - "observed_user": "@eggplant:elsewhere", - } - ) - ) - - self.assertEquals( - [], - (yield self.datastore.get_presence_list(self.u_apple.localpart)) - ) - - @defer.inlineCallbacks - def test_drop_local(self): - yield self.datastore.add_presence_list_pending( - observer_localpart=self.u_apple.localpart, - observed_userid=self.u_banana.to_string(), - ) - yield self.datastore.set_presence_list_accepted( - observer_localpart=self.u_apple.localpart, - observed_userid=self.u_banana.to_string(), - ) - - yield self.handler.drop( - observer_user=self.u_apple, - observed_user=self.u_banana, - ) - - self.assertEquals( - [], - (yield self.datastore.get_presence_list(self.u_apple.localpart)) - ) - - self.mock_stop.assert_called_with( - self.u_apple, target_user=self.u_banana) - - @defer.inlineCallbacks - def test_drop_remote(self): - yield self.datastore.add_presence_list_pending( - observer_localpart=self.u_apple.localpart, - observed_userid=self.u_cabbage.to_string(), - ) - yield self.datastore.set_presence_list_accepted( - observer_localpart=self.u_apple.localpart, - observed_userid=self.u_cabbage.to_string(), - ) - - yield self.handler.drop( - observer_user=self.u_apple, - observed_user=self.u_cabbage, - ) - - self.assertEquals( - [], - (yield self.datastore.get_presence_list(self.u_apple.localpart)) - ) - - @defer.inlineCallbacks - def test_get_presence_list(self): - yield self.datastore.add_presence_list_pending( - observer_localpart=self.u_apple.localpart, - observed_userid=self.u_banana.to_string(), - ) - yield self.datastore.set_presence_list_accepted( - observer_localpart=self.u_apple.localpart, - observed_userid=self.u_banana.to_string(), - ) - - presence = yield self.handler.get_presence_list( - observer_user=self.u_apple) - - self.assertEquals([ - {"observed_user": self.u_banana, - "presence": OFFLINE, - "accepted": 1}, - ], presence) - - -class PresencePushTestCase(MockedDatastorePresenceTestCase): - """ Tests steady-state presence status updates. 
- - They assert that presence state update messages are pushed around the place - when users change state, presuming that the watches are all established. - - These tests are MASSIVELY fragile currently as they poke internals of the - presence handler; namely the _local_pushmap and _remote_recvmap. - BE WARNED... - """ - PRESENCE_LIST = { - 'apple': [ "@banana:test", "@clementine:test" ], - 'banana': [ "@apple:test" ], - } - - @defer.inlineCallbacks - def test_push_local(self): - self.room_members = [self.u_apple, self.u_elderberry] - - self.datastore.set_presence_state.return_value = defer.succeed( - {"state": ONLINE} - ) - - # TODO(paul): Gut-wrenching - self.handler._user_cachemap[self.u_apple] = UserPresenceCache() - self.handler._user_cachemap[self.u_apple].update( - {"presence": OFFLINE}, serial=0 - ) - apple_set = self.handler._local_pushmap.setdefault("apple", set()) - apple_set.add(self.u_banana) - apple_set.add(self.u_clementine) - - self.assertEquals(self.event_source.get_current_key(), 0) - - yield self.handler.set_state(self.u_apple, self.u_apple, - {"presence": ONLINE} - ) - - # Apple sees self-reflection even without room_id - (events, _) = yield self.event_source.get_new_events( - user=self.u_apple, - from_key=0, - ) - - self.assertEquals(self.event_source.get_current_key(), 1) - self.assertEquals(events, - [ - {"type": "m.presence", - "content": { - "user_id": "@apple:test", - "presence": ONLINE, - "last_active_ago": 0, - }}, - ], - msg="Presence event should be visible to self-reflection" - ) - - # Apple sees self-reflection - (events, _) = yield self.event_source.get_new_events( - user=self.u_apple, - from_key=0, - room_ids=[self.room_id], - ) - - self.assertEquals(self.event_source.get_current_key(), 1) - self.assertEquals(events, - [ - {"type": "m.presence", - "content": { - "user_id": "@apple:test", - "presence": ONLINE, - "last_active_ago": 0, - }}, - ], - msg="Presence event should be visible to self-reflection" - ) - - config = SourcePaginationConfig(from_key=1, to_key=0) - (chunk, _) = yield self.event_source.get_pagination_rows( - self.u_apple, config, None - ) - self.assertEquals(chunk, - [ - {"type": "m.presence", - "content": { - "user_id": "@apple:test", - "presence": ONLINE, - "last_active_ago": 0, - }}, - ] - ) - - # Banana sees it because of presence subscription - (events, _) = yield self.event_source.get_new_events( - user=self.u_banana, - from_key=0, - room_ids=[self.room_id], - ) - - self.assertEquals(self.event_source.get_current_key(), 1) - self.assertEquals(events, - [ - {"type": "m.presence", - "content": { - "user_id": "@apple:test", - "presence": ONLINE, - "last_active_ago": 0, - }}, - ], - msg="Presence event should be visible to explicit subscribers" - ) - - # Elderberry sees it because of same room - (events, _) = yield self.event_source.get_new_events( - user=self.u_elderberry, - from_key=0, - room_ids=[self.room_id], - ) - - self.assertEquals(self.event_source.get_current_key(), 1) - self.assertEquals(events, - [ - {"type": "m.presence", - "content": { - "user_id": "@apple:test", - "presence": ONLINE, - "last_active_ago": 0, - }}, - ], - msg="Presence event should be visible to other room members" - ) - - # Durian is not in the room, should not see this event - (events, _) = yield self.event_source.get_new_events( - user=self.u_durian, - from_key=0, - room_ids=[], - ) - - self.assertEquals(self.event_source.get_current_key(), 1) - self.assertEquals(events, [], - msg="Presence event should not be visible to others" - ) - - presence = yield 
self.handler.get_presence_list( - observer_user=self.u_apple, accepted=True) - - self.assertEquals( - [ - {"observed_user": self.u_banana, - "presence": OFFLINE, - "accepted": True}, - {"observed_user": self.u_clementine, - "presence": OFFLINE, - "accepted": True}, - ], - presence - ) - - # TODO(paul): Gut-wrenching - banana_set = self.handler._local_pushmap.setdefault("banana", set()) - banana_set.add(self.u_apple) - - yield self.handler.set_state(self.u_banana, self.u_banana, - {"presence": ONLINE} - ) - - self.clock.advance_time(2) - - presence = yield self.handler.get_presence_list( - observer_user=self.u_apple, accepted=True) - - self.assertEquals([ - {"observed_user": self.u_banana, - "presence": ONLINE, - "last_active_ago": 2000, - "accepted": True}, - {"observed_user": self.u_clementine, - "presence": OFFLINE, - "accepted": True}, - ], presence) - - (events, _) = yield self.event_source.get_new_events( - user=self.u_apple, - from_key=1, - ) - - self.assertEquals(self.event_source.get_current_key(), 2) - self.assertEquals(events, - [ - {"type": "m.presence", - "content": { - "user_id": "@banana:test", - "presence": ONLINE, - "last_active_ago": 2000 - }}, - ] - ) - - @defer.inlineCallbacks - def test_push_remote(self): - put_json = self.mock_http_client.put_json - put_json.expect_call_and_return( - call("farm", - path=ANY, # Can't guarantee which txn ID will be which - data=_expect_edu("farm", "m.presence", - content={ - "push": [ - {"user_id": "@apple:test", - "presence": u"online", - "last_active_ago": 0}, - ], - } - ), - json_data_callback=ANY, - long_retries=True, - ), - defer.succeed((200, "OK")) - ) - put_json.expect_call_and_return( - call("remote", - path=ANY, # Can't guarantee which txn ID will be which - data=_expect_edu("remote", "m.presence", - content={ - "push": [ - {"user_id": "@apple:test", - "presence": u"online", - "last_active_ago": 0}, - ], - } - ), - json_data_callback=ANY, - long_retries=True, - ), - defer.succeed((200, "OK")) - ) - - self.room_members = [self.u_apple, self.u_onion] - - self.datastore.set_presence_state.return_value = defer.succeed( - {"state": ONLINE} - ) - - # TODO(paul): Gut-wrenching - self.handler._user_cachemap[self.u_apple] = UserPresenceCache() - self.handler._user_cachemap[self.u_apple].update( - {"presence": OFFLINE}, serial=0 - ) - apple_set = self.handler._remote_sendmap.setdefault("apple", set()) - apple_set.add(self.u_potato.domain) - - yield self.handler.set_state(self.u_apple, self.u_apple, - {"presence": ONLINE} - ) - - yield put_json.await_calls() - - @defer.inlineCallbacks - def test_recv_remote(self): - self.room_members = [self.u_apple, self.u_banana, self.u_potato] - - self.assertEquals(self.event_source.get_current_key(), 0) - - yield self.mock_federation_resource.trigger("PUT", - "/_matrix/federation/v1/send/1000000/", - _make_edu_json("elsewhere", "m.presence", - content={ - "push": [ - {"user_id": "@potato:remote", - "presence": "online", - "last_active_ago": 1000}, - ], - } - ) - ) - - (events, _) = yield self.event_source.get_new_events( - user=self.u_apple, - from_key=0, - room_ids=[self.room_id], - ) - - self.assertEquals(self.event_source.get_current_key(), 1) - self.assertEquals(events, - [ - {"type": "m.presence", - "content": { - "user_id": "@potato:remote", - "presence": ONLINE, - "last_active_ago": 1000, - }} - ] - ) - - self.clock.advance_time(2) - - state = yield self.handler.get_state(self.u_potato, self.u_apple) - - self.assertEquals( - {"presence": ONLINE, "last_active_ago": 3000}, - state - ) - - 
@defer.inlineCallbacks - def test_recv_remote_offline(self): - """ Various tests relating to SYN-261 """ - - self.room_members = [self.u_apple, self.u_banana, self.u_potato] - - self.assertEquals(self.event_source.get_current_key(), 0) - - yield self.mock_federation_resource.trigger("PUT", - "/_matrix/federation/v1/send/1000000/", - _make_edu_json("elsewhere", "m.presence", - content={ - "push": [ - {"user_id": "@potato:remote", - "presence": "offline"}, - ], - } - ) - ) - - self.assertEquals(self.event_source.get_current_key(), 1) - - (events, _) = yield self.event_source.get_new_events( - user=self.u_apple, - from_key=0, - room_ids=[self.room_id,] - ) - self.assertEquals(events, - [ - {"type": "m.presence", - "content": { - "user_id": "@potato:remote", - "presence": OFFLINE, - }} - ] - ) - - yield self.mock_federation_resource.trigger("PUT", - "/_matrix/federation/v1/send/1000001/", - _make_edu_json("elsewhere", "m.presence", - content={ - "push": [ - {"user_id": "@potato:remote", - "presence": "online"}, - ], - } - ) - ) - - self.assertEquals(self.event_source.get_current_key(), 2) - - (events, _) = yield self.event_source.get_new_events( - user=self.u_apple, - from_key=0, - room_ids=[self.room_id,] - ) - self.assertEquals(events, - [ - {"type": "m.presence", - "content": { - "user_id": "@potato:remote", - "presence": ONLINE, - }} - ] - ) - - @defer.inlineCallbacks - def test_join_room_local(self): - self.room_members = [self.u_apple, self.u_banana] - - self.assertEquals(self.event_source.get_current_key(), 0) - - # TODO(paul): Gut-wrenching - self.handler._user_cachemap[self.u_clementine] = UserPresenceCache() - self.handler._user_cachemap[self.u_clementine].update( - { - "presence": PresenceState.ONLINE, - "last_active": self.clock.time_msec(), - }, self.u_clementine - ) - - yield self.distributor.fire("user_joined_room", self.u_clementine, - self.room_id - ) - - self.room_members.append(self.u_clementine) - - (events, _) = yield self.event_source.get_new_events( - user=self.u_apple, - from_key=0, - ) - - self.assertEquals(self.event_source.get_current_key(), 1) - self.assertEquals(events, - [ - {"type": "m.presence", - "content": { - "user_id": "@clementine:test", - "presence": ONLINE, - "last_active_ago": 0, - }} - ] - ) - - @defer.inlineCallbacks - def test_join_room_remote(self): - ## Sending local user state to a newly-joined remote user - put_json = self.mock_http_client.put_json - put_json.expect_call_and_return( - call("remote", - path=ANY, # Can't guarantee which txn ID will be which - data=_expect_edu("remote", "m.presence", - content={ - "push": [ - {"user_id": "@apple:test", - "presence": "online"}, - ], - } - ), - json_data_callback=ANY, - long_retries=True, - ), - defer.succeed((200, "OK")) - ) - put_json.expect_call_and_return( - call("remote", - path=ANY, # Can't guarantee which txn ID will be which - data=_expect_edu("remote", "m.presence", - content={ - "push": [ - {"user_id": "@banana:test", - "presence": "offline"}, - ], - } - ), - json_data_callback=ANY, - long_retries=True, - ), - defer.succeed((200, "OK")) - ) - - # TODO(paul): Gut-wrenching - self.handler._user_cachemap[self.u_apple] = UserPresenceCache() - self.handler._user_cachemap[self.u_apple].update( - {"presence": PresenceState.ONLINE}, self.u_apple) - self.room_members = [self.u_apple, self.u_banana] - - yield self.distributor.fire("user_joined_room", self.u_potato, - self.room_id - ) - - yield put_json.await_calls() - - ## Sending newly-joined local user state to remote users - - 
put_json.expect_call_and_return( - call("remote", - path="/_matrix/federation/v1/send/1000002/", - data=_expect_edu("remote", "m.presence", - content={ - "push": [ - {"user_id": "@clementine:test", - "presence": "online"}, - ], - } - ), - json_data_callback=ANY, - long_retries=True, - ), - defer.succeed((200, "OK")) - ) - - self.handler._user_cachemap[self.u_clementine] = UserPresenceCache() - self.handler._user_cachemap[self.u_clementine].update( - {"presence": ONLINE}, self.u_clementine) - self.room_members.append(self.u_potato) - - yield self.distributor.fire("user_joined_room", self.u_clementine, - self.room_id - ) - - put_json.await_calls() - - -class PresencePollingTestCase(MockedDatastorePresenceTestCase): - """ Tests presence status polling. """ - - # For this test, we have three local users; apple is watching and is - # watched by the other two, but the others don't watch each other. - # Additionally clementine is watching a remote user. - PRESENCE_LIST = { - 'apple': [ "@banana:test", "@clementine:test" ], - 'banana': [ "@apple:test" ], - 'clementine': [ "@apple:test", "@potato:remote" ], - 'fig': [ "@potato:remote" ], - } - - @defer.inlineCallbacks - def setUp(self): - yield super(PresencePollingTestCase, self).setUp() - - self.mock_update_client = Mock() - - def update(*args,**kwargs): - return defer.succeed(None) - self.mock_update_client.side_effect = update - - self.handler.push_update_to_clients = self.mock_update_client - - @defer.inlineCallbacks - def test_push_local(self): - # apple goes online - yield self.handler.set_state( - target_user=self.u_apple, auth_user=self.u_apple, - state={"presence": ONLINE} - ) - - # apple should see both banana and clementine currently offline - self.mock_update_client.assert_has_calls([ - call(users_to_push=[self.u_apple]), - call(users_to_push=[self.u_apple]), - ], any_order=True) - - # Gut-wrenching tests - self.assertTrue("banana" in self.handler._local_pushmap) - self.assertTrue(self.u_apple in self.handler._local_pushmap["banana"]) - self.assertTrue("clementine" in self.handler._local_pushmap) - self.assertTrue(self.u_apple in self.handler._local_pushmap["clementine"]) - - self.mock_update_client.reset_mock() - - # banana goes online - yield self.handler.set_state( - target_user=self.u_banana, auth_user=self.u_banana, - state={"presence": ONLINE} - ) - - # apple and banana should now both see each other online - self.mock_update_client.assert_has_calls([ - call(users_to_push=set([self.u_apple]), room_ids=[]), - call(users_to_push=[self.u_banana]), - ], any_order=True) - - self.assertTrue("apple" in self.handler._local_pushmap) - self.assertTrue(self.u_banana in self.handler._local_pushmap["apple"]) - - self.mock_update_client.reset_mock() - - # apple goes offline - yield self.handler.set_state( - target_user=self.u_apple, auth_user=self.u_apple, - state={"presence": OFFLINE} - ) - - # banana should now be told apple is offline - self.mock_update_client.assert_has_calls([ - call(users_to_push=set([self.u_banana, self.u_apple]), room_ids=[]), - ], any_order=True) - - self.assertFalse("banana" in self.handler._local_pushmap) - self.assertFalse("clementine" in self.handler._local_pushmap) - - @defer.inlineCallbacks - def test_remote_poll_send(self): - put_json = self.mock_http_client.put_json - put_json.expect_call_and_return( - call("remote", - path=ANY, - data=_expect_edu("remote", "m.presence", - content={ - "poll": [ "@potato:remote" ], - }, - ), - json_data_callback=ANY, - long_retries=True, - ), - defer.succeed((200, "OK")) - ) - 
- put_json.expect_call_and_return( - call("remote", - path=ANY, - data=_expect_edu("remote", "m.presence", - content={ - "push": [ { - "user_id": "@clementine:test", - "presence": OFFLINE, - }], - }, - ), - json_data_callback=ANY, - long_retries=True, - ), - defer.succeed((200, "OK")) - ) - - # clementine goes online - yield self.handler.set_state( - target_user=self.u_clementine, auth_user=self.u_clementine, - state={"presence": ONLINE} - ) - - yield put_json.await_calls() - - # Gut-wrenching tests - self.assertTrue(self.u_potato in self.handler._remote_recvmap, - msg="expected potato to be in _remote_recvmap" - ) - self.assertTrue(self.u_clementine in - self.handler._remote_recvmap[self.u_potato]) - - - put_json.expect_call_and_return( - call("remote", - path=ANY, - data=_expect_edu("remote", "m.presence", - content={ - "push": [ { - "user_id": "@fig:test", - "presence": OFFLINE, - }], - }, - ), - json_data_callback=ANY, - long_retries=True, - ), - defer.succeed((200, "OK")) - ) - - # fig goes online; shouldn't send a second poll - yield self.handler.set_state( - target_user=self.u_fig, auth_user=self.u_fig, - state={"presence": ONLINE} - ) - - # reactor.iterate(delay=0) - - yield put_json.await_calls() - - # fig goes offline - yield self.handler.set_state( - target_user=self.u_fig, auth_user=self.u_fig, - state={"presence": OFFLINE} - ) - - reactor.iterate(delay=0) - - put_json.assert_had_no_calls() - - put_json.expect_call_and_return( - call("remote", - path=ANY, - data=_expect_edu("remote", "m.presence", - content={ - "unpoll": [ "@potato:remote" ], - }, - ), - json_data_callback=ANY, - long_retries=True, - ), - defer.succeed((200, "OK")) - ) - - # clementine goes offline - yield self.handler.set_state( - target_user=self.u_clementine, auth_user=self.u_clementine, - state={"presence": OFFLINE} - ) - - yield put_json.await_calls() - - self.assertFalse(self.u_potato in self.handler._remote_recvmap, - msg="expected potato not to be in _remote_recvmap" - ) - - @defer.inlineCallbacks - def test_remote_poll_receive(self): - put_json = self.mock_http_client.put_json - put_json.expect_call_and_return( - call("remote", - path="/_matrix/federation/v1/send/1000000/", - data=_expect_edu("remote", "m.presence", - content={ - "push": [ - {"user_id": "@banana:test", - "presence": "offline", - "status_msg": None}, - ], - }, - ), - json_data_callback=ANY, - long_retries=True, - ), - defer.succeed((200, "OK")) - ) - - yield self.mock_federation_resource.trigger("PUT", - "/_matrix/federation/v1/send/1000000/", - _make_edu_json("remote", "m.presence", - content={ - "poll": [ "@banana:test" ], - }, - ) - ) - - yield put_json.await_calls() - - # Gut-wrenching tests - self.assertTrue(self.u_banana in self.handler._remote_sendmap) - - yield self.mock_federation_resource.trigger("PUT", - "/_matrix/federation/v1/send/1000001/", - _make_edu_json("remote", "m.presence", - content={ - "unpoll": [ "@banana:test" ], - } - ) - ) - - # Gut-wrenching tests - self.assertFalse(self.u_banana in self.handler._remote_sendmap) diff --git a/tests/handlers/test_presencelike.py b/tests/handlers/test_presencelike.py deleted file mode 100644 index 76f6ba5e7b..0000000000 --- a/tests/handlers/test_presencelike.py +++ /dev/null @@ -1,311 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2014-2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""This file contains tests of the "presence-like" data that is shared between -presence and profiles; namely, the displayname and avatar_url.""" - -from tests import unittest -from twisted.internet import defer - -from mock import Mock, call, ANY, NonCallableMock - -from ..utils import MockClock, setup_test_homeserver - -from synapse.api.constants import PresenceState -from synapse.handlers.presence import PresenceHandler -from synapse.handlers.profile import ProfileHandler -from synapse.types import UserID - - -OFFLINE = PresenceState.OFFLINE -UNAVAILABLE = PresenceState.UNAVAILABLE -ONLINE = PresenceState.ONLINE - - -class MockReplication(object): - def __init__(self): - self.edu_handlers = {} - - def register_edu_handler(self, edu_type, handler): - self.edu_handlers[edu_type] = handler - - def register_query_handler(self, query_type, handler): - pass - - def received_edu(self, origin, edu_type, content): - self.edu_handlers[edu_type](origin, content) - - -class PresenceAndProfileHandlers(object): - def __init__(self, hs): - self.presence_handler = PresenceHandler(hs) - self.profile_handler = ProfileHandler(hs) - - -class PresenceProfilelikeDataTestCase(unittest.TestCase): - - @defer.inlineCallbacks - def setUp(self): - hs = yield setup_test_homeserver( - clock=MockClock(), - datastore=Mock(spec=[ - "set_presence_state", - "is_presence_visible", - "set_profile_displayname", - "get_rooms_for_user", - ]), - handlers=None, - resource_for_federation=Mock(), - http_client=None, - replication_layer=MockReplication(), - ratelimiter=NonCallableMock(spec_set=[ - "send_message", - ]), - ) - self.ratelimiter = hs.get_ratelimiter() - self.ratelimiter.send_message.return_value = (True, 0) - hs.handlers = PresenceAndProfileHandlers(hs) - - self.datastore = hs.get_datastore() - - self.replication = hs.get_replication_layer() - self.replication.send_edu = Mock() - - def send_edu(*args, **kwargs): - # print "send_edu: %s, %s" % (args, kwargs) - return defer.succeed((200, "OK")) - self.replication.send_edu.side_effect = send_edu - - def get_profile_displayname(user_localpart): - return defer.succeed("Frank") - self.datastore.get_profile_displayname = get_profile_displayname - - def is_presence_visible(*args, **kwargs): - return defer.succeed(False) - self.datastore.is_presence_visible = is_presence_visible - - def get_profile_avatar_url(user_localpart): - return defer.succeed("http://foo") - self.datastore.get_profile_avatar_url = get_profile_avatar_url - - self.presence_list = [ - {"observed_user_id": "@banana:test", "accepted": True}, - {"observed_user_id": "@clementine:test", "accepted": True}, - ] - def get_presence_list(user_localpart, accepted=None): - return defer.succeed(self.presence_list) - self.datastore.get_presence_list = get_presence_list - - def user_rooms_intersect(userlist): - return defer.succeed(False) - self.datastore.user_rooms_intersect = user_rooms_intersect - - self.handlers = hs.get_handlers() - - self.mock_update_client = Mock() - def update(*args, **kwargs): - # print "mock_update_client: %s, %s" %(args, kwargs) - return defer.succeed(None) - 
self.mock_update_client.side_effect = update - - self.handlers.presence_handler.push_update_to_clients = ( - self.mock_update_client) - - hs.handlers.room_member_handler = Mock(spec=[ - "get_joined_rooms_for_user", - ]) - hs.handlers.room_member_handler.get_joined_rooms_for_user = ( - lambda u: defer.succeed([])) - - # Some local users to test with - self.u_apple = UserID.from_string("@apple:test") - self.u_banana = UserID.from_string("@banana:test") - self.u_clementine = UserID.from_string("@clementine:test") - - # Remote user - self.u_potato = UserID.from_string("@potato:remote") - - self.mock_get_joined = ( - self.datastore.get_rooms_for_user - ) - - @defer.inlineCallbacks - def test_set_my_state(self): - self.presence_list = [ - {"observed_user_id": "@banana:test", "accepted": True}, - {"observed_user_id": "@clementine:test", "accepted": True}, - ] - - mocked_set = self.datastore.set_presence_state - mocked_set.return_value = defer.succeed({"state": OFFLINE}) - - yield self.handlers.presence_handler.set_state( - target_user=self.u_apple, auth_user=self.u_apple, - state={"presence": UNAVAILABLE, "status_msg": "Away"}) - - mocked_set.assert_called_with("apple", - {"state": UNAVAILABLE, "status_msg": "Away"} - ) - - @defer.inlineCallbacks - def test_push_local(self): - def get_joined(*args): - return defer.succeed([]) - - self.mock_get_joined.side_effect = get_joined - - self.presence_list = [ - {"observed_user_id": "@banana:test", "accepted": True}, - {"observed_user_id": "@clementine:test", "accepted": True}, - ] - - self.datastore.set_presence_state.return_value = defer.succeed( - {"state": ONLINE} - ) - - # TODO(paul): Gut-wrenching - from synapse.handlers.presence import UserPresenceCache - self.handlers.presence_handler._user_cachemap[self.u_apple] = ( - UserPresenceCache() - ) - self.handlers.presence_handler._user_cachemap[self.u_apple].update( - {"presence": OFFLINE}, serial=0 - ) - apple_set = self.handlers.presence_handler._local_pushmap.setdefault( - "apple", set()) - apple_set.add(self.u_banana) - apple_set.add(self.u_clementine) - - yield self.handlers.presence_handler.set_state(self.u_apple, - self.u_apple, {"presence": ONLINE} - ) - yield self.handlers.presence_handler.set_state(self.u_banana, - self.u_banana, {"presence": ONLINE} - ) - - presence = yield self.handlers.presence_handler.get_presence_list( - observer_user=self.u_apple, accepted=True) - - self.assertEquals([ - {"observed_user": self.u_banana, - "presence": ONLINE, - "last_active_ago": 0, - "displayname": "Frank", - "avatar_url": "http://foo", - "accepted": True}, - {"observed_user": self.u_clementine, - "presence": OFFLINE, - "accepted": True} - ], presence) - - self.mock_update_client.assert_has_calls([ - call( - users_to_push={self.u_apple, self.u_banana, self.u_clementine}, - room_ids=[] - ), - ], any_order=True) - - self.mock_update_client.reset_mock() - - self.datastore.set_profile_displayname.return_value = defer.succeed( - None) - - yield self.handlers.profile_handler.set_displayname(self.u_apple, - self.u_apple, "I am an Apple") - - self.mock_update_client.assert_has_calls([ - call( - users_to_push={self.u_apple, self.u_banana, self.u_clementine}, - room_ids=[], - ), - ], any_order=True) - - @defer.inlineCallbacks - def test_push_remote(self): - self.presence_list = [ - {"observed_user_id": "@potato:remote", "accepted": True}, - ] - - self.datastore.set_presence_state.return_value = defer.succeed( - {"state": ONLINE} - ) - - # TODO(paul): Gut-wrenching - from synapse.handlers.presence import 
UserPresenceCache - self.handlers.presence_handler._user_cachemap[self.u_apple] = ( - UserPresenceCache() - ) - self.handlers.presence_handler._user_cachemap[self.u_apple].update( - {"presence": OFFLINE}, serial=0 - ) - apple_set = self.handlers.presence_handler._remote_sendmap.setdefault( - "apple", set()) - apple_set.add(self.u_potato.domain) - - yield self.handlers.presence_handler.set_state(self.u_apple, - self.u_apple, {"presence": ONLINE} - ) - - self.replication.send_edu.assert_called_with( - destination="remote", - edu_type="m.presence", - content={ - "push": [ - {"user_id": "@apple:test", - "presence": "online", - "last_active_ago": 0, - "displayname": "Frank", - "avatar_url": "http://foo"}, - ], - }, - ) - - @defer.inlineCallbacks - def test_recv_remote(self): - self.presence_list = [ - {"observed_user_id": "@banana:test"}, - {"observed_user_id": "@clementine:test"}, - ] - - # TODO(paul): Gut-wrenching - potato_set = self.handlers.presence_handler._remote_recvmap.setdefault( - self.u_potato, set() - ) - potato_set.add(self.u_apple) - - yield self.replication.received_edu( - "remote", "m.presence", { - "push": [ - {"user_id": "@potato:remote", - "presence": "online", - "displayname": "Frank", - "avatar_url": "http://foo"}, - ], - } - ) - - self.mock_update_client.assert_called_with( - users_to_push=set([self.u_apple]), - room_ids=[], - ) - - state = yield self.handlers.presence_handler.get_state(self.u_potato, - self.u_apple) - - self.assertEquals( - {"presence": ONLINE, - "displayname": "Frank", - "avatar_url": "http://foo"}, - state) diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py index 237fc8223c..95c87f0ebd 100644 --- a/tests/handlers/test_profile.py +++ b/tests/handlers/test_profile.py @@ -70,9 +70,6 @@ class ProfileTestCase(unittest.TestCase): self.handler = hs.get_handlers().profile_handler - # TODO(paul): Icky signal declarings.. booo - hs.get_distributor().declare("changed_presencelike_data") - @defer.inlineCallbacks def test_get_my_name(self): yield self.store.set_profile_displayname( diff --git a/tests/rest/client/v1/test_presence.py b/tests/rest/client/v1/test_presence.py deleted file mode 100644 index 8d7cfd79ab..0000000000 --- a/tests/rest/client/v1/test_presence.py +++ /dev/null @@ -1,412 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2014-2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Tests REST events for /presence paths.""" -from tests import unittest -from twisted.internet import defer - -from mock import Mock - -from ....utils import MockHttpResource, setup_test_homeserver - -from synapse.api.constants import PresenceState -from synapse.handlers.presence import PresenceHandler -from synapse.rest.client.v1 import presence -from synapse.rest.client.v1 import events -from synapse.types import Requester, UserID -from synapse.util.async import run_on_reactor - -from collections import namedtuple - - -OFFLINE = PresenceState.OFFLINE -UNAVAILABLE = PresenceState.UNAVAILABLE -ONLINE = PresenceState.ONLINE - - -myid = "@apple:test" -PATH_PREFIX = "/_matrix/client/api/v1" - - -class NullSource(object): - """This event source never yields any events and its token remains at - zero. It may be useful for unit-testing.""" - def __init__(self, hs): - pass - - def get_new_events( - self, - user, - from_key, - room_ids=None, - limit=None, - is_guest=None - ): - return defer.succeed(([], from_key)) - - def get_current_key(self, direction='f'): - return defer.succeed(0) - - def get_pagination_rows(self, user, pagination_config, key): - return defer.succeed(([], pagination_config.from_key)) - - -class JustPresenceHandlers(object): - def __init__(self, hs): - self.presence_handler = PresenceHandler(hs) - - -class PresenceStateTestCase(unittest.TestCase): - - @defer.inlineCallbacks - def setUp(self): - self.mock_resource = MockHttpResource(prefix=PATH_PREFIX) - hs = yield setup_test_homeserver( - datastore=Mock(spec=[ - "get_presence_state", - "set_presence_state", - "insert_client_ip", - ]), - http_client=None, - resource_for_client=self.mock_resource, - resource_for_federation=self.mock_resource, - ) - hs.handlers = JustPresenceHandlers(hs) - - self.datastore = hs.get_datastore() - self.datastore.get_app_service_by_token = Mock(return_value=None) - - def get_presence_list(*a, **kw): - return defer.succeed([]) - self.datastore.get_presence_list = get_presence_list - - def _get_user_by_access_token(token=None, allow_guest=False): - return { - "user": UserID.from_string(myid), - "token_id": 1, - "is_guest": False, - } - - hs.get_v1auth()._get_user_by_access_token = _get_user_by_access_token - - room_member_handler = hs.handlers.room_member_handler = Mock( - spec=[ - "get_joined_rooms_for_user", - ] - ) - - def get_rooms_for_user(user): - return defer.succeed([]) - room_member_handler.get_joined_rooms_for_user = get_rooms_for_user - - presence.register_servlets(hs, self.mock_resource) - - self.u_apple = UserID.from_string(myid) - - @defer.inlineCallbacks - def test_get_my_status(self): - mocked_get = self.datastore.get_presence_state - mocked_get.return_value = defer.succeed( - {"state": ONLINE, "status_msg": "Available"} - ) - - (code, response) = yield self.mock_resource.trigger("GET", - "/presence/%s/status" % (myid), None) - - self.assertEquals(200, code) - self.assertEquals( - {"presence": ONLINE, "status_msg": "Available"}, - response - ) - mocked_get.assert_called_with("apple") - - @defer.inlineCallbacks - def test_set_my_status(self): - mocked_set = self.datastore.set_presence_state - mocked_set.return_value = defer.succeed({"state": OFFLINE}) - - (code, response) = yield self.mock_resource.trigger("PUT", - "/presence/%s/status" % (myid), - '{"presence": "unavailable", "status_msg": "Away"}') - - self.assertEquals(200, code) - mocked_set.assert_called_with("apple", - {"state": UNAVAILABLE, "status_msg": "Away"} - ) - - -class PresenceListTestCase(unittest.TestCase): - - 
@defer.inlineCallbacks - def setUp(self): - self.mock_resource = MockHttpResource(prefix=PATH_PREFIX) - - hs = yield setup_test_homeserver( - datastore=Mock(spec=[ - "has_presence_state", - "get_presence_state", - "allow_presence_visible", - "is_presence_visible", - "add_presence_list_pending", - "set_presence_list_accepted", - "del_presence_list", - "get_presence_list", - "insert_client_ip", - ]), - http_client=None, - resource_for_client=self.mock_resource, - resource_for_federation=self.mock_resource, - ) - hs.handlers = JustPresenceHandlers(hs) - - self.datastore = hs.get_datastore() - self.datastore.get_app_service_by_token = Mock(return_value=None) - - def has_presence_state(user_localpart): - return defer.succeed( - user_localpart in ("apple", "banana",) - ) - self.datastore.has_presence_state = has_presence_state - - def _get_user_by_access_token(token=None, allow_guest=False): - return { - "user": UserID.from_string(myid), - "token_id": 1, - "is_guest": False, - } - - hs.handlers.room_member_handler = Mock( - spec=[ - "get_joined_rooms_for_user", - ] - ) - - hs.get_v1auth()._get_user_by_access_token = _get_user_by_access_token - - presence.register_servlets(hs, self.mock_resource) - - self.u_apple = UserID.from_string("@apple:test") - self.u_banana = UserID.from_string("@banana:test") - - @defer.inlineCallbacks - def test_get_my_list(self): - self.datastore.get_presence_list.return_value = defer.succeed( - [{"observed_user_id": "@banana:test", "accepted": True}], - ) - - (code, response) = yield self.mock_resource.trigger("GET", - "/presence/list/%s" % (myid), None) - - self.assertEquals(200, code) - self.assertEquals([ - {"user_id": "@banana:test", "presence": OFFLINE, "accepted": True}, - ], response) - - self.datastore.get_presence_list.assert_called_with( - "apple", accepted=True - ) - - @defer.inlineCallbacks - def test_invite(self): - self.datastore.add_presence_list_pending.return_value = ( - defer.succeed(()) - ) - self.datastore.is_presence_visible.return_value = defer.succeed( - True - ) - - (code, response) = yield self.mock_resource.trigger("POST", - "/presence/list/%s" % (myid), - """{"invite": ["@banana:test"]}""" - ) - - self.assertEquals(200, code) - - self.datastore.add_presence_list_pending.assert_called_with( - "apple", "@banana:test" - ) - self.datastore.set_presence_list_accepted.assert_called_with( - "apple", "@banana:test" - ) - - @defer.inlineCallbacks - def test_drop(self): - self.datastore.del_presence_list.return_value = ( - defer.succeed(()) - ) - - (code, response) = yield self.mock_resource.trigger("POST", - "/presence/list/%s" % (myid), - """{"drop": ["@banana:test"]}""" - ) - - self.assertEquals(200, code) - - self.datastore.del_presence_list.assert_called_with( - "apple", "@banana:test" - ) - - -class PresenceEventStreamTestCase(unittest.TestCase): - @defer.inlineCallbacks - def setUp(self): - self.mock_resource = MockHttpResource(prefix=PATH_PREFIX) - - # HIDEOUS HACKERY - # TODO(paul): This should be injected in via the HomeServer DI system - from synapse.streams.events import ( - PresenceEventSource, EventSources - ) - - old_SOURCE_TYPES = EventSources.SOURCE_TYPES - def tearDown(): - EventSources.SOURCE_TYPES = old_SOURCE_TYPES - self.tearDown = tearDown - - EventSources.SOURCE_TYPES = { - k: NullSource for k in old_SOURCE_TYPES.keys() - } - EventSources.SOURCE_TYPES["presence"] = PresenceEventSource - - clock = Mock(spec=[ - "call_later", - "cancel_call_later", - "time_msec", - "looping_call", - ]) - - clock.time_msec.return_value = 1000000 - 
- hs = yield setup_test_homeserver( - http_client=None, - resource_for_client=self.mock_resource, - resource_for_federation=self.mock_resource, - datastore=Mock(spec=[ - "set_presence_state", - "get_presence_list", - "get_rooms_for_user", - ]), - clock=clock, - ) - - def _get_user_by_req(req=None, allow_guest=False): - return Requester(UserID.from_string(myid), "", False) - - hs.get_v1auth().get_user_by_req = _get_user_by_req - - presence.register_servlets(hs, self.mock_resource) - events.register_servlets(hs, self.mock_resource) - - hs.handlers.room_member_handler = Mock(spec=[]) - - self.room_members = [] - - def get_rooms_for_user(user): - if user in self.room_members: - return ["a-room"] - else: - return [] - hs.handlers.room_member_handler.get_joined_rooms_for_user = get_rooms_for_user - hs.handlers.room_member_handler.get_room_members = ( - lambda r: self.room_members if r == "a-room" else [] - ) - hs.handlers.room_member_handler._filter_events_for_client = ( - lambda user_id, events, **kwargs: events - ) - - self.mock_datastore = hs.get_datastore() - self.mock_datastore.get_app_service_by_token = Mock(return_value=None) - self.mock_datastore.get_app_service_by_user_id = Mock( - return_value=defer.succeed(None) - ) - self.mock_datastore.get_rooms_for_user = ( - lambda u: [ - namedtuple("Room", "room_id")(r) - for r in get_rooms_for_user(UserID.from_string(u)) - ] - ) - - def get_profile_displayname(user_id): - return defer.succeed("Frank") - self.mock_datastore.get_profile_displayname = get_profile_displayname - - def get_profile_avatar_url(user_id): - return defer.succeed(None) - self.mock_datastore.get_profile_avatar_url = get_profile_avatar_url - - def user_rooms_intersect(user_list): - room_member_ids = map(lambda u: u.to_string(), self.room_members) - - shared = all(map(lambda i: i in room_member_ids, user_list)) - return defer.succeed(shared) - self.mock_datastore.user_rooms_intersect = user_rooms_intersect - - def get_joined_hosts_for_room(room_id): - return [] - self.mock_datastore.get_joined_hosts_for_room = get_joined_hosts_for_room - - self.presence = hs.get_handlers().presence_handler - - self.u_apple = UserID.from_string("@apple:test") - self.u_banana = UserID.from_string("@banana:test") - - @defer.inlineCallbacks - def test_shortpoll(self): - self.room_members = [self.u_apple, self.u_banana] - - self.mock_datastore.set_presence_state.return_value = defer.succeed( - {"state": ONLINE} - ) - self.mock_datastore.get_presence_list.return_value = defer.succeed( - [] - ) - - (code, response) = yield self.mock_resource.trigger("GET", - "/events?timeout=0", None) - - self.assertEquals(200, code) - - # We've forced there to be only one data stream so the tokens will - # all be ours - - # I'll already get my own presence state change - self.assertEquals({"start": "0_1_0_0_0", "end": "0_1_0_0_0", "chunk": []}, - response - ) - - self.mock_datastore.set_presence_state.return_value = defer.succeed( - {"state": ONLINE} - ) - self.mock_datastore.get_presence_list.return_value = defer.succeed([]) - - yield self.presence.set_state(self.u_banana, self.u_banana, - state={"presence": ONLINE} - ) - - yield run_on_reactor() - - (code, response) = yield self.mock_resource.trigger("GET", - "/events?from=s0_1_0&timeout=0", None) - - self.assertEquals(200, code) - self.assertEquals({"start": "s0_1_0_0_0", "end": "s0_2_0_0_0", "chunk": [ - {"type": "m.presence", - "content": { - "user_id": "@banana:test", - "presence": ONLINE, - "displayname": "Frank", - "last_active_ago": 0, - }}, - ]}, response) 
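The REST-level presence tests removed above rolled their own NullSource so that every event stream other than presence stayed silent while /events was polled. The trick is worth noting on its way out; a minimal sketch of the same idea (illustrative only, not a shared Synapse test helper):

from twisted.internet import defer


class NullSource(object):
    """An event source that never yields events and whose stream key stays
    at zero, so it contributes nothing to an /events response."""

    def __init__(self, hs):
        pass

    def get_new_events(self, user, from_key, room_ids=None, limit=None,
                       is_guest=None):
        # No events, and the caller's stream position does not advance.
        return defer.succeed(([], from_key))

    def get_current_key(self, direction="f"):
        return defer.succeed(0)

    def get_pagination_rows(self, user, pagination_config, key):
        return defer.succeed(([], pagination_config.from_key))

The deleted PresenceEventStreamTestCase pointed every entry in EventSources.SOURCE_TYPES at a class like this except "presence", restoring the originals in tearDown, which is why its /events responses only ever contained m.presence chunks.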
diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py index ad5dd3bd6e..a90b9dc3d8 100644 --- a/tests/rest/client/v1/test_rooms.py +++ b/tests/rest/client/v1/test_rooms.py @@ -953,12 +953,6 @@ class RoomInitialSyncTestCase(RestTestCase): synapse.rest.client.v1.room.register_servlets(hs, self.mock_resource) - # Since I'm getting my own presence I need to exist as far as presence - # is concerned. - hs.get_handlers().presence_handler.registered_user( - UserID.from_string(self.user_id) - ) - # create the room self.room_id = yield self.create_room_as(self.user_id) diff --git a/tests/storage/test_presence.py b/tests/storage/test_presence.py index 333f1e10f1..ec78f007ca 100644 --- a/tests/storage/test_presence.py +++ b/tests/storage/test_presence.py @@ -34,32 +34,6 @@ class PresenceStoreTestCase(unittest.TestCase): self.u_apple = UserID.from_string("@apple:test") self.u_banana = UserID.from_string("@banana:test") - @defer.inlineCallbacks - def test_state(self): - yield self.store.create_presence( - self.u_apple.localpart - ) - - state = yield self.store.get_presence_state( - self.u_apple.localpart - ) - - self.assertEquals( - {"state": None, "status_msg": None, "mtime": None}, state - ) - - yield self.store.set_presence_state( - self.u_apple.localpart, {"state": "online", "status_msg": "Here"} - ) - - state = yield self.store.get_presence_state( - self.u_apple.localpart - ) - - self.assertEquals( - {"state": "online", "status_msg": "Here", "mtime": 1000000}, state - ) - @defer.inlineCallbacks def test_visibility(self): self.assertFalse((yield self.store.is_presence_visible( From f8d21e1431ce08b267f7b63a0a0772beb2588f25 Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Thu, 18 Feb 2016 11:02:14 +0000 Subject: [PATCH 190/349] Review comments --- synapse/handlers/_base.py | 3 ++- synapse/handlers/message.py | 2 +- synapse/handlers/room.py | 9 ++++----- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py index 41e153c934..a516a84a66 100644 --- a/synapse/handlers/_base.py +++ b/synapse/handlers/_base.py @@ -222,7 +222,8 @@ class BaseHandler(object): if event_type == EventTypes.Member ] if len(room_members) == 0: - # has the room been created so we can join it? + # Have we just created the room, and is this about to be the very + # first member event? 
create_event = current_state.get(("m.room.create", "")) if create_event: return True diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 9c3471f2e3..723bc0e34f 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -229,7 +229,7 @@ class MessageHandler(BaseHandler): if event.type == EventTypes.Member: raise SynapseError( 500, - "Tried to send member even through non-member codepath" + "Tried to send member event through non-member codepath" ) user = UserID.from_string(event.sender) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index cd04ac09fa..b00cac4bd4 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -490,11 +490,10 @@ class RoomMemberHandler(BaseHandler): sender = UserID.from_string(event.sender) assert self.hs.is_mine(sender), "Sender must be our own: %s" % (sender,) - if event.is_state(): - message_handler = self.hs.get_handlers().message_handler - prev_event = message_handler.deduplicate_state_event(event, context) - if prev_event is not None: - return + message_handler = self.hs.get_handlers().message_handler + prev_event = message_handler.deduplicate_state_event(event, context) + if prev_event is not None: + return action = "send" From b4796a62ee953d2b6946b4bef31d4252e558eb2d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 18 Feb 2016 11:52:33 +0000 Subject: [PATCH 191/349] Add unit test --- synapse/handlers/presence.py | 280 ++++++++++++++++-------- tests/handlers/test_presence.py | 373 ++++++++++++++++++++++++++++++++ 2 files changed, 560 insertions(+), 93 deletions(-) create mode 100644 tests/handlers/test_presence.py diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 8831d83c56..0a061fe9b2 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -217,55 +217,20 @@ class PresenceHandler(BaseHandler): user_id, UserPresenceState.default(user_id) ) - # If the users are ours then we want to set up a bunch of timers - # to time things out. 
- if self.hs.is_mine_id(user_id): - if new_state.state == PresenceState.ONLINE: - # Idle timer - self.wheel_timer.insert( - now=now, - obj=user_id, - then=new_state.last_active_ts + IDLE_TIMER - ) - - if new_state.state != PresenceState.OFFLINE: - # User has stopped syncing - self.wheel_timer.insert( - now=now, - obj=user_id, - then=new_state.last_user_sync_ts + SYNC_ONLINE_TIMEOUT - ) - - last_federate = new_state.last_federation_update_ts - if now - last_federate > FEDERATION_PING_INTERVAL: - # Been a while since we've poked remote servers - new_state = new_state.copy_and_replace( - last_federation_update_ts=now, - ) - to_federation_ping[user_id] = new_state - - else: - self.wheel_timer.insert( - now=now, - obj=user_id, - then=new_state.last_federation_update_ts + FEDERATION_TIMEOUT - ) - - if new_state.state == PresenceState.ONLINE: - active = now - new_state.last_active_ts < LAST_ACTIVE_GRANULARITY - new_state = new_state.copy_and_replace( - currently_active=active, - ) - - # Check whether the change was something worth notifying about - if should_notify(prev_state, new_state): - new_state.copy_and_replace( - last_federation_update_ts=now, - ) - to_notify[user_id] = new_state + new_state, should_notify, should_ping = handle_update( + prev_state, new_state, + is_mine=self.hs.is_mine_id(user_id), + wheel_timer=self.wheel_timer, + now=now + ) self.user_to_current_state[user_id] = new_state + if should_notify: + to_notify[user_id] = new_state + elif should_ping: + to_federation_ping[user_id] = new_state + # TODO: We should probably ensure there are no races hereafter if to_notify: @@ -296,55 +261,22 @@ class PresenceHandler(BaseHandler): # take any action. users_to_check = self.wheel_timer.fetch(now) - changes = {} # Actual changes we need to notify people about + states = [ + self.user_to_current_state.get( + user_id, UserPresenceState.default(user_id) + ) + for user_id in set(users_to_check) + ] - for user_id in set(users_to_check): - state = self.user_to_current_state.get(user_id, None) - if not state: - continue + changes = handle_timeouts( + states, + is_mine_fn=self.hs.is_mine_id, + user_to_current_state=self.user_to_current_state, + user_to_num_current_syncs=self.user_to_num_current_syncs, + now=now, + ) - if state.state == PresenceState.OFFLINE: - # No timeouts are associated with offline states. - continue - - if self.hs.is_mine_id(user_id): - if state.state == PresenceState.ONLINE: - if now - state.last_active_ts > IDLE_TIMER: - # Currently online, but last activity ages ago so auto - # idle - changes[user_id] = state.copy_and_replace( - state=PresenceState.UNAVAILABLE, - ) - elif now - state.last_active_ts > LAST_ACTIVE_GRANULARITY: - # So that we send down a notification that we've - # stopped updating. - changes[user_id] = state - - if now - state.last_federation_update_ts > FEDERATION_PING_INTERVAL: - # Need to send ping to other servers to ensure they don't - # timeout and set us to offline - changes[user_id] = state - - # If there are have been no sync for a while (and none ongoing), - # set presence to offline - if not self.user_to_num_current_syncs.get(user_id, 0): - if now - state.last_user_sync_ts > SYNC_ONLINE_TIMEOUT: - changes[user_id] = state.copy_and_replace( - state=PresenceState.OFFLINE, - status_msg=None, - ) - else: - # We expect to be poked occaisonally by the other side. - # This is to protect against forgetful/buggy servers, so that - # no one gets stuck online forever. 
- if now - state.last_federation_update_ts > FEDERATION_TIMEOUT: - # The other side seems to have disappeared. - changes[user_id] = state.copy_and_replace( - state=PresenceState.OFFLINE, - status_msg=None, - ) - - preserve_fn(self._update_states)(changes.values()) + preserve_fn(self._update_states)(changes) @defer.inlineCallbacks def bump_presence_active_time(self, user): @@ -925,3 +857,165 @@ class PresenceEventSource(object): def get_pagination_rows(self, user, pagination_config, key): return self.get_new_events(user, from_key=None, include_offline=False) + + +def handle_timeouts(user_states, is_mine_fn, user_to_num_current_syncs, now): + """Checks the presence of users that have timed out and updates as + appropriate. + + Args: + user_states(list): List of UserPresenceState's to check. + is_mine_fn (fn): Function that returns if a user_id is ours + user_to_num_current_syncs (dict): Mapping of user_id to number of currently + active syncs. + now (int): Current time in ms. + + Returns: + List of UserPresenceState updates + """ + changes = {} # Actual changes we need to notify people about + + for state in user_states: + is_mine = is_mine_fn(state.user_id) + + new_state = handle_timeout(state, is_mine, user_to_num_current_syncs, now) + if new_state: + changes[state.user_id] = new_state + + return changes.values() + + +def handle_timeout(state, is_mine, user_to_num_current_syncs, now): + """Checks the presence of the user to see if any of the timers have elapsed + + Args: + state (UserPresenceState) + is_mine (bool): Whether the user is ours + user_to_num_current_syncs (dict): Mapping of user_id to number of currently + active syncs. + now (int): Current time in ms. + + Returns: + A UserPresenceState update or None if no update. + """ + if state.state == PresenceState.OFFLINE: + # No timeouts are associated with offline states. + return None + + changed = False + user_id = state.user_id + + if is_mine: + if state.state == PresenceState.ONLINE: + if now - state.last_active_ts > IDLE_TIMER: + # Currently online, but last activity ages ago so auto + # idle + state = state.copy_and_replace( + state=PresenceState.UNAVAILABLE, + ) + changed = True + elif now - state.last_active_ts > LAST_ACTIVE_GRANULARITY: + # So that we send down a notification that we've + # stopped updating. + changed = True + + if now - state.last_federation_update_ts > FEDERATION_PING_INTERVAL: + # Need to send ping to other servers to ensure they don't + # timeout and set us to offline + changed = True + + # If there are have been no sync for a while (and none ongoing), + # set presence to offline + if not user_to_num_current_syncs.get(user_id, 0): + if now - state.last_user_sync_ts > SYNC_ONLINE_TIMEOUT: + state = state.copy_and_replace( + state=PresenceState.OFFLINE, + status_msg=None, + ) + changed = True + else: + # We expect to be poked occaisonally by the other side. + # This is to protect against forgetful/buggy servers, so that + # no one gets stuck online forever. + if now - state.last_federation_update_ts > FEDERATION_TIMEOUT: + # The other side seems to have disappeared. + state = state.copy_and_replace( + state=PresenceState.OFFLINE, + status_msg=None, + ) + changed = True + + return state if changed else None + + +def handle_update(prev_state, new_state, is_mine, wheel_timer, now): + """Given a presence update: + 1. Add any appropriate timers. + 2. Check if we should notify anyone. 
+ + Args: + prev_state (UserPresenceState) + new_state (UserPresenceState) + is_mine (bool): Whether the user is ours + wheel_timer (WheelTimer) + now (int): Time now in ms + + Returns: + 3-tuple: `(new_state, persist_and_notify, federation_ping)` where: + - new_state: is the state to actually persist + - persist_and_notify (bool): whether to persist and notify people + - federation_ping (bool): whether we should send a ping over federation + """ + user_id = new_state.user_id + + persist_and_notify = False + federation_ping = False + + # If the users are ours then we want to set up a bunch of timers + # to time things out. + if is_mine: + if new_state.state == PresenceState.ONLINE: + # Idle timer + wheel_timer.insert( + now=now, + obj=user_id, + then=new_state.last_active_ts + IDLE_TIMER + ) + + if new_state.state != PresenceState.OFFLINE: + # User has stopped syncing + wheel_timer.insert( + now=now, + obj=user_id, + then=new_state.last_user_sync_ts + SYNC_ONLINE_TIMEOUT + ) + + last_federate = new_state.last_federation_update_ts + if now - last_federate > FEDERATION_PING_INTERVAL: + # Been a while since we've poked remote servers + new_state = new_state.copy_and_replace( + last_federation_update_ts=now, + ) + federation_ping = True + + else: + wheel_timer.insert( + now=now, + obj=user_id, + then=new_state.last_federation_update_ts + FEDERATION_TIMEOUT + ) + + if new_state.state == PresenceState.ONLINE: + active = now - new_state.last_active_ts < LAST_ACTIVE_GRANULARITY + new_state = new_state.copy_and_replace( + currently_active=active, + ) + + # Check whether the change was something worth notifying about + if should_notify(prev_state, new_state): + new_state = new_state.copy_and_replace( + last_federation_update_ts=now, + ) + persist_and_notify = True + + return new_state, persist_and_notify, federation_ping diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py new file mode 100644 index 0000000000..197298db15 --- /dev/null +++ b/tests/handlers/test_presence.py @@ -0,0 +1,373 @@ +# -*- coding: utf-8 -*- +# Copyright 2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +from tests import unittest + +from mock import Mock, call + +from synapse.api.constants import PresenceState +from synapse.handlers.presence import ( + handle_update, handle_timeout, + IDLE_TIMER, SYNC_ONLINE_TIMEOUT, LAST_ACTIVE_GRANULARITY, FEDERATION_TIMEOUT, + FEDERATION_PING_INTERVAL, +) +from synapse.storage.presence import UserPresenceState + + +class PresenceUpdateTestCase(unittest.TestCase): + def test_offline_to_online(self): + wheel_timer = Mock() + user_id = "@foo:bar" + now = 5000000 + + prev_state = UserPresenceState.default(user_id) + new_state = prev_state.copy_and_replace( + state=PresenceState.ONLINE, + last_active_ts=now, + ) + + state, persist_and_notify, federation_ping = handle_update( + prev_state, new_state, is_mine=True, wheel_timer=wheel_timer, now=now + ) + + self.assertTrue(persist_and_notify) + self.assertTrue(state.currently_active) + self.assertEquals(new_state.state, state.state) + self.assertEquals(new_state.status_msg, state.status_msg) + self.assertEquals(state.last_federation_update_ts, now) + + self.assertEquals(wheel_timer.insert.call_count, 2) + wheel_timer.insert.assert_has_calls([ + call( + now=now, + obj=user_id, + then=new_state.last_active_ts + IDLE_TIMER + ), + call( + now=now, + obj=user_id, + then=new_state.last_user_sync_ts + SYNC_ONLINE_TIMEOUT + ) + ], any_order=True) + + def test_online_to_online(self): + wheel_timer = Mock() + user_id = "@foo:bar" + now = 5000000 + + prev_state = UserPresenceState.default(user_id) + prev_state = prev_state.copy_and_replace( + state=PresenceState.ONLINE, + last_active_ts=now, + currently_active=True, + ) + + new_state = prev_state.copy_and_replace( + state=PresenceState.ONLINE, + last_active_ts=now, + ) + + state, persist_and_notify, federation_ping = handle_update( + prev_state, new_state, is_mine=True, wheel_timer=wheel_timer, now=now + ) + + self.assertFalse(persist_and_notify) + self.assertTrue(federation_ping) + self.assertTrue(state.currently_active) + self.assertEquals(new_state.state, state.state) + self.assertEquals(new_state.status_msg, state.status_msg) + self.assertEquals(state.last_federation_update_ts, now) + + self.assertEquals(wheel_timer.insert.call_count, 2) + wheel_timer.insert.assert_has_calls([ + call( + now=now, + obj=user_id, + then=new_state.last_active_ts + IDLE_TIMER + ), + call( + now=now, + obj=user_id, + then=new_state.last_user_sync_ts + SYNC_ONLINE_TIMEOUT + ) + ], any_order=True) + + def test_online_to_online_last_active(self): + wheel_timer = Mock() + user_id = "@foo:bar" + now = 5000000 + + prev_state = UserPresenceState.default(user_id) + prev_state = prev_state.copy_and_replace( + state=PresenceState.ONLINE, + last_active_ts=now - LAST_ACTIVE_GRANULARITY - 1, + currently_active=True, + ) + + new_state = prev_state.copy_and_replace( + state=PresenceState.ONLINE, + ) + + state, persist_and_notify, federation_ping = handle_update( + prev_state, new_state, is_mine=True, wheel_timer=wheel_timer, now=now + ) + + self.assertTrue(persist_and_notify) + self.assertFalse(state.currently_active) + self.assertEquals(new_state.state, state.state) + self.assertEquals(new_state.status_msg, state.status_msg) + self.assertEquals(state.last_federation_update_ts, now) + + self.assertEquals(wheel_timer.insert.call_count, 2) + wheel_timer.insert.assert_has_calls([ + call( + now=now, + obj=user_id, + then=new_state.last_active_ts + IDLE_TIMER + ), + call( + now=now, + obj=user_id, + then=new_state.last_user_sync_ts + SYNC_ONLINE_TIMEOUT + ) + ], any_order=True) + + def 
test_remote_ping_timer(self): + wheel_timer = Mock() + user_id = "@foo:bar" + now = 5000000 + + prev_state = UserPresenceState.default(user_id) + prev_state = prev_state.copy_and_replace( + state=PresenceState.ONLINE, + ) + + new_state = prev_state.copy_and_replace( + state=PresenceState.ONLINE, + ) + + state, persist_and_notify, federation_ping = handle_update( + prev_state, new_state, is_mine=False, wheel_timer=wheel_timer, now=now + ) + + self.assertFalse(persist_and_notify) + self.assertFalse(federation_ping) + self.assertFalse(state.currently_active) + self.assertEquals(new_state.state, state.state) + self.assertEquals(new_state.status_msg, state.status_msg) + + self.assertEquals(wheel_timer.insert.call_count, 1) + wheel_timer.insert.assert_has_calls([ + call( + now=now, + obj=user_id, + then=new_state.last_federation_update_ts + FEDERATION_TIMEOUT + ), + ], any_order=True) + + def test_online_to_offline(self): + wheel_timer = Mock() + user_id = "@foo:bar" + now = 5000000 + + prev_state = UserPresenceState.default(user_id) + prev_state = prev_state.copy_and_replace( + state=PresenceState.ONLINE, + last_active_ts=now, + currently_active=True, + ) + + new_state = prev_state.copy_and_replace( + state=PresenceState.OFFLINE, + ) + + state, persist_and_notify, federation_ping = handle_update( + prev_state, new_state, is_mine=True, wheel_timer=wheel_timer, now=now + ) + + self.assertTrue(persist_and_notify) + self.assertEquals(new_state.state, state.state) + self.assertEquals(state.last_federation_update_ts, now) + + self.assertEquals(wheel_timer.insert.call_count, 0) + + def test_online_to_idle(self): + wheel_timer = Mock() + user_id = "@foo:bar" + now = 5000000 + + prev_state = UserPresenceState.default(user_id) + prev_state = prev_state.copy_and_replace( + state=PresenceState.ONLINE, + last_active_ts=now, + currently_active=True, + ) + + new_state = prev_state.copy_and_replace( + state=PresenceState.UNAVAILABLE, + ) + + state, persist_and_notify, federation_ping = handle_update( + prev_state, new_state, is_mine=True, wheel_timer=wheel_timer, now=now + ) + + self.assertTrue(persist_and_notify) + self.assertEquals(new_state.state, state.state) + self.assertEquals(state.last_federation_update_ts, now) + self.assertEquals(new_state.state, state.state) + self.assertEquals(new_state.status_msg, state.status_msg) + + self.assertEquals(wheel_timer.insert.call_count, 1) + wheel_timer.insert.assert_has_calls([ + call( + now=now, + obj=user_id, + then=new_state.last_user_sync_ts + SYNC_ONLINE_TIMEOUT + ) + ], any_order=True) + + +class PresenceTimeoutTestCase(unittest.TestCase): + def test_idle_timer(self): + user_id = "@foo:bar" + now = 5000000 + + state = UserPresenceState.default(user_id) + state = state.copy_and_replace( + state=PresenceState.ONLINE, + last_active_ts=now - IDLE_TIMER - 1, + last_user_sync_ts=now, + ) + + new_state = handle_timeout( + state, is_mine=True, user_to_num_current_syncs={}, now=now + ) + + self.assertIsNotNone(new_state) + self.assertEquals(new_state.state, PresenceState.UNAVAILABLE) + + def test_sync_timeout(self): + user_id = "@foo:bar" + now = 5000000 + + state = UserPresenceState.default(user_id) + state = state.copy_and_replace( + state=PresenceState.ONLINE, + last_active_ts=now, + last_user_sync_ts=now - SYNC_ONLINE_TIMEOUT - 1, + ) + + new_state = handle_timeout( + state, is_mine=True, user_to_num_current_syncs={}, now=now + ) + + self.assertIsNotNone(new_state) + self.assertEquals(new_state.state, PresenceState.OFFLINE) + + def test_sync_online(self): + user_id 
= "@foo:bar" + now = 5000000 + + state = UserPresenceState.default(user_id) + state = state.copy_and_replace( + state=PresenceState.ONLINE, + last_active_ts=now - SYNC_ONLINE_TIMEOUT - 1, + last_user_sync_ts=now - SYNC_ONLINE_TIMEOUT - 1, + ) + + new_state = handle_timeout( + state, is_mine=True, user_to_num_current_syncs={ + user_id: 1, + }, now=now + ) + + self.assertIsNotNone(new_state) + self.assertEquals(new_state.state, PresenceState.ONLINE) + + def test_federation_ping(self): + user_id = "@foo:bar" + now = 5000000 + + state = UserPresenceState.default(user_id) + state = state.copy_and_replace( + state=PresenceState.ONLINE, + last_active_ts=now, + last_user_sync_ts=now, + last_federation_update_ts=now - FEDERATION_PING_INTERVAL - 1, + ) + + new_state = handle_timeout( + state, is_mine=True, user_to_num_current_syncs={}, now=now + ) + + self.assertIsNotNone(new_state) + self.assertEquals(new_state, new_state) + + def test_no_timeout(self): + user_id = "@foo:bar" + now = 5000000 + + state = UserPresenceState.default(user_id) + state = state.copy_and_replace( + state=PresenceState.ONLINE, + last_active_ts=now, + last_user_sync_ts=now, + last_federation_update_ts=now, + ) + + new_state = handle_timeout( + state, is_mine=True, user_to_num_current_syncs={}, now=now + ) + + self.assertIsNone(new_state) + + def test_federation_timeout(self): + user_id = "@foo:bar" + now = 5000000 + + state = UserPresenceState.default(user_id) + state = state.copy_and_replace( + state=PresenceState.ONLINE, + last_active_ts=now, + last_user_sync_ts=now, + last_federation_update_ts=now - FEDERATION_TIMEOUT - 1, + ) + + new_state = handle_timeout( + state, is_mine=False, user_to_num_current_syncs={}, now=now + ) + + self.assertIsNotNone(new_state) + self.assertEquals(new_state.state, PresenceState.OFFLINE) + + def test_last_active(self): + user_id = "@foo:bar" + now = 5000000 + + state = UserPresenceState.default(user_id) + state = state.copy_and_replace( + state=PresenceState.ONLINE, + last_active_ts=now - LAST_ACTIVE_GRANULARITY - 1, + last_user_sync_ts=now, + last_federation_update_ts=now, + ) + + new_state = handle_timeout( + state, is_mine=True, user_to_num_current_syncs={}, now=now + ) + + self.assertIsNotNone(new_state) + self.assertEquals(state, new_state) From 48b652bcbee72b729f9dbb1c04bfd2bacba48218 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 18 Feb 2016 14:57:09 +0000 Subject: [PATCH 192/349] Remove invalid arg. --- synapse/handlers/presence.py | 1 - 1 file changed, 1 deletion(-) diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 0a061fe9b2..fb9536cee3 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -271,7 +271,6 @@ class PresenceHandler(BaseHandler): changes = handle_timeouts( states, is_mine_fn=self.hs.is_mine_id, - user_to_current_state=self.user_to_current_state, user_to_num_current_syncs=self.user_to_num_current_syncs, now=now, ) From b9977ea667889f6cf89464c92fc57cbcae7cca28 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Thu, 18 Feb 2016 16:05:13 +0000 Subject: [PATCH 193/349] Remove dead code for setting device specific rules. It wasn't possible to hit the code from the API because of a typo in parsing the request path. Since no-one was using the feature we might as well remove the dead code. 
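For orientation while reading the diff below: a push rule's conditions are a list of dicts keyed on "kind", and after this patch the evaluator is left handling only the standard kinds, while the per-device kind that carried the profile_tag disappears. A hedged illustration of the shapes involved (field values are made up):

# Condition kinds PushRuleEvaluatorForEvent.matches still understands after
# this patch; shapes follow the Matrix push-rules API, values are examples.
remaining_conditions = [
    {"kind": "event_match", "key": "content.body", "pattern": "alice"},
    {"kind": "contains_display_name"},
    {"kind": "room_member_count", "is": "2"},
]

# The kind being removed: it scoped a rule to a single device via its
# profile_tag, but the request-path parsing typo mentioned above meant no
# client could ever create such a rule through the API.
removed_condition = {"kind": "device", "profile_tag": "work-phone"}

profile_tag itself survives as an optional, informational field on pushers; note how add_pusher and the pusher store below move it to a trailing keyword argument defaulting to "".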
--- synapse/push/__init__.py | 7 +- synapse/push/action_generator.py | 2 +- synapse/push/bulk_push_rule_evaluator.py | 2 +- synapse/push/httppusher.py | 3 +- synapse/push/push_rule_evaluator.py | 15 ++-- synapse/push/pusherpool.py | 48 ++++++------- synapse/rest/client/v1/push_rule.py | 90 ++---------------------- synapse/rest/client/v1/pusher.py | 6 +- synapse/storage/event_push_actions.py | 7 +- synapse/storage/pusher.py | 6 +- 10 files changed, 45 insertions(+), 141 deletions(-) diff --git a/synapse/push/__init__.py b/synapse/push/__init__.py index 8da2d8716c..4c6c3b83a2 100644 --- a/synapse/push/__init__.py +++ b/synapse/push/__init__.py @@ -47,14 +47,13 @@ class Pusher(object): MAX_BACKOFF = 60 * 60 * 1000 GIVE_UP_AFTER = 24 * 60 * 60 * 1000 - def __init__(self, _hs, profile_tag, user_id, app_id, + def __init__(self, _hs, user_id, app_id, app_display_name, device_display_name, pushkey, pushkey_ts, data, last_token, last_success, failing_since): self.hs = _hs self.evStreamHandler = self.hs.get_handlers().event_stream_handler self.store = self.hs.get_datastore() self.clock = self.hs.get_clock() - self.profile_tag = profile_tag self.user_id = user_id self.app_id = app_id self.app_display_name = app_display_name @@ -186,8 +185,8 @@ class Pusher(object): processed = False rule_evaluator = yield \ - push_rule_evaluator.evaluator_for_user_id_and_profile_tag( - self.user_id, self.profile_tag, single_event['room_id'], self.store + push_rule_evaluator.evaluator_for_user_id( + self.user_id, single_event['room_id'], self.store ) actions = yield rule_evaluator.actions_for_event(single_event) diff --git a/synapse/push/action_generator.py b/synapse/push/action_generator.py index e0da0868ec..c6c1dc769e 100644 --- a/synapse/push/action_generator.py +++ b/synapse/push/action_generator.py @@ -44,5 +44,5 @@ class ActionGenerator: ) context.push_actions = [ - (uid, None, actions) for uid, actions in actions_by_user.items() + (uid, actions) for uid, actions in actions_by_user.items() ] diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index 8ac5ceb9ef..0a23b3f102 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -152,7 +152,7 @@ def _condition_checker(evaluator, conditions, uid, display_name, cache): elif res is True: continue - res = evaluator.matches(cond, uid, display_name, None) + res = evaluator.matches(cond, uid, display_name) if _id: cache[_id] = bool(res) diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index cdc4494928..9be4869360 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -23,12 +23,11 @@ logger = logging.getLogger(__name__) class HttpPusher(Pusher): - def __init__(self, _hs, profile_tag, user_id, app_id, + def __init__(self, _hs, user_id, app_id, app_display_name, device_display_name, pushkey, pushkey_ts, data, last_token, last_success, failing_since): super(HttpPusher, self).__init__( _hs, - profile_tag, user_id, app_id, app_display_name, diff --git a/synapse/push/push_rule_evaluator.py b/synapse/push/push_rule_evaluator.py index 2a2b4437dc..98e2a2015e 100644 --- a/synapse/push/push_rule_evaluator.py +++ b/synapse/push/push_rule_evaluator.py @@ -33,7 +33,7 @@ INEQUALITY_EXPR = re.compile("^([=<>]*)([0-9]*)$") @defer.inlineCallbacks -def evaluator_for_user_id_and_profile_tag(user_id, profile_tag, room_id, store): +def evaluator_for_user_id(user_id, room_id, store): rawrules = yield store.get_push_rules_for_user(user_id) enabled_map = yield 
store.get_push_rules_enabled_for_user(user_id) our_member_event = yield store.get_current_state( @@ -43,7 +43,7 @@ def evaluator_for_user_id_and_profile_tag(user_id, profile_tag, room_id, store): ) defer.returnValue(PushRuleEvaluator( - user_id, profile_tag, rawrules, enabled_map, + user_id, rawrules, enabled_map, room_id, our_member_event, store )) @@ -77,10 +77,9 @@ def _room_member_count(ev, condition, room_member_count): class PushRuleEvaluator: DEFAULT_ACTIONS = [] - def __init__(self, user_id, profile_tag, raw_rules, enabled_map, room_id, + def __init__(self, user_id, raw_rules, enabled_map, room_id, our_member_event, store): self.user_id = user_id - self.profile_tag = profile_tag self.room_id = room_id self.our_member_event = our_member_event self.store = store @@ -152,7 +151,7 @@ class PushRuleEvaluator: matches = True for c in conditions: matches = evaluator.matches( - c, self.user_id, my_display_name, self.profile_tag + c, self.user_id, my_display_name ) if not matches: break @@ -189,13 +188,9 @@ class PushRuleEvaluatorForEvent(object): # Maps strings of e.g. 'content.body' -> event["content"]["body"] self._value_cache = _flatten_dict(event) - def matches(self, condition, user_id, display_name, profile_tag): + def matches(self, condition, user_id, display_name): if condition['kind'] == 'event_match': return self._event_match(condition, user_id) - elif condition['kind'] == 'device': - if 'profile_tag' not in condition: - return True - return condition['profile_tag'] == profile_tag elif condition['kind'] == 'contains_display_name': return self._contains_display_name(display_name) elif condition['kind'] == 'room_member_count': diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index d7dcb2de4b..a05aa5f661 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -29,6 +29,7 @@ class PusherPool: def __init__(self, _hs): self.hs = _hs self.store = self.hs.get_datastore() + self.clock = self.hs.get_clock() self.pushers = {} self.last_pusher_started = -1 @@ -38,8 +39,11 @@ class PusherPool: self._start_pushers(pushers) @defer.inlineCallbacks - def add_pusher(self, user_id, access_token, profile_tag, kind, app_id, - app_display_name, device_display_name, pushkey, lang, data): + def add_pusher(self, user_id, access_token, kind, app_id, + app_display_name, device_display_name, pushkey, lang, data, + profile_tag=""): + time_now_msec = self.clock.time_msec() + # we try to create the pusher just to validate the config: it # will then get pulled out of the database, # recreated, added and started: this means we have only one @@ -47,23 +51,31 @@ class PusherPool: self._create_pusher({ "user_name": user_id, "kind": kind, - "profile_tag": profile_tag, "app_id": app_id, "app_display_name": app_display_name, "device_display_name": device_display_name, "pushkey": pushkey, - "ts": self.hs.get_clock().time_msec(), + "ts": time_now_msec, "lang": lang, "data": data, "last_token": None, "last_success": None, "failing_since": None }) - yield self._add_pusher_to_store( - user_id, access_token, profile_tag, kind, app_id, - app_display_name, device_display_name, - pushkey, lang, data + yield self.store.add_pusher( + user_id=user_id, + access_token=access_token, + kind=kind, + app_id=app_id, + app_display_name=app_display_name, + device_display_name=device_display_name, + pushkey=pushkey, + pushkey_ts=time_now_msec, + lang=lang, + data=data, + profile_tag=profile_tag, ) + yield self._refresh_pusher(app_id, pushkey, user_id) @defer.inlineCallbacks def 
remove_pushers_by_app_id_and_pushkey_not_user(self, app_id, pushkey, @@ -94,30 +106,10 @@ class PusherPool: ) yield self.remove_pusher(p['app_id'], p['pushkey'], p['user_name']) - @defer.inlineCallbacks - def _add_pusher_to_store(self, user_id, access_token, profile_tag, kind, - app_id, app_display_name, device_display_name, - pushkey, lang, data): - yield self.store.add_pusher( - user_id=user_id, - access_token=access_token, - profile_tag=profile_tag, - kind=kind, - app_id=app_id, - app_display_name=app_display_name, - device_display_name=device_display_name, - pushkey=pushkey, - pushkey_ts=self.hs.get_clock().time_msec(), - lang=lang, - data=data, - ) - yield self._refresh_pusher(app_id, pushkey, user_id) - def _create_pusher(self, pusherdict): if pusherdict['kind'] == 'http': return HttpPusher( self.hs, - profile_tag=pusherdict['profile_tag'], user_id=pusherdict['user_name'], app_id=pusherdict['app_id'], app_display_name=pusherdict['app_display_name'], diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py index 7766b8be1d..5db2805d68 100644 --- a/synapse/rest/client/v1/push_rule.py +++ b/synapse/rest/client/v1/push_rule.py @@ -60,7 +60,6 @@ class PushRuleRestServlet(ClientV1RestServlet): spec['template'], spec['rule_id'], content, - device=spec['device'] if 'device' in spec else None ) except InvalidRuleException as e: raise SynapseError(400, e.message) @@ -153,23 +152,7 @@ class PushRuleRestServlet(ClientV1RestServlet): elif pattern_type == "user_localpart": c["pattern"] = user.localpart - if r['priority_class'] > PRIORITY_CLASS_MAP['override']: - # per-device rule - profile_tag = _profile_tag_from_conditions(r["conditions"]) - r = _strip_device_condition(r) - if not profile_tag: - continue - if profile_tag not in rules['device']: - rules['device'][profile_tag] = {} - rules['device'][profile_tag] = ( - _add_empty_priority_class_arrays( - rules['device'][profile_tag] - ) - ) - - rulearray = rules['device'][profile_tag][template_name] - else: - rulearray = rules['global'][template_name] + rulearray = rules['global'][template_name] template_rule = _rule_to_template(r) if template_rule: @@ -195,24 +178,6 @@ class PushRuleRestServlet(ClientV1RestServlet): path = path[1:] result = _filter_ruleset_with_path(rules['global'], path) defer.returnValue((200, result)) - elif path[0] == 'device': - path = path[1:] - if path == []: - raise UnrecognizedRequestError( - PushRuleRestServlet.SLIGHTLY_PEDANTIC_TRAILING_SLASH_ERROR - ) - if path[0] == '': - defer.returnValue((200, rules['device'])) - - profile_tag = path[0] - path = path[1:] - if profile_tag not in rules['device']: - ret = {} - ret = _add_empty_priority_class_arrays(ret) - defer.returnValue((200, ret)) - ruleset = rules['device'][profile_tag] - result = _filter_ruleset_with_path(ruleset, path) - defer.returnValue((200, result)) else: raise UnrecognizedRequestError() @@ -252,16 +217,9 @@ def _rule_spec_from_path(path): scope = path[1] path = path[2:] - if scope not in ['global', 'device']: + if scope != 'global': raise UnrecognizedRequestError() - device = None - if scope == 'device': - if len(path) == 0: - raise UnrecognizedRequestError() - device = path[0] - path = path[1:] - if len(path) == 0: raise UnrecognizedRequestError() @@ -278,8 +236,6 @@ def _rule_spec_from_path(path): 'template': template, 'rule_id': rule_id } - if device: - spec['profile_tag'] = device path = path[1:] @@ -289,7 +245,7 @@ def _rule_spec_from_path(path): return spec -def _rule_tuple_from_request_object(rule_template, rule_id, 
req_obj, device=None): +def _rule_tuple_from_request_object(rule_template, rule_id, req_obj): if rule_template in ['override', 'underride']: if 'conditions' not in req_obj: raise InvalidRuleException("Missing 'conditions'") @@ -322,12 +278,6 @@ def _rule_tuple_from_request_object(rule_template, rule_id, req_obj, device=None else: raise InvalidRuleException("Unknown rule template: %s" % (rule_template,)) - if device: - conditions.append({ - 'kind': 'device', - 'profile_tag': device - }) - if 'actions' not in req_obj: raise InvalidRuleException("No actions found") actions = req_obj['actions'] @@ -349,17 +299,6 @@ def _add_empty_priority_class_arrays(d): return d -def _profile_tag_from_conditions(conditions): - """ - Given a list of conditions, return the profile tag of the - device rule if there is one - """ - for c in conditions: - if c['kind'] == 'device': - return c['profile_tag'] - return None - - def _filter_ruleset_with_path(ruleset, path): if path == []: raise UnrecognizedRequestError( @@ -403,19 +342,11 @@ def _priority_class_from_spec(spec): raise InvalidRuleException("Unknown template: %s" % (spec['template'])) pc = PRIORITY_CLASS_MAP[spec['template']] - if spec['scope'] == 'device': - pc += len(PRIORITY_CLASS_MAP) - return pc def _priority_class_to_template_name(pc): - if pc > PRIORITY_CLASS_MAP['override']: - # per-device - prio_class_index = pc - len(PRIORITY_CLASS_MAP) - return PRIORITY_CLASS_INVERSE_MAP[prio_class_index] - else: - return PRIORITY_CLASS_INVERSE_MAP[pc] + return PRIORITY_CLASS_INVERSE_MAP[pc] def _rule_to_template(rule): @@ -445,23 +376,12 @@ def _rule_to_template(rule): return templaterule -def _strip_device_condition(rule): - for i, c in enumerate(rule['conditions']): - if c['kind'] == 'device': - del rule['conditions'][i] - return rule - - def _namespaced_rule_id_from_spec(spec): return _namespaced_rule_id(spec, spec['rule_id']) def _namespaced_rule_id(spec, rule_id): - if spec['scope'] == 'global': - scope = 'global' - else: - scope = 'device/%s' % (spec['profile_tag']) - return "%s/%s/%s" % (scope, spec['template'], rule_id) + return "global/%s/%s" % (spec['template'], rule_id) def _rule_id_from_namespaced(in_rule_id): diff --git a/synapse/rest/client/v1/pusher.py b/synapse/rest/client/v1/pusher.py index 5547f1b112..4c662e6e3c 100644 --- a/synapse/rest/client/v1/pusher.py +++ b/synapse/rest/client/v1/pusher.py @@ -45,7 +45,7 @@ class PusherRestServlet(ClientV1RestServlet): ) defer.returnValue((200, {})) - reqd = ['profile_tag', 'kind', 'app_id', 'app_display_name', + reqd = ['kind', 'app_id', 'app_display_name', 'device_display_name', 'pushkey', 'lang', 'data'] missing = [] for i in reqd: @@ -73,14 +73,14 @@ class PusherRestServlet(ClientV1RestServlet): yield pusher_pool.add_pusher( user_id=user.to_string(), access_token=requester.access_token_id, - profile_tag=content['profile_tag'], kind=content['kind'], app_id=content['app_id'], app_display_name=content['app_display_name'], device_display_name=content['device_display_name'], pushkey=content['pushkey'], lang=content['lang'], - data=content['data'] + data=content['data'], + profile_tag=content.get('profile_tag', ""), ) except PusherConfigException as pce: raise SynapseError(400, "Config Error: " + pce.message, diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py index d77a817682..5820539a92 100644 --- a/synapse/storage/event_push_actions.py +++ b/synapse/storage/event_push_actions.py @@ -27,15 +27,14 @@ class EventPushActionsStore(SQLBaseStore): def 
_set_push_actions_for_event_and_users_txn(self, txn, event, tuples): """ :param event: the event set actions for - :param tuples: list of tuples of (user_id, profile_tag, actions) + :param tuples: list of tuples of (user_id, actions) """ values = [] - for uid, profile_tag, actions in tuples: + for uid, actions in tuples: values.append({ 'room_id': event.room_id, 'event_id': event.event_id, 'user_id': uid, - 'profile_tag': profile_tag, 'actions': json.dumps(actions), 'stream_ordering': event.internal_metadata.stream_ordering, 'topological_ordering': event.depth, @@ -43,7 +42,7 @@ class EventPushActionsStore(SQLBaseStore): 'highlight': 1 if _action_has_highlight(actions) else 0, }) - for uid, _, __ in tuples: + for uid, __ in tuples: txn.call_after( self.get_unread_event_push_actions_by_room_for_user.invalidate_many, (event.room_id, uid) diff --git a/synapse/storage/pusher.py b/synapse/storage/pusher.py index 8ec706178a..c23648cdbc 100644 --- a/synapse/storage/pusher.py +++ b/synapse/storage/pusher.py @@ -80,9 +80,9 @@ class PusherStore(SQLBaseStore): defer.returnValue(rows) @defer.inlineCallbacks - def add_pusher(self, user_id, access_token, profile_tag, kind, app_id, + def add_pusher(self, user_id, access_token, kind, app_id, app_display_name, device_display_name, - pushkey, pushkey_ts, lang, data): + pushkey, pushkey_ts, lang, data, profile_tag=""): try: next_id = yield self._pushers_id_gen.get_next() yield self._simple_upsert( @@ -95,12 +95,12 @@ class PusherStore(SQLBaseStore): dict( access_token=access_token, kind=kind, - profile_tag=profile_tag, app_display_name=app_display_name, device_display_name=device_display_name, ts=pushkey_ts, lang=lang, data=encode_canonical_json(data), + profile_tag=profile_tag, ), insertion_values=dict( id=next_id, From b8cdec92c77f741aaafa3655e7f4959db7889a3d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 18 Feb 2016 16:33:07 +0000 Subject: [PATCH 194/349] WheelTimer: Don't scan list, use index. --- synapse/util/wheel_timer.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/synapse/util/wheel_timer.py b/synapse/util/wheel_timer.py index b447b2456c..2c9f957616 100644 --- a/synapse/util/wheel_timer.py +++ b/synapse/util/wheel_timer.py @@ -46,11 +46,14 @@ class WheelTimer(object): then (int): When to return the object strictly after. """ then_key = int(then / self.bucket_size) + 1 - for entry in self.entries: - # Add to first bucket we find. This should gracefully handle inserts - # for times in the past. 
- if entry.end_key >= then_key: - entry.queue.append(obj) + + if self.entries: + min_key = self.entries[0].end_key + max_key = self.entries[-1].end_key + + if then_key <= max_key: + # The max here is to protect against inserts for times in the past + self.entries[max(min_key, then_key) - min_key].queue.append(obj) return next_key = int(now / self.bucket_size) + 1 From 42109a62a43ca0b8c1e0d3f797bbc70e0018ca5c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 18 Feb 2016 16:37:28 +0000 Subject: [PATCH 195/349] Remove unused param from get_max_token --- synapse/storage/account_data.py | 4 ++-- synapse/storage/events.py | 2 +- synapse/storage/receipts.py | 6 +++--- synapse/storage/stream.py | 2 +- synapse/storage/tags.py | 6 +++--- synapse/storage/util/id_generators.py | 4 +--- 6 files changed, 11 insertions(+), 13 deletions(-) diff --git a/synapse/storage/account_data.py b/synapse/storage/account_data.py index b8387fc500..91cbf399b6 100644 --- a/synapse/storage/account_data.py +++ b/synapse/storage/account_data.py @@ -168,7 +168,7 @@ class AccountDataStore(SQLBaseStore): "add_room_account_data", add_account_data_txn, next_id ) - result = yield self._account_data_id_gen.get_max_token(self) + result = yield self._account_data_id_gen.get_max_token() defer.returnValue(result) @defer.inlineCallbacks @@ -207,7 +207,7 @@ class AccountDataStore(SQLBaseStore): "add_user_account_data", add_account_data_txn, next_id ) - result = yield self._account_data_id_gen.get_max_token(self) + result = yield self._account_data_id_gen.get_max_token() defer.returnValue(result) def _update_max_stream_id(self, txn, next_id): diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 3a5c6ee4b1..1dd3236829 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -131,7 +131,7 @@ class EventsStore(SQLBaseStore): except _RollbackButIsFineException: pass - max_persisted_id = yield self._stream_id_gen.get_max_token(self) + max_persisted_id = yield self._stream_id_gen.get_max_token() defer.returnValue((stream_ordering, max_persisted_id)) @defer.inlineCallbacks diff --git a/synapse/storage/receipts.py b/synapse/storage/receipts.py index 4202a6b3dc..a7343c97f7 100644 --- a/synapse/storage/receipts.py +++ b/synapse/storage/receipts.py @@ -31,7 +31,7 @@ class ReceiptsStore(SQLBaseStore): super(ReceiptsStore, self).__init__(hs) self._receipts_stream_cache = StreamChangeCache( - "ReceiptsRoomChangeCache", self._receipts_id_gen.get_max_token(None) + "ReceiptsRoomChangeCache", self._receipts_id_gen.get_max_token() ) @cached(num_args=2) @@ -222,7 +222,7 @@ class ReceiptsStore(SQLBaseStore): defer.returnValue(results) def get_max_receipt_stream_id(self): - return self._receipts_id_gen.get_max_token(self) + return self._receipts_id_gen.get_max_token() def insert_linearized_receipt_txn(self, txn, room_id, receipt_type, user_id, event_id, data, stream_id): @@ -347,7 +347,7 @@ class ReceiptsStore(SQLBaseStore): room_id, receipt_type, user_id, event_ids, data ) - max_persisted_id = yield self._stream_id_gen.get_max_token(self) + max_persisted_id = yield self._stream_id_gen.get_max_token() defer.returnValue((stream_id, max_persisted_id)) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index c236dafafb..8908d5b5da 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -531,7 +531,7 @@ class StreamStore(SQLBaseStore): @defer.inlineCallbacks def get_room_events_max_id(self, direction='f'): - token = yield self._stream_id_gen.get_max_token(self) + token = yield 
self._stream_id_gen.get_max_token() if direction != 'b': defer.returnValue("s%d" % (token,)) else: diff --git a/synapse/storage/tags.py b/synapse/storage/tags.py index e1a9c0c261..9551aa9739 100644 --- a/synapse/storage/tags.py +++ b/synapse/storage/tags.py @@ -30,7 +30,7 @@ class TagsStore(SQLBaseStore): Returns: A deferred int. """ - return self._account_data_id_gen.get_max_token(self) + return self._account_data_id_gen.get_max_token() @cached() def get_tags_for_user(self, user_id): @@ -147,7 +147,7 @@ class TagsStore(SQLBaseStore): self.get_tags_for_user.invalidate((user_id,)) - result = yield self._account_data_id_gen.get_max_token(self) + result = yield self._account_data_id_gen.get_max_token() defer.returnValue(result) @defer.inlineCallbacks @@ -169,7 +169,7 @@ class TagsStore(SQLBaseStore): self.get_tags_for_user.invalidate((user_id,)) - result = yield self._account_data_id_gen.get_max_token(self) + result = yield self._account_data_id_gen.get_max_token() defer.returnValue(result) def _update_revision_txn(self, txn, user_id, room_id, next_id): diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index 5ce54f76de..ef5e4a4668 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -130,11 +130,9 @@ class StreamIdGenerator(object): return manager() - def get_max_token(self, *args): + def get_max_token(self): """Returns the maximum stream id such that all stream ids less than or equal to it have been successfully persisted. - - Used to take a DataStore param, which is no longer needed. """ with self._lock: if self._unfinished_ids: From e6c5e3f28a234be342cf5e4f15a925e65e82fd0d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 18 Feb 2016 16:39:28 +0000 Subject: [PATCH 196/349] Close cursor --- synapse/storage/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index fcb968e8f4..9be1d12fac 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -212,6 +212,7 @@ class DataStore(RoomMemberStore, RoomStore, txn = db_conn.cursor() txn.execute(sql, (PresenceState.OFFLINE,)) rows = self.cursor_to_dict(txn) + txn.close() for row in rows: row["currently_active"] = bool(row["currently_active"]) From e12ec335a58bb7957cb7abfc1c96500bb4fb2627 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 18 Feb 2016 17:01:53 +0000 Subject: [PATCH 197/349] "You are not..." 
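(This next patch is a one-line fix: it restores the dropped "not" in the presence status servlet's permission error.) Stepping back to patch 195 for a moment, the docstring it touches states the get_max_token contract: the returned value is the largest stream id such that every id up to and including it has been persisted. A toy model of just that contract, assuming nothing about the real StreamIdGenerator beyond its docstring:

class ToyStreamIdGenerator(object):
    """Toy illustration only: ids are handed out in order, and the max token
    never advances past an id whose write has not finished yet."""

    def __init__(self):
        self._current_max = 0
        self._unfinished = set()

    def get_next(self):
        self._current_max += 1
        self._unfinished.add(self._current_max)
        return self._current_max

    def mark_finished(self, stream_id):
        self._unfinished.discard(stream_id)

    def get_max_token(self):
        if self._unfinished:
            # Everything below the oldest in-flight id is safe to expose.
            return min(self._unfinished) - 1
        return self._current_max


gen = ToyStreamIdGenerator()
first, second, third = gen.get_next(), gen.get_next(), gen.get_next()
gen.mark_finished(first)
gen.mark_finished(third)
assert gen.get_max_token() == 1   # id 2 is still in flight, so 3 stays hidden
gen.mark_finished(second)
assert gen.get_max_token() == 3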
--- synapse/rest/client/v1/presence.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/rest/client/v1/presence.py b/synapse/rest/client/v1/presence.py index 27ea5f2a43..bbfa1d6ac4 100644 --- a/synapse/rest/client/v1/presence.py +++ b/synapse/rest/client/v1/presence.py @@ -41,7 +41,7 @@ class PresenceStatusRestServlet(ClientV1RestServlet): ) if not allowed: - raise AuthError(403, "You are allowed to see their presence.") + raise AuthError(403, "You are not allowed to see their presence.") state = yield self.handlers.presence_handler.get_state(target_user=user) From 5614b4dafb2e1dd59fac55fe4e8b29cd4bc3e785 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 19 Feb 2016 09:50:54 +0000 Subject: [PATCH 198/349] Add presence metrics --- synapse/handlers/presence.py | 8 ++++++++ synapse/util/wheel_timer.py | 6 ++++++ 2 files changed, 14 insertions(+) diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index fb9536cee3..86b94ab84b 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -44,6 +44,9 @@ logger = logging.getLogger(__name__) metrics = synapse.metrics.get_metrics_for(__name__) +notified_presence_counter = metrics.register_counter("notified_presence") +presence_updates_counter = metrics.register_counter("presence_updates") + # If a user was last active in the last LAST_ACTIVE_GRANULARITY, consider them # "currently_active" @@ -170,6 +173,8 @@ class PresenceHandler(BaseHandler): 5000, ) + metrics.register_callback("wheel_timer_size", lambda: len(self.wheel_timer)) + @defer.inlineCallbacks def _on_shutdown(self): """Gets called when shutting down. This lets us persist any updates that @@ -233,7 +238,10 @@ class PresenceHandler(BaseHandler): # TODO: We should probably ensure there are no races hereafter + presence_updates_counter.inc_by(len(new_states)) + if to_notify: + notified_presence_counter.inc_by(len(to_notify)) yield self._persist_and_notify(to_notify.values()) self.unpersisted_users_changes |= set(s.user_id for s in new_states) diff --git a/synapse/util/wheel_timer.py b/synapse/util/wheel_timer.py index 2c9f957616..7412fc57a4 100644 --- a/synapse/util/wheel_timer.py +++ b/synapse/util/wheel_timer.py @@ -89,3 +89,9 @@ class WheelTimer(object): ret.extend(self.entries.pop(0).queue) return ret + + def __len__(self): + l = 0 + for entry in self.entries: + l += len(entry.queue) + return l From 5f4eca38160c34db4f282948cf6dceebd55f240e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 19 Feb 2016 10:21:41 +0000 Subject: [PATCH 199/349] More metrics --- synapse/handlers/presence.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 86b94ab84b..c3cfc9441d 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -46,6 +46,9 @@ metrics = synapse.metrics.get_metrics_for(__name__) notified_presence_counter = metrics.register_counter("notified_presence") presence_updates_counter = metrics.register_counter("presence_updates") +presence_updates_counter = metrics.register_counter("presence_updates") +timers_fired_counter = metrics.register_counter("timers_fired") +federation_presence_counter = metrics.register_counter("federation_presence") # If a user was last active in the last LAST_ACTIVE_GRANULARITY, consider them @@ -276,6 +279,8 @@ class PresenceHandler(BaseHandler): for user_id in set(users_to_check) ] + timers_fired_counter.inc_by(len(states)) + changes = handle_timeouts( states, is_mine_fn=self.hs.is_mine_id, @@ 
-507,6 +512,7 @@ class PresenceHandler(BaseHandler): updates.append(prev_state.copy_and_replace(**new_fields)) if updates: + federation_presence_counter.inc_by(len(updates)) yield self._update_states(updates) @defer.inlineCallbacks From 929cb0ed7d2e5c189bdb75f0bc4f5eebecfee698 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 19 Feb 2016 10:58:27 +0000 Subject: [PATCH 200/349] Don't set currently_active for remote presence --- synapse/handlers/presence.py | 18 ++++++++++++------ tests/handlers/test_presence.py | 19 +++++++++++++++---- 2 files changed, 27 insertions(+), 10 deletions(-) diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index c3cfc9441d..80a2c0ceba 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -995,6 +995,18 @@ def handle_update(prev_state, new_state, is_mine, wheel_timer, now): then=new_state.last_active_ts + IDLE_TIMER ) + active = now - new_state.last_active_ts < LAST_ACTIVE_GRANULARITY + new_state = new_state.copy_and_replace( + currently_active=active, + ) + + if active: + wheel_timer.insert( + now=now, + obj=user_id, + then=new_state.last_active_ts + LAST_ACTIVE_GRANULARITY + ) + if new_state.state != PresenceState.OFFLINE: # User has stopped syncing wheel_timer.insert( @@ -1018,12 +1030,6 @@ def handle_update(prev_state, new_state, is_mine, wheel_timer, now): then=new_state.last_federation_update_ts + FEDERATION_TIMEOUT ) - if new_state.state == PresenceState.ONLINE: - active = now - new_state.last_active_ts < LAST_ACTIVE_GRANULARITY - new_state = new_state.copy_and_replace( - currently_active=active, - ) - # Check whether the change was something worth notifying about if should_notify(prev_state, new_state): new_state = new_state.copy_and_replace( diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py index 197298db15..87c795fcfa 100644 --- a/tests/handlers/test_presence.py +++ b/tests/handlers/test_presence.py @@ -49,7 +49,7 @@ class PresenceUpdateTestCase(unittest.TestCase): self.assertEquals(new_state.status_msg, state.status_msg) self.assertEquals(state.last_federation_update_ts, now) - self.assertEquals(wheel_timer.insert.call_count, 2) + self.assertEquals(wheel_timer.insert.call_count, 3) wheel_timer.insert.assert_has_calls([ call( now=now, @@ -60,7 +60,12 @@ class PresenceUpdateTestCase(unittest.TestCase): now=now, obj=user_id, then=new_state.last_user_sync_ts + SYNC_ONLINE_TIMEOUT - ) + ), + call( + now=now, + obj=user_id, + then=new_state.last_active_ts + LAST_ACTIVE_GRANULARITY + ), ], any_order=True) def test_online_to_online(self): @@ -91,7 +96,7 @@ class PresenceUpdateTestCase(unittest.TestCase): self.assertEquals(new_state.status_msg, state.status_msg) self.assertEquals(state.last_federation_update_ts, now) - self.assertEquals(wheel_timer.insert.call_count, 2) + self.assertEquals(wheel_timer.insert.call_count, 3) wheel_timer.insert.assert_has_calls([ call( now=now, @@ -102,7 +107,12 @@ class PresenceUpdateTestCase(unittest.TestCase): now=now, obj=user_id, then=new_state.last_user_sync_ts + SYNC_ONLINE_TIMEOUT - ) + ), + call( + now=now, + obj=user_id, + then=new_state.last_active_ts + LAST_ACTIVE_GRANULARITY + ), ], any_order=True) def test_online_to_online_last_active(self): @@ -153,6 +163,7 @@ class PresenceUpdateTestCase(unittest.TestCase): prev_state = UserPresenceState.default(user_id) prev_state = prev_state.copy_and_replace( state=PresenceState.ONLINE, + last_active_ts=now, ) new_state = prev_state.copy_and_replace( From be799453aab4c06ffbca75ce90fc781db88d3abc 
Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 19 Feb 2016 11:29:33 +0000 Subject: [PATCH 201/349] Remove spurious extra metrics --- synapse/handlers/presence.py | 1 - 1 file changed, 1 deletion(-) diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 80a2c0ceba..8ef5ce1acd 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -46,7 +46,6 @@ metrics = synapse.metrics.get_metrics_for(__name__) notified_presence_counter = metrics.register_counter("notified_presence") presence_updates_counter = metrics.register_counter("presence_updates") -presence_updates_counter = metrics.register_counter("presence_updates") timers_fired_counter = metrics.register_counter("timers_fired") federation_presence_counter = metrics.register_counter("federation_presence") From 4a95eb0a1257087d7dce1493442e519dabc8d159 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 19 Feb 2016 11:32:04 +0000 Subject: [PATCH 202/349] Add presence metric --- synapse/handlers/presence.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 8ef5ce1acd..4197311a97 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -48,6 +48,7 @@ notified_presence_counter = metrics.register_counter("notified_presence") presence_updates_counter = metrics.register_counter("presence_updates") timers_fired_counter = metrics.register_counter("timers_fired") federation_presence_counter = metrics.register_counter("federation_presence") +bump_active_time_counter = metrics.register_counter("bump_active_time") # If a user was last active in the last LAST_ACTIVE_GRANULARITY, consider them @@ -296,6 +297,8 @@ class PresenceHandler(BaseHandler): """ user_id = user.to_string() + bump_active_time_counter.inc() + prev_state = yield self.current_state_for_user(user_id) new_fields = { From b71ca2b0140fa4d7866ebb10ee49556de7eff44f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 19 Feb 2016 11:41:02 +0000 Subject: [PATCH 203/349] Allow guest users access to messages in rooms they have joined There should be no difference between guest users and non-guest users in terms of access to messages. Define the semantics of the is_peeking argument to filter_events_for_clients (slightly) better; interpret it appropriately, and set it correctly from /sync. --- synapse/handlers/_base.py | 52 +++++++++++++++++++----- synapse/handlers/sync.py | 2 - synapse/push/bulk_push_rule_evaluator.py | 2 +- 3 files changed, 42 insertions(+), 14 deletions(-) diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py index 064e8723c8..da219184c5 100644 --- a/synapse/handlers/_base.py +++ b/synapse/handlers/_base.py @@ -53,9 +53,15 @@ class BaseHandler(object): self.event_builder_factory = hs.get_event_builder_factory() @defer.inlineCallbacks - def _filter_events_for_clients(self, user_tuples, events, event_id_to_state): + def filter_events_for_clients(self, user_tuples, events, event_id_to_state): """ Returns dict of user_id -> list of events that user is allowed to see. + + :param (str, bool) user_tuples: (user id, is_peeking) for each + user to be checked. 
is_peeking should be true if: + * the user is not currently a member of the room, and: + * the user has not been a member of the room since the given + events """ forgotten = yield defer.gatherResults([ self.store.who_forgot_in_room( @@ -72,18 +78,20 @@ class BaseHandler(object): def allowed(event, user_id, is_peeking): state = event_id_to_state[event.event_id] + # get the room_visibility at the time of the event. visibility_event = state.get((EventTypes.RoomHistoryVisibility, ""), None) if visibility_event: visibility = visibility_event.content.get("history_visibility", "shared") else: visibility = "shared" + # if it was world_readable, it's easy: everyone can read it if visibility == "world_readable": return True - if is_peeking: - return False - + # get the user's membership at the time of the event. (or rather, + # just *after* the event. Which means that people can see their + # own join events, but not (currently) their own leave events.) membership_event = state.get((EventTypes.Member, user_id), None) if membership_event: if membership_event.event_id in event_id_forgotten: @@ -93,20 +101,32 @@ class BaseHandler(object): else: membership = None + # if the user was a member of the room at the time of the event, + # they can see it. if membership == Membership.JOIN: return True if event.type == EventTypes.RoomHistoryVisibility: - return not is_peeking + # XXX why are m.room.history_visibility events special? + # return True + pass if visibility == "shared": - return True - elif visibility == "joined": - return membership == Membership.JOIN + # user can also see the event if he has become a member since + # the event + # + # XXX: if the user has subsequently joined and then left again, + # ideally we would share history up to the point they left. But + # we don't know when they left. + return not is_peeking elif visibility == "invited": + # user can also see the event if he was *invited* at the time + # of the event. return membership == Membership.INVITE - return True + # presumably visibility is "joined"; we weren't a member at the + # time of the event, so we're done. + return False defer.returnValue({ user_id: [ @@ -119,7 +139,17 @@ class BaseHandler(object): @defer.inlineCallbacks def _filter_events_for_client(self, user_id, events, is_peeking=False): - # Assumes that user has at some point joined the room if not is_guest. 
+ """ + Check which events a user is allowed to see + + :param str user_id: user id to be checked + :param [synapse.events.EventBase] events: list of events to be checked + :param bool is_peeking should be True if: + * the user is not currently a member of the room, and: + * the user has not been a member of the room since the given + events + :rtype [synapse.events.EventBase] + """ types = ( (EventTypes.RoomHistoryVisibility, ""), (EventTypes.Member, user_id), @@ -128,7 +158,7 @@ class BaseHandler(object): frozenset(e.event_id for e in events), types=types ) - res = yield self._filter_events_for_clients( + res = yield self.filter_events_for_clients( [(user_id, is_peeking)], events, event_id_to_state ) defer.returnValue(res.get(user_id, [])) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 1d0f0058a2..f5122b5fb1 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -623,7 +623,6 @@ class SyncHandler(BaseHandler): recents = yield self._filter_events_for_client( sync_config.user.to_string(), recents, - is_peeking=sync_config.is_guest, ) else: recents = [] @@ -645,7 +644,6 @@ class SyncHandler(BaseHandler): loaded_recents = yield self._filter_events_for_client( sync_config.user.to_string(), loaded_recents, - is_peeking=sync_config.is_guest, ) loaded_recents.extend(recents) recents = loaded_recents diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index 8ac5ceb9ef..206b20e15f 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -103,7 +103,7 @@ class BulkPushRuleEvaluator: users_dict = yield self.store.are_guests(self.rules_by_user.keys()) - filtered_by_user = yield handler._filter_events_for_clients( + filtered_by_user = yield handler.filter_events_for_clients( users_dict.items(), [event], {event.event_id: current_state} ) From 24d9f2c140637390746198af900d656b75875cba Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 19 Feb 2016 11:50:48 +0000 Subject: [PATCH 204/349] Add Measures to presence --- synapse/handlers/presence.py | 121 ++++++++++++++++++----------------- 1 file changed, 63 insertions(+), 58 deletions(-) diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 4197311a97..44bdc97ccb 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -31,6 +31,7 @@ from synapse.storage.presence import UserPresenceState from synapse.util.logcontext import preserve_fn from synapse.util.logutils import log_function +from synapse.util.metrics import Measure from synapse.util.wheel_timer import WheelTimer from synapse.types import UserID import synapse.metrics @@ -209,57 +210,60 @@ class PresenceHandler(BaseHandler): """ now = self.clock.time_msec() - # NOTE: We purposefully don't yield between now and when we've - # calculated what we want to do with the new states, to avoid races. + with Measure(self.clock, "presence_update_states"): - to_notify = {} # Changes we want to notify everyone about - to_federation_ping = {} # These need sending keep-alives - for new_state in new_states: - user_id = new_state.user_id + # NOTE: We purposefully don't yield between now and when we've + # calculated what we want to do with the new states, to avoid races. - # Its fine to not hit the database here, as the only thing not in - # the current state cache are OFFLINE states, where the only field - # of interest is last_active which is safe enough to assume is 0 - # here. 
- prev_state = self.user_to_current_state.get( - user_id, UserPresenceState.default(user_id) - ) + to_notify = {} # Changes we want to notify everyone about + to_federation_ping = {} # These need sending keep-alives - new_state, should_notify, should_ping = handle_update( - prev_state, new_state, - is_mine=self.hs.is_mine_id(user_id), - wheel_timer=self.wheel_timer, - now=now - ) + for new_state in new_states: + user_id = new_state.user_id - self.user_to_current_state[user_id] = new_state + # Its fine to not hit the database here, as the only thing not in + # the current state cache are OFFLINE states, where the only field + # of interest is last_active which is safe enough to assume is 0 + # here. + prev_state = self.user_to_current_state.get( + user_id, UserPresenceState.default(user_id) + ) - if should_notify: - to_notify[user_id] = new_state - elif should_ping: - to_federation_ping[user_id] = new_state + new_state, should_notify, should_ping = handle_update( + prev_state, new_state, + is_mine=self.hs.is_mine_id(user_id), + wheel_timer=self.wheel_timer, + now=now + ) - # TODO: We should probably ensure there are no races hereafter + self.user_to_current_state[user_id] = new_state - presence_updates_counter.inc_by(len(new_states)) + if should_notify: + to_notify[user_id] = new_state + elif should_ping: + to_federation_ping[user_id] = new_state - if to_notify: - notified_presence_counter.inc_by(len(to_notify)) - yield self._persist_and_notify(to_notify.values()) + # TODO: We should probably ensure there are no races hereafter - self.unpersisted_users_changes |= set(s.user_id for s in new_states) - self.unpersisted_users_changes -= set(to_notify.keys()) + presence_updates_counter.inc_by(len(new_states)) - to_federation_ping = { - user_id: state for user_id, state in to_federation_ping.items() - if user_id not in to_notify - } - if to_federation_ping: - _, _, hosts_to_states = yield self._get_interested_parties( - to_federation_ping.values() - ) + if to_notify: + notified_presence_counter.inc_by(len(to_notify)) + yield self._persist_and_notify(to_notify.values()) - self._push_to_remotes(hosts_to_states) + self.unpersisted_users_changes |= set(s.user_id for s in new_states) + self.unpersisted_users_changes -= set(to_notify.keys()) + + to_federation_ping = { + user_id: state for user_id, state in to_federation_ping.items() + if user_id not in to_notify + } + if to_federation_ping: + _, _, hosts_to_states = yield self._get_interested_parties( + to_federation_ping.values() + ) + + self._push_to_remotes(hosts_to_states) def _handle_timeouts(self): """Checks the presence of users that have timed out and updates as @@ -267,26 +271,27 @@ class PresenceHandler(BaseHandler): """ now = self.clock.time_msec() - # Fetch the list of users that *may* have timed out. Things may have - # changed since the timeout was set, so we won't necessarily have to - # take any action. - users_to_check = self.wheel_timer.fetch(now) + with Measure(self.clock, "presence_handle_timeouts"): + # Fetch the list of users that *may* have timed out. Things may have + # changed since the timeout was set, so we won't necessarily have to + # take any action. 
+ users_to_check = self.wheel_timer.fetch(now) - states = [ - self.user_to_current_state.get( - user_id, UserPresenceState.default(user_id) + states = [ + self.user_to_current_state.get( + user_id, UserPresenceState.default(user_id) + ) + for user_id in set(users_to_check) + ] + + timers_fired_counter.inc_by(len(states)) + + changes = handle_timeouts( + states, + is_mine_fn=self.hs.is_mine_id, + user_to_num_current_syncs=self.user_to_num_current_syncs, + now=now, ) - for user_id in set(users_to_check) - ] - - timers_fired_counter.inc_by(len(states)) - - changes = handle_timeouts( - states, - is_mine_fn=self.hs.is_mine_id, - user_to_num_current_syncs=self.user_to_num_current_syncs, - now=now, - ) preserve_fn(self._update_states)(changes) From 42ac5f0c1ae560e64dd5e9335d7f16b83fdabda4 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 19 Feb 2016 12:07:49 +0000 Subject: [PATCH 205/349] Only send presence updates to remote hosts if user is ours --- synapse/handlers/presence.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 44bdc97ccb..451fc70b37 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -428,13 +428,21 @@ class PresenceHandler(BaseHandler): hosts_to_states = {} for room_id, states in room_ids_to_states.items(): + local_states = filter(self.hs.is_mine_id, states) + if not local_states: + continue + hosts = yield self.store.get_joined_hosts_for_room(room_id) for host in hosts: - hosts_to_states.setdefault(host, []).extend(states) + hosts_to_states.setdefault(host, []).extend(local_states) for user_id, states in users_to_states.items(): + local_states = filter(self.hs.is_mine_id, states) + if not local_states: + continue + host = UserID.from_string(user_id).domain - hosts_to_states.setdefault(host, []).extend(states) + hosts_to_states.setdefault(host, []).extend(local_states) # TODO: de-dup hosts_to_states, as a single host might have multiple # of same presence From 3dbaeef58cf33f0399f40a2aa3c6205fdbe5dbd8 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 19 Feb 2016 12:27:35 +0000 Subject: [PATCH 206/349] Correctly filter states --- synapse/handlers/presence.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 451fc70b37..aed640450f 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -428,7 +428,7 @@ class PresenceHandler(BaseHandler): hosts_to_states = {} for room_id, states in room_ids_to_states.items(): - local_states = filter(self.hs.is_mine_id, states) + local_states = filter(lambda s: self.hs.is_mine_id(s.user_id), states) if not local_states: continue @@ -437,7 +437,7 @@ class PresenceHandler(BaseHandler): hosts_to_states.setdefault(host, []).extend(local_states) for user_id, states in users_to_states.items(): - local_states = filter(self.hs.is_mine_id, states) + local_states = filter(lambda s: self.hs.is_mine_id(s.user_id), states) if not local_states: continue From 700487a7c72c769537fb04c56d2e9c6dd29d7f84 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Fri, 19 Feb 2016 15:34:38 +0000 Subject: [PATCH 207/349] Fix flake8 warnings for tests --- tests/__init__.py | 1 - tests/api/test_auth.py | 4 +- tests/api/test_filtering.py | 12 +- tests/api/test_ratelimiting.py | 1 + tests/appservice/test_appservice.py | 2 +- tests/appservice/test_scheduler.py | 4 +- tests/crypto/__init__.py | 1 - tests/events/test_utils.py | 1 + 
tests/handlers/test_appservice.py | 2 - tests/handlers/test_auth.py | 1 - tests/handlers/test_profile.py | 9 +- tests/handlers/test_typing.py | 115 +++++----- tests/rest/client/v1/__init__.py | 1 - tests/rest/client/v1/test_profile.py | 56 +++-- tests/rest/client/v1/test_rooms.py | 220 ++++++++++---------- tests/rest/client/v1/test_typing.py | 44 ++-- tests/rest/client/v1/utils.py | 5 +- tests/rest/client/v2_alpha/__init__.py | 2 +- tests/rest/client/v2_alpha/test_filter.py | 19 +- tests/rest/client/v2_alpha/test_register.py | 4 +- tests/storage/event_injector.py | 8 +- tests/storage/test__base.py | 8 +- tests/storage/test_background_update.py | 8 +- tests/storage/test_base.py | 89 ++++---- tests/storage/test_events.py | 1 - tests/storage/test_profile.py | 2 +- tests/storage/test_registration.py | 6 +- tests/storage/test_room.py | 14 +- tests/storage/test_roommember.py | 22 +- tests/storage/test_stream.py | 4 +- tests/test_distributor.py | 11 +- tests/test_test_utils.py | 3 +- tests/unittest.py | 7 +- tests/util/__init__.py | 1 - tests/util/test_dict_cache.py | 1 - tests/util/test_snapshot_cache.py | 1 + tests/util/test_treecache.py | 1 + tests/utils.py | 30 ++- tox.ini | 2 +- 39 files changed, 359 insertions(+), 364 deletions(-) diff --git a/tests/__init__.py b/tests/__init__.py index d0e9399dda..bfebb0f644 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -12,4 +12,3 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py index 474c5c418f..7e7b0b4b1d 100644 --- a/tests/api/test_auth.py +++ b/tests/api/test_auth.py @@ -281,9 +281,9 @@ class AuthTestCase(unittest.TestCase): macaroon.add_first_party_caveat("gen = 1") macaroon.add_first_party_caveat("type = access") macaroon.add_first_party_caveat("user_id = %s" % (user,)) - macaroon.add_first_party_caveat("time < 1") # ms + macaroon.add_first_party_caveat("time < 1") # ms - self.hs.clock.now = 5000 # seconds + self.hs.clock.now = 5000 # seconds yield self.auth.get_user_from_macaroon(macaroon.serialize()) # TODO(daniel): Turn on the check that we validate expiration, when we diff --git a/tests/api/test_filtering.py b/tests/api/test_filtering.py index ceb0089268..4297a89f85 100644 --- a/tests/api/test_filtering.py +++ b/tests/api/test_filtering.py @@ -21,7 +21,6 @@ from tests.utils import ( MockHttpResource, DeferredMockCallable, setup_test_homeserver ) -from synapse.types import UserID from synapse.api.filtering import Filter from synapse.events import FrozenEvent @@ -356,7 +355,6 @@ class FilteringTestCase(unittest.TestCase): "types": ["m.*"] } } - user = UserID.from_string("@" + user_localpart + ":test") filter_id = yield self.datastore.add_user_filter( user_localpart=user_localpart, user_filter=user_filter_json, @@ -411,7 +409,6 @@ class FilteringTestCase(unittest.TestCase): } } } - user = UserID.from_string("@" + user_localpart + ":test") filter_id = yield self.datastore.add_user_filter( user_localpart=user_localpart, user_filter=user_filter_json, @@ -440,7 +437,6 @@ class FilteringTestCase(unittest.TestCase): } } } - user = UserID.from_string("@" + user_localpart + ":test") filter_id = yield self.datastore.add_user_filter( user_localpart=user_localpart, user_filter=user_filter_json, @@ -476,12 +472,12 @@ class FilteringTestCase(unittest.TestCase): ) self.assertEquals(filter_id, 0) - self.assertEquals(user_filter_json, - (yield 
self.datastore.get_user_filter( + self.assertEquals(user_filter_json, ( + yield self.datastore.get_user_filter( user_localpart=user_localpart, filter_id=0, - )) - ) + ) + )) @defer.inlineCallbacks def test_get_filter(self): diff --git a/tests/api/test_ratelimiting.py b/tests/api/test_ratelimiting.py index dd0bc19ecf..c45b59b36c 100644 --- a/tests/api/test_ratelimiting.py +++ b/tests/api/test_ratelimiting.py @@ -2,6 +2,7 @@ from synapse.api.ratelimiting import Ratelimiter from tests import unittest + class TestRatelimiter(unittest.TestCase): def test_allowed(self): diff --git a/tests/appservice/test_appservice.py b/tests/appservice/test_appservice.py index ef48bbc296..d6cc1881e9 100644 --- a/tests/appservice/test_appservice.py +++ b/tests/appservice/test_appservice.py @@ -14,7 +14,7 @@ # limitations under the License. from synapse.appservice import ApplicationService -from mock import Mock, PropertyMock +from mock import Mock from tests import unittest diff --git a/tests/appservice/test_scheduler.py b/tests/appservice/test_scheduler.py index c9c2d36210..631a229332 100644 --- a/tests/appservice/test_scheduler.py +++ b/tests/appservice/test_scheduler.py @@ -12,7 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from synapse.appservice import ApplicationServiceState, AppServiceTransaction +from synapse.appservice import ApplicationServiceState from synapse.appservice.scheduler import ( _ServiceQueuer, _TransactionController, _Recoverer ) @@ -235,7 +235,7 @@ class ApplicationServiceSchedulerQueuerTestCase(unittest.TestCase): srv_2_event2 = Mock(event_id="srv2b") send_return_list = [srv_1_defer, srv_2_defer] - self.txn_ctrl.send = Mock(side_effect=lambda x,y: send_return_list.pop(0)) + self.txn_ctrl.send = Mock(side_effect=lambda x, y: send_return_list.pop(0)) # send events for different ASes and make sure they are sent self.queuer.enqueue(srv1, srv_1_event) diff --git a/tests/crypto/__init__.py b/tests/crypto/__init__.py index d0e9399dda..bfebb0f644 100644 --- a/tests/crypto/__init__.py +++ b/tests/crypto/__init__.py @@ -12,4 +12,3 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - diff --git a/tests/events/test_utils.py b/tests/events/test_utils.py index 894d0c3845..fb0953c4ec 100644 --- a/tests/events/test_utils.py +++ b/tests/events/test_utils.py @@ -19,6 +19,7 @@ from .. import unittest from synapse.events import FrozenEvent from synapse.events.utils import prune_event + class PruneEventTestCase(unittest.TestCase): """ Asserts that a new event constructed with `evdict` will look like `matchdict` when it is redacted. 
""" diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py index ba6e2c640e..7ddbbb9b4a 100644 --- a/tests/handlers/test_appservice.py +++ b/tests/handlers/test_appservice.py @@ -129,8 +129,6 @@ class AppServiceHandlerTestCase(unittest.TestCase): self.assertEquals(result.room_id, room_id) self.assertEquals(result.servers, servers) - - def _mkservice(self, is_interested): service = Mock() service.is_interested = Mock(return_value=is_interested) diff --git a/tests/handlers/test_auth.py b/tests/handlers/test_auth.py index 2f21bf91e5..21077cbe9a 100644 --- a/tests/handlers/test_auth.py +++ b/tests/handlers/test_auth.py @@ -15,7 +15,6 @@ import pymacaroons -from mock import Mock, NonCallableMock from synapse.handlers.auth import AuthHandler from tests import unittest from tests.utils import setup_test_homeserver diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py index 95c87f0ebd..a87703bbfd 100644 --- a/tests/handlers/test_profile.py +++ b/tests/handlers/test_profile.py @@ -41,8 +41,10 @@ class ProfileTestCase(unittest.TestCase): ]) self.query_handlers = {} + def register_query_handler(query_type, handler): self.query_handlers[query_type] = handler + self.mock_federation.register_query_handler = register_query_handler hs = yield setup_test_homeserver( @@ -63,7 +65,7 @@ class ProfileTestCase(unittest.TestCase): self.store = hs.get_datastore() self.frank = UserID.from_string("@1234ABCD:test") - self.bob = UserID.from_string("@4567:test") + self.bob = UserID.from_string("@4567:test") self.alice = UserID.from_string("@alice:remote") yield self.store.create_profile(self.frank.localpart) @@ -133,8 +135,9 @@ class ProfileTestCase(unittest.TestCase): @defer.inlineCallbacks def test_set_my_avatar(self): - yield self.handler.set_avatar_url(self.frank, self.frank, - "http://my.server/pic.gif") + yield self.handler.set_avatar_url( + self.frank, self.frank, "http://my.server/pic.gif" + ) self.assertEquals( (yield self.store.get_profile_avatar_url(self.frank.localpart)), diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py index 763c04d667..3955e7f5b1 100644 --- a/tests/handlers/test_typing.py +++ b/tests/handlers/test_typing.py @@ -138,9 +138,9 @@ class TypingNotificationsTestCase(unittest.TestCase): self.room_member_handler.get_joined_rooms_for_user = get_joined_rooms_for_user @defer.inlineCallbacks - def fetch_room_distributions_into(room_id, localusers=None, - remotedomains=None, ignore_user=None): - + def fetch_room_distributions_into( + room_id, localusers=None, remotedomains=None, ignore_user=None + ): members = yield get_room_members(room_id) for member in members: if ignore_user is not None and member == ignore_user: @@ -153,7 +153,8 @@ class TypingNotificationsTestCase(unittest.TestCase): if remotedomains is not None: remotedomains.add(member.domain) self.room_member_handler.fetch_room_distributions_into = ( - fetch_room_distributions_into) + fetch_room_distributions_into + ) def check_joined_room(room_id, user_id): if user_id not in [u.to_string() for u in self.room_members]: @@ -207,9 +208,12 @@ class TypingNotificationsTestCase(unittest.TestCase): put_json = self.mock_http_client.put_json put_json.expect_call_and_return( - call("farm", + call( + "farm", path="/_matrix/federation/v1/send/1000000/", - data=_expect_edu("farm", "m.typing", + data=_expect_edu( + "farm", + "m.typing", content={ "room_id": self.room_id, "user_id": self.u_apple.to_string(), @@ -237,9 +241,12 @@ class 
TypingNotificationsTestCase(unittest.TestCase): self.assertEquals(self.event_source.get_current_key(), 0) - yield self.mock_federation_resource.trigger("PUT", + yield self.mock_federation_resource.trigger( + "PUT", "/_matrix/federation/v1/send/1000000/", - _make_edu_json("farm", "m.typing", + _make_edu_json( + "farm", + "m.typing", content={ "room_id": self.room_id, "user_id": self.u_onion.to_string(), @@ -257,16 +264,13 @@ class TypingNotificationsTestCase(unittest.TestCase): room_ids=[self.room_id], from_key=0 ) - self.assertEquals( - events[0], - [ - {"type": "m.typing", - "room_id": self.room_id, - "content": { - "user_ids": [self.u_onion.to_string()], - }}, - ] - ) + self.assertEquals(events[0], [{ + "type": "m.typing", + "room_id": self.room_id, + "content": { + "user_ids": [self.u_onion.to_string()], + }, + }]) @defer.inlineCallbacks def test_stopped_typing(self): @@ -274,9 +278,12 @@ class TypingNotificationsTestCase(unittest.TestCase): put_json = self.mock_http_client.put_json put_json.expect_call_and_return( - call("farm", + call( + "farm", path="/_matrix/federation/v1/send/1000000/", - data=_expect_edu("farm", "m.typing", + data=_expect_edu( + "farm", + "m.typing", content={ "room_id": self.room_id, "user_id": self.u_apple.to_string(), @@ -317,16 +324,13 @@ class TypingNotificationsTestCase(unittest.TestCase): room_ids=[self.room_id], from_key=0, ) - self.assertEquals( - events[0], - [ - {"type": "m.typing", - "room_id": self.room_id, - "content": { - "user_ids": [], - }}, - ] - ) + self.assertEquals(events[0], [{ + "type": "m.typing", + "room_id": self.room_id, + "content": { + "user_ids": [], + }, + }]) @defer.inlineCallbacks def test_typing_timeout(self): @@ -351,16 +355,13 @@ class TypingNotificationsTestCase(unittest.TestCase): room_ids=[self.room_id], from_key=0, ) - self.assertEquals( - events[0], - [ - {"type": "m.typing", - "room_id": self.room_id, - "content": { - "user_ids": [self.u_apple.to_string()], - }}, - ] - ) + self.assertEquals(events[0], [{ + "type": "m.typing", + "room_id": self.room_id, + "content": { + "user_ids": [self.u_apple.to_string()], + }, + }]) self.clock.advance_time(11) @@ -373,16 +374,13 @@ class TypingNotificationsTestCase(unittest.TestCase): room_ids=[self.room_id], from_key=1, ) - self.assertEquals( - events[0], - [ - {"type": "m.typing", - "room_id": self.room_id, - "content": { - "user_ids": [], - }}, - ] - ) + self.assertEquals(events[0], [{ + "type": "m.typing", + "room_id": self.room_id, + "content": { + "user_ids": [], + }, + }]) # SYN-230 - see if we can still set after timeout @@ -403,13 +401,10 @@ class TypingNotificationsTestCase(unittest.TestCase): room_ids=[self.room_id], from_key=0, ) - self.assertEquals( - events[0], - [ - {"type": "m.typing", - "room_id": self.room_id, - "content": { - "user_ids": [self.u_apple.to_string()], - }}, - ] - ) + self.assertEquals(events[0], [{ + "type": "m.typing", + "room_id": self.room_id, + "content": { + "user_ids": [self.u_apple.to_string()], + }, + }]) diff --git a/tests/rest/client/v1/__init__.py b/tests/rest/client/v1/__init__.py index d0e9399dda..bfebb0f644 100644 --- a/tests/rest/client/v1/__init__.py +++ b/tests/rest/client/v1/__init__.py @@ -12,4 +12,3 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- diff --git a/tests/rest/client/v1/test_profile.py b/tests/rest/client/v1/test_profile.py index c1a3f52043..0785965de2 100644 --- a/tests/rest/client/v1/test_profile.py +++ b/tests/rest/client/v1/test_profile.py @@ -65,8 +65,9 @@ class ProfileTestCase(unittest.TestCase): mocked_get = self.mock_handler.get_displayname mocked_get.return_value = defer.succeed("Frank") - (code, response) = yield self.mock_resource.trigger("GET", - "/profile/%s/displayname" % (myid), None) + (code, response) = yield self.mock_resource.trigger( + "GET", "/profile/%s/displayname" % (myid), None + ) self.assertEquals(200, code) self.assertEquals({"displayname": "Frank"}, response) @@ -77,9 +78,11 @@ class ProfileTestCase(unittest.TestCase): mocked_set = self.mock_handler.set_displayname mocked_set.return_value = defer.succeed(()) - (code, response) = yield self.mock_resource.trigger("PUT", - "/profile/%s/displayname" % (myid), - '{"displayname": "Frank Jr."}') + (code, response) = yield self.mock_resource.trigger( + "PUT", + "/profile/%s/displayname" % (myid), + '{"displayname": "Frank Jr."}' + ) self.assertEquals(200, code) self.assertEquals(mocked_set.call_args[0][0].localpart, "1234ABCD") @@ -91,19 +94,23 @@ class ProfileTestCase(unittest.TestCase): mocked_set = self.mock_handler.set_displayname mocked_set.side_effect = AuthError(400, "message") - (code, response) = yield self.mock_resource.trigger("PUT", - "/profile/%s/displayname" % ("@4567:test"), '"Frank Jr."') + (code, response) = yield self.mock_resource.trigger( + "PUT", "/profile/%s/displayname" % ("@4567:test"), '"Frank Jr."' + ) - self.assertTrue(400 <= code < 499, - msg="code %d is in the 4xx range" % (code)) + self.assertTrue( + 400 <= code < 499, + msg="code %d is in the 4xx range" % (code) + ) @defer.inlineCallbacks def test_get_other_name(self): mocked_get = self.mock_handler.get_displayname mocked_get.return_value = defer.succeed("Bob") - (code, response) = yield self.mock_resource.trigger("GET", - "/profile/%s/displayname" % ("@opaque:elsewhere"), None) + (code, response) = yield self.mock_resource.trigger( + "GET", "/profile/%s/displayname" % ("@opaque:elsewhere"), None + ) self.assertEquals(200, code) self.assertEquals({"displayname": "Bob"}, response) @@ -113,19 +120,23 @@ class ProfileTestCase(unittest.TestCase): mocked_set = self.mock_handler.set_displayname mocked_set.side_effect = SynapseError(400, "message") - (code, response) = yield self.mock_resource.trigger("PUT", - "/profile/%s/displayname" % ("@opaque:elsewhere"), None) + (code, response) = yield self.mock_resource.trigger( + "PUT", "/profile/%s/displayname" % ("@opaque:elsewhere"), None + ) - self.assertTrue(400 <= code <= 499, - msg="code %d is in the 4xx range" % (code)) + self.assertTrue( + 400 <= code <= 499, + msg="code %d is in the 4xx range" % (code) + ) @defer.inlineCallbacks def test_get_my_avatar(self): mocked_get = self.mock_handler.get_avatar_url mocked_get.return_value = defer.succeed("http://my.server/me.png") - (code, response) = yield self.mock_resource.trigger("GET", - "/profile/%s/avatar_url" % (myid), None) + (code, response) = yield self.mock_resource.trigger( + "GET", "/profile/%s/avatar_url" % (myid), None + ) self.assertEquals(200, code) self.assertEquals({"avatar_url": "http://my.server/me.png"}, response) @@ -136,12 +147,13 @@ class ProfileTestCase(unittest.TestCase): mocked_set = self.mock_handler.set_avatar_url mocked_set.return_value = defer.succeed(()) - (code, response) = yield self.mock_resource.trigger("PUT", - "/profile/%s/avatar_url" % (myid), - 
'{"avatar_url": "http://my.server/pic.gif"}') + (code, response) = yield self.mock_resource.trigger( + "PUT", + "/profile/%s/avatar_url" % (myid), + '{"avatar_url": "http://my.server/pic.gif"}' + ) self.assertEquals(200, code) self.assertEquals(mocked_set.call_args[0][0].localpart, "1234ABCD") self.assertEquals(mocked_set.call_args[0][1].localpart, "1234ABCD") - self.assertEquals(mocked_set.call_args[0][2], - "http://my.server/pic.gif") + self.assertEquals(mocked_set.call_args[0][2], "http://my.server/pic.gif") diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py index a90b9dc3d8..afca5303ba 100644 --- a/tests/rest/client/v1/test_rooms.py +++ b/tests/rest/client/v1/test_rooms.py @@ -82,19 +82,22 @@ class RoomPermissionsTestCase(RestTestCase): is_public=True) # send a message in one of the rooms - self.created_rmid_msg_path = ("/rooms/%s/send/m.room.message/a1" % - (self.created_rmid)) + self.created_rmid_msg_path = ( + "/rooms/%s/send/m.room.message/a1" % (self.created_rmid) + ) (code, response) = yield self.mock_resource.trigger( - "PUT", - self.created_rmid_msg_path, - '{"msgtype":"m.text","body":"test msg"}') + "PUT", + self.created_rmid_msg_path, + '{"msgtype":"m.text","body":"test msg"}' + ) self.assertEquals(200, code, msg=str(response)) # set topic for public room (code, response) = yield self.mock_resource.trigger( - "PUT", - "/rooms/%s/state/m.room.topic" % self.created_public_rmid, - '{"topic":"Public Room Topic"}') + "PUT", + "/rooms/%s/state/m.room.topic" % self.created_public_rmid, + '{"topic":"Public Room Topic"}' + ) self.assertEquals(200, code, msg=str(response)) # auth as user_id now @@ -103,37 +106,6 @@ class RoomPermissionsTestCase(RestTestCase): def tearDown(self): pass -# @defer.inlineCallbacks -# def test_get_message(self): -# # get message in uncreated room, expect 403 -# (code, response) = yield self.mock_resource.trigger_get( -# "/rooms/noroom/messages/someid/m1") -# self.assertEquals(403, code, msg=str(response)) -# -# # get message in created room not joined (no state), expect 403 -# (code, response) = yield self.mock_resource.trigger_get( -# self.created_rmid_msg_path) -# self.assertEquals(403, code, msg=str(response)) -# -# # get message in created room and invited, expect 403 -# yield self.invite(room=self.created_rmid, src=self.rmcreator_id, -# targ=self.user_id) -# (code, response) = yield self.mock_resource.trigger_get( -# self.created_rmid_msg_path) -# self.assertEquals(403, code, msg=str(response)) -# -# # get message in created room and joined, expect 200 -# yield self.join(room=self.created_rmid, user=self.user_id) -# (code, response) = yield self.mock_resource.trigger_get( -# self.created_rmid_msg_path) -# self.assertEquals(200, code, msg=str(response)) -# -# # get message in created room and left, expect 403 -# yield self.leave(room=self.created_rmid, user=self.user_id) -# (code, response) = yield self.mock_resource.trigger_get( -# self.created_rmid_msg_path) -# self.assertEquals(403, code, msg=str(response)) - @defer.inlineCallbacks def test_send_message(self): msg_content = '{"msgtype":"m.text","body":"hello"}' @@ -195,25 +167,30 @@ class RoomPermissionsTestCase(RestTestCase): # set/get topic in uncreated room, expect 403 (code, response) = yield self.mock_resource.trigger( - "PUT", "/rooms/%s/state/m.room.topic" % self.uncreated_rmid, - topic_content) + "PUT", "/rooms/%s/state/m.room.topic" % self.uncreated_rmid, + topic_content + ) self.assertEquals(403, code, msg=str(response)) (code, response) = yield 
self.mock_resource.trigger_get( - "/rooms/%s/state/m.room.topic" % self.uncreated_rmid) + "/rooms/%s/state/m.room.topic" % self.uncreated_rmid + ) self.assertEquals(403, code, msg=str(response)) # set/get topic in created PRIVATE room not joined, expect 403 (code, response) = yield self.mock_resource.trigger( - "PUT", topic_path, topic_content) + "PUT", topic_path, topic_content + ) self.assertEquals(403, code, msg=str(response)) (code, response) = yield self.mock_resource.trigger_get(topic_path) self.assertEquals(403, code, msg=str(response)) # set topic in created PRIVATE room and invited, expect 403 - yield self.invite(room=self.created_rmid, src=self.rmcreator_id, - targ=self.user_id) + yield self.invite( + room=self.created_rmid, src=self.rmcreator_id, targ=self.user_id + ) (code, response) = yield self.mock_resource.trigger( - "PUT", topic_path, topic_content) + "PUT", topic_path, topic_content + ) self.assertEquals(403, code, msg=str(response)) # get topic in created PRIVATE room and invited, expect 403 @@ -226,7 +203,8 @@ class RoomPermissionsTestCase(RestTestCase): # Only room ops can set topic by default self.auth_user_id = self.rmcreator_id (code, response) = yield self.mock_resource.trigger( - "PUT", topic_path, topic_content) + "PUT", topic_path, topic_content + ) self.assertEquals(200, code, msg=str(response)) self.auth_user_id = self.user_id @@ -237,30 +215,31 @@ class RoomPermissionsTestCase(RestTestCase): # set/get topic in created PRIVATE room and left, expect 403 yield self.leave(room=self.created_rmid, user=self.user_id) (code, response) = yield self.mock_resource.trigger( - "PUT", topic_path, topic_content) + "PUT", topic_path, topic_content + ) self.assertEquals(403, code, msg=str(response)) (code, response) = yield self.mock_resource.trigger_get(topic_path) self.assertEquals(200, code, msg=str(response)) # get topic in PUBLIC room, not joined, expect 403 (code, response) = yield self.mock_resource.trigger_get( - "/rooms/%s/state/m.room.topic" % self.created_public_rmid) + "/rooms/%s/state/m.room.topic" % self.created_public_rmid + ) self.assertEquals(403, code, msg=str(response)) # set topic in PUBLIC room, not joined, expect 403 (code, response) = yield self.mock_resource.trigger( - "PUT", - "/rooms/%s/state/m.room.topic" % self.created_public_rmid, - topic_content) + "PUT", + "/rooms/%s/state/m.room.topic" % self.created_public_rmid, + topic_content + ) self.assertEquals(403, code, msg=str(response)) @defer.inlineCallbacks def _test_get_membership(self, room=None, members=[], expect_code=None): - path = "/rooms/%s/state/m.room.member/%s" for member in members: - (code, response) = yield self.mock_resource.trigger_get( - path % - (room, member)) + path = "/rooms/%s/state/m.room.member/%s" % (room, member) + (code, response) = yield self.mock_resource.trigger_get(path) self.assertEquals(expect_code, code) @defer.inlineCallbacks @@ -461,20 +440,23 @@ class RoomsMemberListTestCase(RestTestCase): def test_get_member_list(self): room_id = yield self.create_room_as(self.user_id) (code, response) = yield self.mock_resource.trigger_get( - "/rooms/%s/members" % room_id) + "/rooms/%s/members" % room_id + ) self.assertEquals(200, code, msg=str(response)) @defer.inlineCallbacks def test_get_member_list_no_room(self): (code, response) = yield self.mock_resource.trigger_get( - "/rooms/roomdoesnotexist/members") + "/rooms/roomdoesnotexist/members" + ) self.assertEquals(403, code, msg=str(response)) @defer.inlineCallbacks def test_get_member_list_no_permission(self): room_id = 
yield self.create_room_as("@some_other_guy:red") (code, response) = yield self.mock_resource.trigger_get( - "/rooms/%s/members" % room_id) + "/rooms/%s/members" % room_id + ) self.assertEquals(403, code, msg=str(response)) @defer.inlineCallbacks @@ -636,34 +618,41 @@ class RoomTopicTestCase(RestTestCase): @defer.inlineCallbacks def test_invalid_puts(self): # missing keys or invalid json - (code, response) = yield self.mock_resource.trigger("PUT", - self.path, '{}') + (code, response) = yield self.mock_resource.trigger( + "PUT", self.path, '{}' + ) self.assertEquals(400, code, msg=str(response)) - (code, response) = yield self.mock_resource.trigger("PUT", - self.path, '{"_name":"bob"}') + (code, response) = yield self.mock_resource.trigger( + "PUT", self.path, '{"_name":"bob"}' + ) self.assertEquals(400, code, msg=str(response)) - (code, response) = yield self.mock_resource.trigger("PUT", - self.path, '{"nao') + (code, response) = yield self.mock_resource.trigger( + "PUT", self.path, '{"nao' + ) self.assertEquals(400, code, msg=str(response)) - (code, response) = yield self.mock_resource.trigger("PUT", - self.path, '[{"_name":"bob"},{"_name":"jill"}]') + (code, response) = yield self.mock_resource.trigger( + "PUT", self.path, '[{"_name":"bob"},{"_name":"jill"}]' + ) self.assertEquals(400, code, msg=str(response)) - (code, response) = yield self.mock_resource.trigger("PUT", - self.path, 'text only') + (code, response) = yield self.mock_resource.trigger( + "PUT", self.path, 'text only' + ) self.assertEquals(400, code, msg=str(response)) - (code, response) = yield self.mock_resource.trigger("PUT", - self.path, '') + (code, response) = yield self.mock_resource.trigger( + "PUT", self.path, '' + ) self.assertEquals(400, code, msg=str(response)) # valid key, wrong type content = '{"topic":["Topic name"]}' - (code, response) = yield self.mock_resource.trigger("PUT", - self.path, content) + (code, response) = yield self.mock_resource.trigger( + "PUT", self.path, content + ) self.assertEquals(400, code, msg=str(response)) @defer.inlineCallbacks @@ -674,8 +663,9 @@ class RoomTopicTestCase(RestTestCase): # valid put content = '{"topic":"Topic name"}' - (code, response) = yield self.mock_resource.trigger("PUT", - self.path, content) + (code, response) = yield self.mock_resource.trigger( + "PUT", self.path, content + ) self.assertEquals(200, code, msg=str(response)) # valid get @@ -687,8 +677,9 @@ class RoomTopicTestCase(RestTestCase): def test_rooms_topic_with_extra_keys(self): # valid put with extra keys content = '{"topic":"Seasons","subtopic":"Summer"}' - (code, response) = yield self.mock_resource.trigger("PUT", - self.path, content) + (code, response) = yield self.mock_resource.trigger( + "PUT", self.path, content + ) self.assertEquals(200, code, msg=str(response)) # valid get @@ -740,33 +731,38 @@ class RoomMemberStateTestCase(RestTestCase): def test_invalid_puts(self): path = "/rooms/%s/state/m.room.member/%s" % (self.room_id, self.user_id) # missing keys or invalid json - (code, response) = yield self.mock_resource.trigger("PUT", - path, '{}') + (code, response) = yield self.mock_resource.trigger("PUT", path, '{}') self.assertEquals(400, code, msg=str(response)) - (code, response) = yield self.mock_resource.trigger("PUT", - path, '{"_name":"bob"}') + (code, response) = yield self.mock_resource.trigger( + "PUT", path, '{"_name":"bob"}' + ) self.assertEquals(400, code, msg=str(response)) - (code, response) = yield self.mock_resource.trigger("PUT", - path, '{"nao') + (code, response) = yield 
self.mock_resource.trigger( + "PUT", path, '{"nao' + ) self.assertEquals(400, code, msg=str(response)) - (code, response) = yield self.mock_resource.trigger("PUT", - path, '[{"_name":"bob"},{"_name":"jill"}]') + (code, response) = yield self.mock_resource.trigger( + "PUT", path, '[{"_name":"bob"},{"_name":"jill"}]' + ) self.assertEquals(400, code, msg=str(response)) - (code, response) = yield self.mock_resource.trigger("PUT", - path, 'text only') + (code, response) = yield self.mock_resource.trigger( + "PUT", path, 'text only' + ) self.assertEquals(400, code, msg=str(response)) - (code, response) = yield self.mock_resource.trigger("PUT", - path, '') + (code, response) = yield self.mock_resource.trigger( + "PUT", path, '' + ) self.assertEquals(400, code, msg=str(response)) # valid keys, wrong types - content = ('{"membership":["%s","%s","%s"]}' % - (Membership.INVITE, Membership.JOIN, Membership.LEAVE)) + content = ('{"membership":["%s","%s","%s"]}' % ( + Membership.INVITE, Membership.JOIN, Membership.LEAVE + )) (code, response) = yield self.mock_resource.trigger("PUT", path, content) self.assertEquals(400, code, msg=str(response)) @@ -813,8 +809,9 @@ class RoomMemberStateTestCase(RestTestCase): ) # valid invite message with custom key - content = ('{"membership":"%s","invite_text":"%s"}' % - (Membership.INVITE, "Join us!")) + content = ('{"membership":"%s","invite_text":"%s"}' % ( + Membership.INVITE, "Join us!" + )) (code, response) = yield self.mock_resource.trigger("PUT", path, content) self.assertEquals(200, code, msg=str(response)) @@ -867,28 +864,34 @@ class RoomMessagesTestCase(RestTestCase): path = "/rooms/%s/send/m.room.message/mid1" % ( urllib.quote(self.room_id)) # missing keys or invalid json - (code, response) = yield self.mock_resource.trigger("PUT", - path, '{}') + (code, response) = yield self.mock_resource.trigger( + "PUT", path, '{}' + ) self.assertEquals(400, code, msg=str(response)) - (code, response) = yield self.mock_resource.trigger("PUT", - path, '{"_name":"bob"}') + (code, response) = yield self.mock_resource.trigger( + "PUT", path, '{"_name":"bob"}' + ) self.assertEquals(400, code, msg=str(response)) - (code, response) = yield self.mock_resource.trigger("PUT", - path, '{"nao') + (code, response) = yield self.mock_resource.trigger( + "PUT", path, '{"nao' + ) self.assertEquals(400, code, msg=str(response)) - (code, response) = yield self.mock_resource.trigger("PUT", - path, '[{"_name":"bob"},{"_name":"jill"}]') + (code, response) = yield self.mock_resource.trigger( + "PUT", path, '[{"_name":"bob"},{"_name":"jill"}]' + ) self.assertEquals(400, code, msg=str(response)) - (code, response) = yield self.mock_resource.trigger("PUT", - path, 'text only') + (code, response) = yield self.mock_resource.trigger( + "PUT", path, 'text only' + ) self.assertEquals(400, code, msg=str(response)) - (code, response) = yield self.mock_resource.trigger("PUT", - path, '') + (code, response) = yield self.mock_resource.trigger( + "PUT", path, '' + ) self.assertEquals(400, code, msg=str(response)) @defer.inlineCallbacks @@ -959,7 +962,8 @@ class RoomInitialSyncTestCase(RestTestCase): @defer.inlineCallbacks def test_initial_sync(self): (code, response) = yield self.mock_resource.trigger_get( - "/rooms/%s/initialSync" % self.room_id) + "/rooms/%s/initialSync" % self.room_id + ) self.assertEquals(200, code) self.assertEquals(self.room_id, response["room_id"]) @@ -983,8 +987,8 @@ class RoomInitialSyncTestCase(RestTestCase): self.assertTrue("presence" in response) - presence_by_user = 
{e["content"]["user_id"]: e - for e in response["presence"] + presence_by_user = { + e["content"]["user_id"]: e for e in response["presence"] } self.assertTrue(self.user_id in presence_by_user) self.assertEquals("m.presence", presence_by_user[self.user_id]["type"]) diff --git a/tests/rest/client/v1/test_typing.py b/tests/rest/client/v1/test_typing.py index c4ac181a33..16d788ff61 100644 --- a/tests/rest/client/v1/test_typing.py +++ b/tests/rest/client/v1/test_typing.py @@ -81,9 +81,9 @@ class RoomTypingTestCase(RestTestCase): return defer.succeed([]) @defer.inlineCallbacks - def fetch_room_distributions_into(room_id, localusers=None, - remotedomains=None, ignore_user=None): - + def fetch_room_distributions_into( + room_id, localusers=None, remotedomains=None, ignore_user=None + ): members = yield get_room_members(room_id) for member in members: if ignore_user is not None and member == ignore_user: @@ -96,7 +96,8 @@ class RoomTypingTestCase(RestTestCase): if remotedomains is not None: remotedomains.add(member.domain) hs.get_handlers().room_member_handler.fetch_room_distributions_into = ( - fetch_room_distributions_into) + fetch_room_distributions_into + ) synapse.rest.client.v1.room.register_servlets(hs, self.mock_resource) @@ -109,8 +110,8 @@ class RoomTypingTestCase(RestTestCase): @defer.inlineCallbacks def test_set_typing(self): - (code, _) = yield self.mock_resource.trigger("PUT", - "/rooms/%s/typing/%s" % (self.room_id, self.user_id), + (code, _) = yield self.mock_resource.trigger( + "PUT", "/rooms/%s/typing/%s" % (self.room_id, self.user_id), '{"typing": true, "timeout": 30000}' ) self.assertEquals(200, code) @@ -120,41 +121,38 @@ class RoomTypingTestCase(RestTestCase): from_key=0, room_ids=[self.room_id], ) - self.assertEquals( - events[0], - [ - {"type": "m.typing", - "room_id": self.room_id, - "content": { - "user_ids": [self.user_id], - }}, - ] - ) + self.assertEquals(events[0], [{ + "type": "m.typing", + "room_id": self.room_id, + "content": { + "user_ids": [self.user_id], + } + }]) @defer.inlineCallbacks def test_set_not_typing(self): - (code, _) = yield self.mock_resource.trigger("PUT", - "/rooms/%s/typing/%s" % (self.room_id, self.user_id), + (code, _) = yield self.mock_resource.trigger( + "PUT", "/rooms/%s/typing/%s" % (self.room_id, self.user_id), '{"typing": false}' ) self.assertEquals(200, code) @defer.inlineCallbacks def test_typing_timeout(self): - (code, _) = yield self.mock_resource.trigger("PUT", - "/rooms/%s/typing/%s" % (self.room_id, self.user_id), + (code, _) = yield self.mock_resource.trigger( + "PUT", "/rooms/%s/typing/%s" % (self.room_id, self.user_id), '{"typing": true, "timeout": 30000}' ) self.assertEquals(200, code) self.assertEquals(self.event_source.get_current_key(), 1) - self.clock.advance_time(31); + self.clock.advance_time(31) self.assertEquals(self.event_source.get_current_key(), 2) - (code, _) = yield self.mock_resource.trigger("PUT", - "/rooms/%s/typing/%s" % (self.room_id, self.user_id), + (code, _) = yield self.mock_resource.trigger( + "PUT", "/rooms/%s/typing/%s" % (self.room_id, self.user_id), '{"typing": true, "timeout": 30000}' ) self.assertEquals(200, code) diff --git a/tests/rest/client/v1/utils.py b/tests/rest/client/v1/utils.py index af376804f6..17524b2e23 100644 --- a/tests/rest/client/v1/utils.py +++ b/tests/rest/client/v1/utils.py @@ -84,8 +84,9 @@ class RestTestCase(unittest.TestCase): "membership": membership } - (code, response) = yield self.mock_resource.trigger("PUT", path, - json.dumps(data)) + (code, response) = yield 
self.mock_resource.trigger( + "PUT", path, json.dumps(data) + ) self.assertEquals(expect_code, code, msg=str(response)) self.auth_user_id = temp_id diff --git a/tests/rest/client/v2_alpha/__init__.py b/tests/rest/client/v2_alpha/__init__.py index 16dce6c723..84334dce34 100644 --- a/tests/rest/client/v2_alpha/__init__.py +++ b/tests/rest/client/v2_alpha/__init__.py @@ -55,7 +55,7 @@ class V2AlphaRestTestCase(unittest.TestCase): r.register_servlets(hs, self.mock_resource) def make_datastore_mock(self): - store = Mock(spec=[ + store = Mock(spec=[ "insert_client_ip", ]) store.get_app_service_by_token = Mock(return_value=None) diff --git a/tests/rest/client/v2_alpha/test_filter.py b/tests/rest/client/v2_alpha/test_filter.py index c86e904c8e..d1442aafac 100644 --- a/tests/rest/client/v2_alpha/test_filter.py +++ b/tests/rest/client/v2_alpha/test_filter.py @@ -15,8 +15,6 @@ from twisted.internet import defer -from mock import Mock - from . import V2AlphaRestTestCase from synapse.rest.client.v2_alpha import filter @@ -53,9 +51,8 @@ class FilterTestCase(V2AlphaRestTestCase): @defer.inlineCallbacks def test_add_filter(self): - (code, response) = yield self.mock_resource.trigger("POST", - "/user/%s/filter" % (self.USER_ID), - '{"type": ["m.*"]}' + (code, response) = yield self.mock_resource.trigger( + "POST", "/user/%s/filter" % (self.USER_ID), '{"type": ["m.*"]}' ) self.assertEquals(200, code) self.assertEquals({"filter_id": "0"}, response) @@ -70,8 +67,8 @@ class FilterTestCase(V2AlphaRestTestCase): {"type": ["m.*"]} ] - (code, response) = yield self.mock_resource.trigger("GET", - "/user/%s/filter/0" % (self.USER_ID), None + (code, response) = yield self.mock_resource.trigger_get( + "/user/%s/filter/0" % (self.USER_ID) ) self.assertEquals(200, code) self.assertEquals({"type": ["m.*"]}, response) @@ -82,14 +79,14 @@ class FilterTestCase(V2AlphaRestTestCase): {"type": ["m.*"]} ] - (code, response) = yield self.mock_resource.trigger("GET", - "/user/%s/filter/2" % (self.USER_ID), None + (code, response) = yield self.mock_resource.trigger_get( + "/user/%s/filter/2" % (self.USER_ID) ) self.assertEquals(404, code) @defer.inlineCallbacks def test_get_filter_no_user(self): - (code, response) = yield self.mock_resource.trigger("GET", - "/user/%s/filter/0" % (self.USER_ID), None + (code, response) = yield self.mock_resource.trigger_get( + "/user/%s/filter/0" % (self.USER_ID) ) self.assertEquals(404, code) diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index df0841b0b1..b867599079 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -1,7 +1,7 @@ from synapse.rest.client.v2_alpha.register import RegisterRestServlet from synapse.api.errors import SynapseError from twisted.internet import defer -from mock import Mock, MagicMock +from mock import Mock from tests import unittest import json @@ -24,7 +24,7 @@ class RegisterRestServletTestCase(unittest.TestCase): self.auth_result = (False, None, None) self.auth_handler = Mock( - check_auth=Mock(side_effect=lambda x,y,z: self.auth_result) + check_auth=Mock(side_effect=lambda x, y, z: self.auth_result) ) self.registration_handler = Mock() self.identity_handler = Mock() diff --git a/tests/storage/event_injector.py b/tests/storage/event_injector.py index dca785eb27..f22ba8db89 100644 --- a/tests/storage/event_injector.py +++ b/tests/storage/event_injector.py @@ -14,15 +14,9 @@ # limitations under the License. 
-from tests import unittest from twisted.internet import defer -from synapse.api.constants import EventTypes, Membership -from synapse.types import UserID, RoomID - -from tests.utils import setup_test_homeserver - -from mock import Mock +from synapse.api.constants import EventTypes class EventInjector: diff --git a/tests/storage/test__base.py b/tests/storage/test__base.py index 219288621d..96b7dba5fe 100644 --- a/tests/storage/test__base.py +++ b/tests/storage/test__base.py @@ -174,11 +174,13 @@ class CacheDecoratorTestCase(unittest.TestCase): # There must have been at least 2 evictions, meaning if we calculate # all 12 values again, we must get called at least 2 more times - for k in range(0,12): + for k in range(0, 12): yield a.func(k) - self.assertTrue(callcount[0] >= 14, - msg="Expected callcount >= 14, got %d" % (callcount[0])) + self.assertTrue( + callcount[0] >= 14, + msg="Expected callcount >= 14, got %d" % (callcount[0]) + ) def test_prefill(self): callcount = [0] diff --git a/tests/storage/test_background_update.py b/tests/storage/test_background_update.py index 29289fa9b4..6e4d9b1373 100644 --- a/tests/storage/test_background_update.py +++ b/tests/storage/test_background_update.py @@ -1,13 +1,11 @@ from tests import unittest from twisted.internet import defer -from synapse.api.constants import EventTypes -from synapse.types import UserID, RoomID, RoomAlias - from tests.utils import setup_test_homeserver from mock import Mock + class BackgroundUpdateTestCase(unittest.TestCase): @defer.inlineCallbacks @@ -24,8 +22,8 @@ class BackgroundUpdateTestCase(unittest.TestCase): @defer.inlineCallbacks def test_do_background_update(self): - desired_count = 1000; - duration_ms = 42; + desired_count = 1000 + duration_ms = 42 @defer.inlineCallbacks def update(progress, count): diff --git a/tests/storage/test_base.py b/tests/storage/test_base.py index 152d027663..c76545be65 100644 --- a/tests/storage/test_base.py +++ b/tests/storage/test_base.py @@ -17,7 +17,7 @@ from tests import unittest from twisted.internet import defer -from mock import Mock, call +from mock import Mock from collections import OrderedDict @@ -62,13 +62,12 @@ class SQLBaseStoreTestCase(unittest.TestCase): self.mock_txn.rowcount = 1 yield self.datastore._simple_insert( - table="tablename", - values={"columname": "Value"} + table="tablename", + values={"columname": "Value"} ) self.mock_txn.execute.assert_called_with( - "INSERT INTO tablename (columname) VALUES(?)", - ("Value",) + "INSERT INTO tablename (columname) VALUES(?)", ("Value",) ) @defer.inlineCallbacks @@ -76,14 +75,14 @@ class SQLBaseStoreTestCase(unittest.TestCase): self.mock_txn.rowcount = 1 yield self.datastore._simple_insert( - table="tablename", - # Use OrderedDict() so we can assert on the SQL generated - values=OrderedDict([("colA", 1), ("colB", 2), ("colC", 3)]) + table="tablename", + # Use OrderedDict() so we can assert on the SQL generated + values=OrderedDict([("colA", 1), ("colB", 2), ("colC", 3)]) ) self.mock_txn.execute.assert_called_with( - "INSERT INTO tablename (colA, colB, colC) VALUES(?, ?, ?)", - (1, 2, 3,) + "INSERT INTO tablename (colA, colB, colC) VALUES(?, ?, ?)", + (1, 2, 3,) ) @defer.inlineCallbacks @@ -92,15 +91,14 @@ class SQLBaseStoreTestCase(unittest.TestCase): self.mock_txn.fetchall.return_value = [("Value",)] value = yield self.datastore._simple_select_one_onecol( - table="tablename", - keyvalues={"keycol": "TheKey"}, - retcol="retcol" + table="tablename", + keyvalues={"keycol": "TheKey"}, + retcol="retcol" ) self.assertEquals("Value", 
value) self.mock_txn.execute.assert_called_with( - "SELECT retcol FROM tablename WHERE keycol = ?", - ["TheKey"] + "SELECT retcol FROM tablename WHERE keycol = ?", ["TheKey"] ) @defer.inlineCallbacks @@ -109,15 +107,15 @@ class SQLBaseStoreTestCase(unittest.TestCase): self.mock_txn.fetchone.return_value = (1, 2, 3) ret = yield self.datastore._simple_select_one( - table="tablename", - keyvalues={"keycol": "TheKey"}, - retcols=["colA", "colB", "colC"] + table="tablename", + keyvalues={"keycol": "TheKey"}, + retcols=["colA", "colB", "colC"] ) self.assertEquals({"colA": 1, "colB": 2, "colC": 3}, ret) self.mock_txn.execute.assert_called_with( - "SELECT colA, colB, colC FROM tablename WHERE keycol = ?", - ["TheKey"] + "SELECT colA, colB, colC FROM tablename WHERE keycol = ?", + ["TheKey"] ) @defer.inlineCallbacks @@ -126,32 +124,32 @@ class SQLBaseStoreTestCase(unittest.TestCase): self.mock_txn.fetchone.return_value = None ret = yield self.datastore._simple_select_one( - table="tablename", - keyvalues={"keycol": "Not here"}, - retcols=["colA"], - allow_none=True + table="tablename", + keyvalues={"keycol": "Not here"}, + retcols=["colA"], + allow_none=True ) self.assertFalse(ret) @defer.inlineCallbacks def test_select_list(self): - self.mock_txn.rowcount = 3; + self.mock_txn.rowcount = 3 self.mock_txn.fetchall.return_value = ((1,), (2,), (3,)) self.mock_txn.description = ( - ("colA", None, None, None, None, None, None), + ("colA", None, None, None, None, None, None), ) ret = yield self.datastore._simple_select_list( - table="tablename", - keyvalues={"keycol": "A set"}, - retcols=["colA"], + table="tablename", + keyvalues={"keycol": "A set"}, + retcols=["colA"], ) self.assertEquals([{"colA": 1}, {"colA": 2}, {"colA": 3}], ret) self.mock_txn.execute.assert_called_with( - "SELECT colA FROM tablename WHERE keycol = ?", - ["A set"] + "SELECT colA FROM tablename WHERE keycol = ?", + ["A set"] ) @defer.inlineCallbacks @@ -159,14 +157,14 @@ class SQLBaseStoreTestCase(unittest.TestCase): self.mock_txn.rowcount = 1 yield self.datastore._simple_update_one( - table="tablename", - keyvalues={"keycol": "TheKey"}, - updatevalues={"columnname": "New Value"} + table="tablename", + keyvalues={"keycol": "TheKey"}, + updatevalues={"columnname": "New Value"} ) self.mock_txn.execute.assert_called_with( - "UPDATE tablename SET columnname = ? WHERE keycol = ?", - ["New Value", "TheKey"] + "UPDATE tablename SET columnname = ? WHERE keycol = ?", + ["New Value", "TheKey"] ) @defer.inlineCallbacks @@ -174,15 +172,15 @@ class SQLBaseStoreTestCase(unittest.TestCase): self.mock_txn.rowcount = 1 yield self.datastore._simple_update_one( - table="tablename", - keyvalues=OrderedDict([("colA", 1), ("colB", 2)]), - updatevalues=OrderedDict([("colC", 3), ("colD", 4)]) + table="tablename", + keyvalues=OrderedDict([("colA", 1), ("colB", 2)]), + updatevalues=OrderedDict([("colC", 3), ("colD", 4)]) ) self.mock_txn.execute.assert_called_with( - "UPDATE tablename SET colC = ?, colD = ? WHERE " + - "colA = ? AND colB = ?", - [3, 4, 1, 2] + "UPDATE tablename SET colC = ?, colD = ? WHERE" + " colA = ? 
AND colB = ?", + [3, 4, 1, 2] ) @defer.inlineCallbacks @@ -190,11 +188,10 @@ class SQLBaseStoreTestCase(unittest.TestCase): self.mock_txn.rowcount = 1 yield self.datastore._simple_delete_one( - table="tablename", - keyvalues={"keycol": "Go away"}, + table="tablename", + keyvalues={"keycol": "Go away"}, ) self.mock_txn.execute.assert_called_with( - "DELETE FROM tablename WHERE keycol = ?", - ["Go away"] + "DELETE FROM tablename WHERE keycol = ?", ["Go away"] ) diff --git a/tests/storage/test_events.py b/tests/storage/test_events.py index 4aa82d4c9d..18a6cff0c7 100644 --- a/tests/storage/test_events.py +++ b/tests/storage/test_events.py @@ -12,7 +12,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import uuid from mock import Mock from synapse.types import RoomID, UserID diff --git a/tests/storage/test_profile.py b/tests/storage/test_profile.py index 47e2768b2c..24118bbc86 100644 --- a/tests/storage/test_profile.py +++ b/tests/storage/test_profile.py @@ -55,7 +55,7 @@ class ProfileStoreTestCase(unittest.TestCase): ) yield self.store.set_profile_avatar_url( - self.u_frank.localpart, "http://my.site/here" + self.u_frank.localpart, "http://my.site/here" ) self.assertEquals( diff --git a/tests/storage/test_registration.py b/tests/storage/test_registration.py index 7b3b4c13bc..b8384c98d8 100644 --- a/tests/storage/test_registration.py +++ b/tests/storage/test_registration.py @@ -33,8 +33,10 @@ class RegistrationStoreTestCase(unittest.TestCase): self.store = hs.get_datastore() self.user_id = "@my-user:test" - self.tokens = ["AbCdEfGhIjKlMnOpQrStUvWxYz", - "BcDeFgHiJkLmNoPqRsTuVwXyZa"] + self.tokens = [ + "AbCdEfGhIjKlMnOpQrStUvWxYz", + "BcDeFgHiJkLmNoPqRsTuVwXyZa" + ] self.pwhash = "{xx1}123456789" @defer.inlineCallbacks diff --git a/tests/storage/test_room.py b/tests/storage/test_room.py index 0baaf3df21..ef8a4d234f 100644 --- a/tests/storage/test_room.py +++ b/tests/storage/test_room.py @@ -37,7 +37,8 @@ class RoomStoreTestCase(unittest.TestCase): self.alias = RoomAlias.from_string("#a-room-name:test") self.u_creator = UserID.from_string("@creator:test") - yield self.store.store_room(self.room.to_string(), + yield self.store.store_room( + self.room.to_string(), room_creator_user_id=self.u_creator.to_string(), is_public=True ) @@ -45,9 +46,11 @@ class RoomStoreTestCase(unittest.TestCase): @defer.inlineCallbacks def test_get_room(self): self.assertDictContainsSubset( - {"room_id": self.room.to_string(), - "creator": self.u_creator.to_string(), - "is_public": True}, + { + "room_id": self.room.to_string(), + "creator": self.u_creator.to_string(), + "is_public": True + }, (yield self.store.get_room(self.room.to_string())) ) @@ -65,7 +68,8 @@ class RoomEventsStoreTestCase(unittest.TestCase): self.room = RoomID.from_string("!abcde:test") - yield self.store.store_room(self.room.to_string(), + yield self.store.store_room( + self.room.to_string(), room_creator_user_id="@creator:text", is_public=True ) diff --git a/tests/storage/test_roommember.py b/tests/storage/test_roommember.py index bab15c4165..677d11f68d 100644 --- a/tests/storage/test_roommember.py +++ b/tests/storage/test_roommember.py @@ -88,8 +88,8 @@ class RoomMemberStoreTestCase(unittest.TestCase): [m.room_id for m in ( yield self.store.get_rooms_for_user_where_membership_is( self.u_alice.to_string(), [Membership.JOIN] - )) - ] + ) + )] ) self.assertFalse( (yield self.store.user_rooms_intersect( @@ -108,11 
+108,11 @@ class RoomMemberStoreTestCase(unittest.TestCase): yield self.store.get_room_members(self.room.to_string()) )} ) - self.assertTrue( - (yield self.store.user_rooms_intersect( - [self.u_alice.to_string(), self.u_bob.to_string()] - )) - ) + self.assertTrue(( + yield self.store.user_rooms_intersect([ + self.u_alice.to_string(), self.u_bob.to_string() + ]) + )) @defer.inlineCallbacks def test_room_hosts(self): @@ -136,9 +136,7 @@ class RoomMemberStoreTestCase(unittest.TestCase): self.assertEquals( {"test", "elsewhere"}, - (yield - self.store.get_joined_hosts_for_room(self.room.to_string()) - ) + (yield self.store.get_joined_hosts_for_room(self.room.to_string())) ) # Should still have both hosts @@ -146,9 +144,7 @@ class RoomMemberStoreTestCase(unittest.TestCase): self.assertEquals( {"test", "elsewhere"}, - (yield - self.store.get_joined_hosts_for_room(self.room.to_string()) - ) + (yield self.store.get_joined_hosts_for_room(self.room.to_string())) ) # Should have only one host after other leaves diff --git a/tests/storage/test_stream.py b/tests/storage/test_stream.py index 708208aff1..da322152c7 100644 --- a/tests/storage/test_stream.py +++ b/tests/storage/test_stream.py @@ -156,13 +156,13 @@ class StreamStoreTestCase(unittest.TestCase): self.room1, self.u_bob, Membership.JOIN ) - event1 = yield self.event_injector.inject_room_member( + yield self.event_injector.inject_room_member( self.room1, self.u_alice, Membership.JOIN ) start = yield self.store.get_room_events_max_id() - event2 = yield self.event_injector.inject_room_member( + yield self.event_injector.inject_room_member( self.room1, self.u_alice, Membership.JOIN, ) diff --git a/tests/test_distributor.py b/tests/test_distributor.py index a80f580ba6..acebcf4a86 100644 --- a/tests/test_distributor.py +++ b/tests/test_distributor.py @@ -44,8 +44,10 @@ class DistributorTestCase(unittest.TestCase): self.dist.declare("whine") d_inner = defer.Deferred() + def observer(): return d_inner + self.dist.observe("whine", observer) d_outer = self.dist.fire("whine") @@ -66,8 +68,8 @@ class DistributorTestCase(unittest.TestCase): observers[0].side_effect = Exception("Awoogah!") - with patch("synapse.util.distributor.logger", - spec=["warning"] + with patch( + "synapse.util.distributor.logger", spec=["warning"] ) as mock_logger: d = self.dist.fire("alarm", "Go") yield d @@ -77,8 +79,9 @@ class DistributorTestCase(unittest.TestCase): observers[1].assert_called_once_with("Go") self.assertEquals(mock_logger.warning.call_count, 1) - self.assertIsInstance(mock_logger.warning.call_args[0][0], - str) + self.assertIsInstance( + mock_logger.warning.call_args[0][0], str + ) @defer.inlineCallbacks def test_signal_catch_no_suppress(self): diff --git a/tests/test_test_utils.py b/tests/test_test_utils.py index 3718f4fc3f..d28bb726bb 100644 --- a/tests/test_test_utils.py +++ b/tests/test_test_utils.py @@ -17,6 +17,7 @@ from tests import unittest from tests.utils import MockClock + class MockClockTestCase(unittest.TestCase): def setUp(self): @@ -60,7 +61,7 @@ class MockClockTestCase(unittest.TestCase): def _cb1(): invoked[1] = 1 - t1 = self.clock.call_later(20, _cb1) + self.clock.call_later(20, _cb1) self.clock.cancel_call_later(t0) diff --git a/tests/unittest.py b/tests/unittest.py index 6f02eb4cac..5b22abfe74 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -37,9 +37,12 @@ def around(target): def _around(code): name = code.__name__ orig = getattr(target, name) + def new(*args, **kwargs): return code(orig, *args, **kwargs) + setattr(target, name, new) + 
return _around @@ -53,9 +56,7 @@ class TestCase(unittest.TestCase): method = getattr(self, methodName) - level = getattr(method, "loglevel", - getattr(self, "loglevel", - NEVER)) + level = getattr(method, "loglevel", getattr(self, "loglevel", NEVER)) @around(self) def setUp(orig): diff --git a/tests/util/__init__.py b/tests/util/__init__.py index d0e9399dda..bfebb0f644 100644 --- a/tests/util/__init__.py +++ b/tests/util/__init__.py @@ -12,4 +12,3 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - diff --git a/tests/util/test_dict_cache.py b/tests/util/test_dict_cache.py index 7bbe795622..272b71034a 100644 --- a/tests/util/test_dict_cache.py +++ b/tests/util/test_dict_cache.py @@ -14,7 +14,6 @@ # limitations under the License. -from twisted.internet import defer from tests import unittest from synapse.util.caches.dictionary_cache import DictionaryCache diff --git a/tests/util/test_snapshot_cache.py b/tests/util/test_snapshot_cache.py index 4ee0d49673..7e289715ba 100644 --- a/tests/util/test_snapshot_cache.py +++ b/tests/util/test_snapshot_cache.py @@ -19,6 +19,7 @@ from .. import unittest from synapse.util.caches.snapshot_cache import SnapshotCache from twisted.internet.defer import Deferred + class SnapshotCacheTestCase(unittest.TestCase): def setUp(self): diff --git a/tests/util/test_treecache.py b/tests/util/test_treecache.py index 1efbeb6b33..bcc64422b2 100644 --- a/tests/util/test_treecache.py +++ b/tests/util/test_treecache.py @@ -18,6 +18,7 @@ from .. import unittest from synapse.util.caches.treecache import TreeCache + class TreeCacheTestCase(unittest.TestCase): def test_get_set_onelevel(self): cache = TreeCache() diff --git a/tests/utils.py b/tests/utils.py index f71125042b..bf7a31ff9e 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -152,7 +152,7 @@ class MockHttpResource(HttpServer): mock_request.getClientIP.return_value = "-" - mock_request.requestHeaders.getRawHeaders.return_value=[ + mock_request.requestHeaders.getRawHeaders.return_value = [ "X-Matrix origin=test,key=,sig=" ] @@ -360,13 +360,12 @@ class MemoryDataStore(object): def get_rooms_for_user_where_membership_is(self, user_id, membership_list): return [ - self.members[r].get(user_id) for r in self.members - if user_id in self.members[r] and - self.members[r][user_id].membership in membership_list + m[user_id] for m in self.members.values() + if user_id in m and m[user_id].membership in membership_list ] def get_room_events_stream(self, user_id=None, from_key=None, to_key=None, - limit=0, with_feedback=False): + limit=0, with_feedback=False): return ([], from_key) # TODO def get_joined_hosts_for_room(self, room_id): @@ -376,7 +375,6 @@ class MemoryDataStore(object): if event.type == EventTypes.Member: room_id = event.room_id user = event.state_key - membership = event.membership self.members.setdefault(room_id, {})[user] = event if hasattr(event, "state_key"): @@ -456,9 +454,9 @@ class DeferredMockCallable(object): d.callback(None) return result - failure = AssertionError("Was not expecting call(%s)" % + failure = AssertionError("Was not expecting call(%s)" % ( _format_call(args, kwargs) - ) + )) for _, _, d in self.expectations: try: @@ -479,14 +477,12 @@ class DeferredMockCallable(object): ) timer = reactor.callLater( - timeout/1000, + timeout / 1000, deferred.errback, - AssertionError( - "%d pending calls left: %s"% ( - len([e for e in self.expectations if not e[2].called]), - [e for 
e in self.expectations if not e[2].called] - ) - ) + AssertionError("%d pending calls left: %s" % ( + len([e for e in self.expectations if not e[2].called]), + [e for e in self.expectations if not e[2].called] + )) ) yield deferred @@ -500,8 +496,8 @@ class DeferredMockCallable(object): calls = self.calls self.calls = [] - raise AssertionError("Expected not to received any calls, got:\n" + - "\n".join([ + raise AssertionError( + "Expected not to received any calls, got:\n" + "\n".join([ "call(%s)" % _format_call(c[0], c[1]) for c in calls ]) ) diff --git a/tox.ini b/tox.ini index 6f01599af7..757b7189c3 100644 --- a/tox.ini +++ b/tox.ini @@ -26,4 +26,4 @@ skip_install = True basepython = python2.7 deps = flake8 -commands = /bin/bash -c "flake8 synapse {env:PEP8SUFFIX:}" +commands = /bin/bash -c "flake8 synapse tests {env:PEP8SUFFIX:}" From 6c5b147a39c4c1ee5bc0ec0decacebb9f32de286 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 19 Feb 2016 17:11:11 +0000 Subject: [PATCH 208/349] Interpret unknown visibilities the same as shared --- synapse/handlers/_base.py | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py index f01ab6780b..ef6716002c 100644 --- a/synapse/handlers/_base.py +++ b/synapse/handlers/_base.py @@ -111,22 +111,24 @@ class BaseHandler(object): # return True pass - if visibility == "shared": - # user can also see the event if he has become a member since - # the event - # - # XXX: if the user has subsequently joined and then left again, - # ideally we would share history up to the point they left. But - # we don't know when they left. - return not is_peeking + if visibility == "joined": + # we weren't a member at the time of the event, so we can't + # see this event. + return False + elif visibility == "invited": # user can also see the event if he was *invited* at the time # of the event. return membership == Membership.INVITE - # presumably visibility is "joined"; we weren't a member at the - # time of the event, so we're done. - return False + else: + # visibility is shared: user can also see the event if he has + # become a member since the event + # + # XXX: if the user has subsequently joined and then left again, + # ideally we would share history up to the point they left. But + # we don't know when they left. + return not is_peeking defer.returnValue({ user_id: [ From 5c79ef9396ffc1212676610d4bb9df00f1361126 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Fri, 19 Feb 2016 17:46:58 +0000 Subject: [PATCH 209/349] Test Filter.filter_rooms Also check that the __repr__ method for FilterCollection does something sensible. --- tests/api/test_filtering.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/tests/api/test_filtering.py b/tests/api/test_filtering.py index 4297a89f85..dcb6c5bc31 100644 --- a/tests/api/test_filtering.py +++ b/tests/api/test_filtering.py @@ -456,6 +456,22 @@ class FilteringTestCase(unittest.TestCase): results = user_filter.filter_room_state(events) self.assertEquals([], results) + def test_filter_rooms(self): + definition = { + "rooms": ["!allowed:example.com", "!excluded:example.com"], + "not_rooms": ["!excluded:example.com"], + } + + room_ids = [ + "!allowed:example.com", # Allowed because in rooms and not in not_rooms. + "!excluded:example.com", # Disallowed because in not_rooms. + "!not_included:example.com", # Disallowed because not in rooms. 
+ ] + + filtered_room_ids = list(Filter(definition).filter_rooms(room_ids)) + + self.assertEquals(filtered_room_ids, ["!allowed:example.com"]) + @defer.inlineCallbacks def test_add_filter(self): user_filter_json = { @@ -500,3 +516,5 @@ class FilteringTestCase(unittest.TestCase): ) self.assertEquals(filter.get_filter_json(), user_filter_json) + + self.assertRegexpMatches(repr(filter), r"") From 9e696bd6a38f280e40f7ec47f457bd1052f1723d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 22 Feb 2016 13:54:46 +0000 Subject: [PATCH 210/349] Remove superfluous call to get_state_at when we already have an event for that stream position --- synapse/handlers/sync.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index efeec72fd8..558c7bacb9 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -845,16 +845,20 @@ class SyncHandler(BaseHandler): # TODO(mjark) Check for new redactions in the state events. with Measure(self.clock, "compute_state_delta"): - current_state = yield self.get_state_at( - room_id, stream_position=now_token - ) - if full_state: if batch: + current_state = yield self.store.get_state_for_event( + batch.events[-1].event_id + ) + state = yield self.store.get_state_for_event( batch.events[0].event_id ) else: + current_state = yield self.get_state_at( + room_id, stream_position=now_token + ) + state = current_state timeline_state = { @@ -873,6 +877,10 @@ class SyncHandler(BaseHandler): room_id, stream_position=since_token ) + current_state = yield self.store.get_state_for_event( + batch.events[-1].event_id + ) + state_at_timeline_start = yield self.store.get_state_for_event( batch.events[0].event_id ) From 7641a90c34c07621e813b1aa7060114074f2741b Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Mon, 22 Feb 2016 15:22:38 +0000 Subject: [PATCH 211/349] Add a test for TreeCache.__contains__ --- tests/metrics/test_metric.py | 3 +++ tests/util/test_treecache.py | 6 ++++++ 2 files changed, 9 insertions(+) diff --git a/tests/metrics/test_metric.py b/tests/metrics/test_metric.py index f9e5e5af01..f3c1927ce1 100644 --- a/tests/metrics/test_metric.py +++ b/tests/metrics/test_metric.py @@ -61,6 +61,9 @@ class CounterMetricTestCase(unittest.TestCase): 'vector{method="PUT"} 1', ]) + # Check that passing too few values errors + self.assertRaises(ValueError, counter.inc) + class CallbackMetricTestCase(unittest.TestCase): diff --git a/tests/util/test_treecache.py b/tests/util/test_treecache.py index bcc64422b2..7ab578a185 100644 --- a/tests/util/test_treecache.py +++ b/tests/util/test_treecache.py @@ -77,3 +77,9 @@ class TreeCacheTestCase(unittest.TestCase): cache[("b",)] = "B" cache.clear() self.assertEquals(len(cache), 0) + + def test_contains(self): + cache = TreeCache() + cache[("a",)] = "A" + self.assertTrue(("a",) in cache) + self.assertFalse(("b",) in cache) From 5be3944730ccf809b72a4412d599b5f622246589 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 22 Feb 2016 15:27:44 +0000 Subject: [PATCH 212/349] address review comments drop commented-out special casing for historyvisibility event s/he/they/ for users --- synapse/handlers/_base.py | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py index ef6716002c..5613bd2059 100644 --- a/synapse/handlers/_base.py +++ b/synapse/handlers/_base.py @@ -106,23 +106,18 @@ class BaseHandler(object): if membership == Membership.JOIN: return True - if 
event.type == EventTypes.RoomHistoryVisibility: - # XXX why are m.room.history_visibility events special? - # return True - pass - if visibility == "joined": # we weren't a member at the time of the event, so we can't # see this event. return False elif visibility == "invited": - # user can also see the event if he was *invited* at the time + # user can also see the event if they were *invited* at the time # of the event. return membership == Membership.INVITE else: - # visibility is shared: user can also see the event if he has + # visibility is shared: user can also see the event if they have # become a member since the event # # XXX: if the user has subsequently joined and then left again, From 5e2890bd49c70d7258ecc06505c921c2bb736260 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Mon, 22 Feb 2016 16:01:29 +0000 Subject: [PATCH 213/349] Check that the disable_registration config key is handled correctly --- tests/config/test_load.py | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/tests/config/test_load.py b/tests/config/test_load.py index fbbbf93fef..bf46233c5c 100644 --- a/tests/config/test_load.py +++ b/tests/config/test_load.py @@ -60,6 +60,22 @@ class ConfigLoadingTestCase(unittest.TestCase): config2 = HomeServerConfig.load_config("", ["-c", self.file]) self.assertEqual(config1.macaroon_secret_key, config2.macaroon_secret_key) + def test_disable_registration(self): + self.generate_config() + self.add_lines_to_config([ + "enable_registration: true", + "disable_registration: true", + ]) + # Check that disable_registration clobbers enable_registration. + config = HomeServerConfig.load_config("", ["-c", self.file]) + self.assertFalse(config.enable_registration) + + # Check that either config value is clobbered by the command line. + config = HomeServerConfig.load_config("", [ + "-c", self.file, "--enable-registration" + ]) + self.assertTrue(config.enable_registration) + def generate_config(self): HomeServerConfig.load_config("", [ "--generate-config", @@ -76,3 +92,8 @@ class ConfigLoadingTestCase(unittest.TestCase): contents = [l for l in contents if needle not in l] with open(self.file, "w") as f: f.write("".join(contents)) + + def add_lines_to_config(self, lines): + with open(self.file, "a") as f: + for line in lines: + f.write(line + "\n") From 1dcfb201c4dcf329694195c89fadce80804459b7 Mon Sep 17 00:00:00 2001 From: Patrik Oldsberg Date: Tue, 23 Feb 2016 00:21:48 +0100 Subject: [PATCH 214/349] handlers/sync: fix SyncResult not counting account_data change when converting to bool This fixes account_data events not triggering an immediate /sync response Signed-off-by: Patrik Oldsberg --- synapse/handlers/sync.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index c87ff75c05..4c6cc22a26 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -121,7 +121,7 @@ class SyncResult(collections.namedtuple("SyncResult", [ events. 
""" return bool( - self.presence or self.joined or self.invited or self.archived + self.presence or self.joined or self.invited or self.archived or self.account_data ) From 210b7d8e004e5d107a95572860dcc3d7c5b66fdd Mon Sep 17 00:00:00 2001 From: Patrik Oldsberg Date: Mon, 22 Feb 2016 22:55:21 +0100 Subject: [PATCH 215/349] handlers/_base: don't allow room create event to be changed Signed-off-by: Patrik Oldsberg --- synapse/handlers/_base.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py index 5613bd2059..5b27ec1362 100644 --- a/synapse/handlers/_base.py +++ b/synapse/handlers/_base.py @@ -342,6 +342,12 @@ class BaseHandler(object): "You don't have permission to redact events" ) + if event.type == EventTypes.Create and context.current_state: + raise AuthError( + 403, + "Changing the room create event is forbidden", + ) + action_generator = ActionGenerator(self.hs) yield action_generator.handle_push_actions_for_event( event, context, self From 138c405974cf4c460d33a8a1ac0c88051d44467f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 23 Feb 2016 10:40:11 +0000 Subject: [PATCH 216/349] Pick up currently_active across federation --- synapse/handlers/presence.py | 1 + 1 file changed, 1 insertion(+) diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index aed640450f..acf400e324 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -522,6 +522,7 @@ class PresenceHandler(BaseHandler): new_fields["last_active_ts"] = now - last_active_ago new_fields["status_msg"] = push.get("status_msg", None) + new_fields["currently_active"] = push.get("currently_active", False) prev_state = yield self.current_state_for_user(user_id) updates.append(prev_state.copy_and_replace(**new_fields)) From baf056bae84d5cac12956468589a3d7ef1f3b15e Mon Sep 17 00:00:00 2001 From: Patrik Oldsberg Date: Tue, 23 Feb 2016 14:40:02 +0100 Subject: [PATCH 217/349] handlers/sync: style fix, line too long Signed-off-by: Patrik Oldsberg --- synapse/handlers/sync.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 4c6cc22a26..fded6e4009 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -121,7 +121,11 @@ class SyncResult(collections.namedtuple("SyncResult", [ events. """ return bool( - self.presence or self.joined or self.invited or self.archived or self.account_data + self.presence or + self.joined or + self.invited or + self.archived or + self.account_data ) From 56a94ccd9e0b3f01c016481b0e3eb26f1a09ac0e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 23 Feb 2016 13:49:16 +0000 Subject: [PATCH 218/349] Measure PresenceEventSource.get_new_events --- synapse/handlers/presence.py | 69 ++++++++++++++++++------------------ 1 file changed, 35 insertions(+), 34 deletions(-) diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index aed640450f..91e62e9a00 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -835,50 +835,51 @@ class PresenceEventSource(object): # We don't try and limit the presence updates by the current token, as # sending down the rare duplicate is not a concern. 
- user_id = user.to_string() - if from_key is not None: - from_key = int(from_key) - room_ids = room_ids or [] + with Measure(self.clock, "Presence.get_new_events"): + user_id = user.to_string() + if from_key is not None: + from_key = int(from_key) + room_ids = room_ids or [] - presence = self.hs.get_handlers().presence_handler + presence = self.hs.get_handlers().presence_handler - if not room_ids: - rooms = yield self.store.get_rooms_for_user(user_id) - room_ids = set(e.room_id for e in rooms) + if not room_ids: + rooms = yield self.store.get_rooms_for_user(user_id) + room_ids = set(e.room_id for e in rooms) - user_ids_to_check = set() - for room_id in room_ids: - users = yield self.store.get_users_in_room(room_id) - user_ids_to_check.update(users) + user_ids_to_check = set() + for room_id in room_ids: + users = yield self.store.get_users_in_room(room_id) + user_ids_to_check.update(users) - plist = yield self.store.get_presence_list_accepted(user.localpart) - user_ids_to_check.update([row["observed_user_id"] for row in plist]) + plist = yield self.store.get_presence_list_accepted(user.localpart) + user_ids_to_check.update([row["observed_user_id"] for row in plist]) - # Always include yourself. Only really matters for when the user is - # not in any rooms, but still. - user_ids_to_check.add(user_id) + # Always include yourself. Only really matters for when the user is + # not in any rooms, but still. + user_ids_to_check.add(user_id) - max_token = self.store.get_current_presence_token() + max_token = self.store.get_current_presence_token() - if from_key: - user_ids_changed = self.store.presence_stream_cache.get_entities_changed( - user_ids_to_check, from_key, - ) - else: - user_ids_changed = user_ids_to_check + if from_key: + user_ids_changed = self.store.presence_stream_cache.get_entities_changed( + user_ids_to_check, from_key, + ) + else: + user_ids_changed = user_ids_to_check - updates = yield presence.current_state_for_users(user_ids_changed) + updates = yield presence.current_state_for_users(user_ids_changed) - now = self.clock.time_msec() + now = self.clock.time_msec() - defer.returnValue(([ - { - "type": "m.presence", - "content": _format_user_presence_state(s, now), - } - for s in updates.values() - if include_offline or s.state != PresenceState.OFFLINE - ], max_token)) + defer.returnValue(([ + { + "type": "m.presence", + "content": _format_user_presence_state(s, now), + } + for s in updates.values() + if include_offline or s.state != PresenceState.OFFLINE + ], max_token)) def get_current_key(self): return self.store.get_current_presence_token() From 02e928cf9b20fe7fcb3abfb905693a48c99deeca Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 23 Feb 2016 14:03:46 +0000 Subject: [PATCH 219/349] Don't include defer.returnValue in Measure block --- synapse/handlers/presence.py | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 91e62e9a00..80de4f43b6 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -46,6 +46,7 @@ logger = logging.getLogger(__name__) metrics = synapse.metrics.get_metrics_for(__name__) notified_presence_counter = metrics.register_counter("notified_presence") +federation_presence_out_counter = metrics.register_counter("federation_presence_out") presence_updates_counter = metrics.register_counter("presence_updates") timers_fired_counter = metrics.register_counter("timers_fired") federation_presence_counter = 
metrics.register_counter("federation_presence") @@ -259,6 +260,8 @@ class PresenceHandler(BaseHandler): if user_id not in to_notify } if to_federation_ping: + federation_presence_out_counter.inc_by(len(to_federation_ping)) + _, _, hosts_to_states = yield self._get_interested_parties( to_federation_ping.values() ) @@ -835,7 +838,7 @@ class PresenceEventSource(object): # We don't try and limit the presence updates by the current token, as # sending down the rare duplicate is not a concern. - with Measure(self.clock, "Presence.get_new_events"): + with Measure(self.clock, "presence.get_new_events"): user_id = user.to_string() if from_key is not None: from_key = int(from_key) @@ -872,14 +875,14 @@ class PresenceEventSource(object): now = self.clock.time_msec() - defer.returnValue(([ - { - "type": "m.presence", - "content": _format_user_presence_state(s, now), - } - for s in updates.values() - if include_offline or s.state != PresenceState.OFFLINE - ], max_token)) + defer.returnValue(([ + { + "type": "m.presence", + "content": _format_user_presence_state(s, now), + } + for s in updates.values() + if include_offline or s.state != PresenceState.OFFLINE + ], max_token)) def get_current_key(self): return self.store.get_current_presence_token() From c77dae7a1abfef19940efd1378df5e2215a5b48b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 23 Feb 2016 14:48:23 +0000 Subject: [PATCH 220/349] Change the way we figure out presence updates for small deltas --- synapse/handlers/presence.py | 53 +++++++++++++++------- synapse/util/caches/stream_change_cache.py | 16 +++++++ 2 files changed, 52 insertions(+), 17 deletions(-) diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 80de4f43b6..84624d4902 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -845,35 +845,54 @@ class PresenceEventSource(object): room_ids = room_ids or [] presence = self.hs.get_handlers().presence_handler + stream_change_cache = self.store.presence_stream_cache if not room_ids: rooms = yield self.store.get_rooms_for_user(user_id) room_ids = set(e.room_id for e in rooms) - - user_ids_to_check = set() - for room_id in room_ids: - users = yield self.store.get_users_in_room(room_id) - user_ids_to_check.update(users) + else: + room_ids = set(room_ids) plist = yield self.store.get_presence_list_accepted(user.localpart) - user_ids_to_check.update([row["observed_user_id"] for row in plist]) + friends = set(row["observed_user_id"] for row in plist) + friends.add(user_id) # So that we receive our own presence - # Always include yourself. Only really matters for when the user is - # not in any rooms, but still. 
- user_ids_to_check.add(user_id) + user_ids_changed = set() + if from_key and from_key < 100: + changed = stream_change_cache.get_all_entities_changed(from_key) - max_token = self.store.get_current_presence_token() - - if from_key: - user_ids_changed = self.store.presence_stream_cache.get_entities_changed( - user_ids_to_check, from_key, - ) + for other_user_id in changed: + if other_user_id in friends: + user_ids_changed.add(other_user_id) + continue + other_rooms = yield self.store.get_rooms_for_user(other_user_id) + if room_ids.intersection(e.room_id for e in other_rooms): + user_ids_changed.add(other_user_id) + continue else: - user_ids_changed = user_ids_to_check + user_ids_to_check = set() + for room_id in room_ids: + users = yield self.store.get_users_in_room(room_id) + user_ids_to_check.update(users) + + plist = yield self.store.get_presence_list_accepted(user.localpart) + user_ids_to_check.update([row["observed_user_id"] for row in plist]) + + # Always include yourself. Only really matters for when the user is + # not in any rooms, but still. + user_ids_to_check.add(user_id) + + if from_key: + user_ids_changed = stream_change_cache.get_entities_changed( + user_ids_to_check, from_key, + ) + else: + user_ids_changed = user_ids_to_check updates = yield presence.current_state_for_users(user_ids_changed) - now = self.clock.time_msec() + max_token = self.store.get_current_presence_token() + now = self.clock.time_msec() defer.returnValue(([ { diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py index b37f1c0725..970488a19c 100644 --- a/synapse/util/caches/stream_change_cache.py +++ b/synapse/util/caches/stream_change_cache.py @@ -85,6 +85,22 @@ class StreamChangeCache(object): return result + def get_all_entities_changed(self, stream_pos): + """Returns all entites that have had new things since the given + position. If the position is too old it will return None. + """ + assert type(stream_pos) is int + + if stream_pos >= self._earliest_known_stream_pos: + keys = self._cache.keys() + i = keys.bisect_right(stream_pos) + + return ( + self._cache[k] for k in keys[i:] + ) + else: + return None + def entity_has_changed(self, entity, stream_pos): """Informs the cache that the entity has been changed at the given position. From 6e0209112bfb7fc2eee958bfc2e425b1cd399505 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 23 Feb 2016 14:57:45 +0000 Subject: [PATCH 221/349] Add comments --- synapse/handlers/presence.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 84624d4902..ff62d47679 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -859,6 +859,8 @@ class PresenceEventSource(object): user_ids_changed = set() if from_key and from_key < 100: + # For small deltas, its quicker to get all changes and then + # work out if we share a room or they're in our presence list changed = stream_change_cache.get_all_entities_changed(from_key) for other_user_id in changed: @@ -870,6 +872,8 @@ class PresenceEventSource(object): user_ids_changed.add(other_user_id) continue else: + # Too many possible updates. Find all users we can see and check + # if any of them have changed. 
user_ids_to_check = set() for room_id in room_ids: users = yield self.store.get_users_in_room(room_id) From 13f86c3489dba5a2df06cf635f220c23e36b662a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 23 Feb 2016 15:05:37 +0000 Subject: [PATCH 222/349] Handle get_all_entities_changed returning None --- synapse/handlers/presence.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index ff62d47679..952e48e319 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -858,11 +858,14 @@ class PresenceEventSource(object): friends.add(user_id) # So that we receive our own presence user_ids_changed = set() + changed = None if from_key and from_key < 100: # For small deltas, its quicker to get all changes and then # work out if we share a room or they're in our presence list changed = stream_change_cache.get_all_entities_changed(from_key) + # get_all_entities_changed can return None + if changed is not None: for other_user_id in changed: if other_user_id in friends: user_ids_changed.add(other_user_id) From 577951b0324f67308f50c14fad703d2103621bc5 Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Tue, 23 Feb 2016 15:11:25 +0000 Subject: [PATCH 223/349] Allow third_party_signed to be specified on /join --- synapse/api/auth.py | 57 +++++++++------ synapse/federation/federation_server.py | 15 +++- synapse/federation/transport/server.py | 12 +++- synapse/handlers/federation.py | 93 +++++++++++++++++++------ synapse/handlers/room.py | 67 +++++++++++++++--- synapse/python_dependencies.py | 2 +- synapse/rest/client/v1/room.py | 4 ++ 7 files changed, 196 insertions(+), 54 deletions(-) diff --git a/synapse/api/auth.py b/synapse/api/auth.py index e2f84c4d57..183245443c 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -434,31 +434,46 @@ class Auth(object): if event.user_id != invite_event.user_id: return False - try: - public_key = invite_event.content["public_key"] - if signed["mxid"] != event.state_key: - return False - if signed["token"] != token: - return False - for server, signature_block in signed["signatures"].items(): - for key_name, encoded_signature in signature_block.items(): - if not key_name.startswith("ed25519:"): - return False - verify_key = decode_verify_key_bytes( - key_name, - decode_base64(public_key) - ) - verify_signed_json(signed, server, verify_key) - # We got the public key from the invite, so we know that the - # correct server signed the signed bundle. - # The caller is responsible for checking that the signing - # server has not revoked that public key. - return True + if signed["mxid"] != event.state_key: return False - except (KeyError, SignatureVerifyException,): + if signed["token"] != token: return False + for public_key_object in self.get_public_keys(invite_event): + public_key = public_key_object["public_key"] + try: + for server, signature_block in signed["signatures"].items(): + for key_name, encoded_signature in signature_block.items(): + if not key_name.startswith("ed25519:"): + continue + verify_key = decode_verify_key_bytes( + key_name, + decode_base64(public_key) + ) + verify_signed_json(signed, server, verify_key) + + # We got the public key from the invite, so we know that the + # correct server signed the signed bundle. + # The caller is responsible for checking that the signing + # server has not revoked that public key. 
+ return True + except (KeyError, SignatureVerifyException,): + continue + return False + + def get_public_keys(self, invite_event): + public_keys = [] + if "public_key" in invite_event.content: + o = { + "public_key": invite_event.content["public_key"], + } + if "key_validity_url" in invite_event.content: + o["key_validity_url"] = invite_event.content["key_validity_url"] + public_keys.append(o) + public_keys.extend(invite_event.content.get("public_keys", [])) + return public_keys + def _get_power_level_event(self, auth_events): key = (EventTypes.PowerLevels, "", ) return auth_events.get(key) diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 90718192dd..e8bfbe7cb5 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -543,8 +543,19 @@ class FederationServer(FederationBase): return event @defer.inlineCallbacks - def exchange_third_party_invite(self, invite): - ret = yield self.handler.exchange_third_party_invite(invite) + def exchange_third_party_invite( + self, + sender_user_id, + target_user_id, + room_id, + signed, + ): + ret = yield self.handler.exchange_third_party_invite( + sender_user_id, + target_user_id, + room_id, + signed, + ) defer.returnValue(ret) @defer.inlineCallbacks diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 65e054f7dd..6e92e2f8f4 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -425,7 +425,17 @@ class On3pidBindServlet(BaseFederationServlet): last_exception = None for invite in content["invites"]: try: - yield self.handler.exchange_third_party_invite(invite) + if "signed" not in invite or "token" not in invite["signed"]: + message = ("Rejecting received notification of third-" + "party invite without signed: %s" % (invite,)) + logger.info(message) + raise SynapseError(400, message) + yield self.handler.exchange_third_party_invite( + invite["sender"], + invite["mxid"], + invite["room_id"], + invite["signed"], + ) except Exception as e: last_exception = e if last_exception: diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index ac15f9e5dd..3655b9e5e2 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -14,6 +14,9 @@ # limitations under the License. 
"""Contains handlers for federation events.""" +from signedjson.key import decode_verify_key_bytes +from signedjson.sign import verify_signed_json +from unpaddedbase64 import decode_base64 from ._base import BaseHandler @@ -1620,19 +1623,15 @@ class FederationHandler(BaseHandler): @defer.inlineCallbacks @log_function - def exchange_third_party_invite(self, invite): - sender = invite["sender"] - room_id = invite["room_id"] - - if "signed" not in invite or "token" not in invite["signed"]: - logger.info( - "Discarding received notification of third party invite " - "without signed: %s" % (invite,) - ) - return - + def exchange_third_party_invite( + self, + sender_user_id, + target_user_id, + room_id, + signed, + ): third_party_invite = { - "signed": invite["signed"], + "signed": signed, } event_dict = { @@ -1642,8 +1641,8 @@ class FederationHandler(BaseHandler): "third_party_invite": third_party_invite, }, "room_id": room_id, - "sender": sender, - "state_key": invite["mxid"], + "sender": sender_user_id, + "state_key": target_user_id, } if (yield self.auth.check_host_in_room(room_id, self.hs.hostname)): @@ -1656,11 +1655,11 @@ class FederationHandler(BaseHandler): ) self.auth.check(event, context.current_state) - yield self._validate_keyserver(event, auth_events=context.current_state) + yield self._check_signature(event, auth_events=context.current_state) member_handler = self.hs.get_handlers().room_member_handler yield member_handler.send_membership_event(event, context, from_client=False) else: - destinations = set([x.split(":", 1)[-1] for x in (sender, room_id)]) + destinations = set(x.split(":", 1)[-1] for x in (sender_user_id, room_id)) yield self.replication_layer.forward_third_party_invite( destinations, room_id, @@ -1681,7 +1680,7 @@ class FederationHandler(BaseHandler): ) self.auth.check(event, auth_events=context.current_state) - yield self._validate_keyserver(event, auth_events=context.current_state) + yield self._check_signature(event, auth_events=context.current_state) returned_invite = yield self.send_invite(origin, event) # TODO: Make sure the signatures actually are correct. @@ -1711,17 +1710,69 @@ class FederationHandler(BaseHandler): defer.returnValue((event, context)) @defer.inlineCallbacks - def _validate_keyserver(self, event, auth_events): - token = event.content["third_party_invite"]["signed"]["token"] + def _check_signature(self, event, auth_events): + """ + Checks that the signature in the event is consistent with its invite. + :param event (Event): The m.room.member event to check + :param auth_events (dict<(event type, state_key), event>) + + :raises + AuthError if signature didn't match any keys, or key has been + revoked, + SynapseError if a transient error meant a key couldn't be checked + for revocation. 
+ """ + signed = event.content["third_party_invite"]["signed"] + token = signed["token"] invite_event = auth_events.get( (EventTypes.ThirdPartyInvite, token,) ) + if not invite_event: + raise AuthError(403, "Could not find invite") + + last_exception = None + for public_key_object in self.hs.get_auth().get_public_keys(invite_event): + try: + for server, signature_block in signed["signatures"].items(): + for key_name, encoded_signature in signature_block.items(): + if not key_name.startswith("ed25519:"): + continue + + public_key = public_key_object["public_key"] + verify_key = decode_verify_key_bytes( + key_name, + decode_base64(public_key) + ) + verify_signed_json(signed, server, verify_key) + if "key_validity_url" in public_key_object: + yield self._check_key_revocation( + public_key, + public_key_object["key_validity_url"] + ) + return + except Exception as e: + last_exception = e + raise last_exception + + @defer.inlineCallbacks + def _check_key_revocation(self, public_key, url): + """ + Checks whether public_key has been revoked. + + :param public_key (str): base-64 encoded public key. + :param url (str): Key revocation URL. + + :raises + AuthError if they key has been revoked. + SynapseError if a transient error meant a key couldn't be checked + for revocation. + """ try: response = yield self.hs.get_simple_http_client().get_json( - invite_event.content["key_validity_url"], - {"public_key": invite_event.content["public_key"]} + url, + {"public_key": public_key} ) except Exception: raise SynapseError( diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index b00cac4bd4..eb9700a35b 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -398,6 +398,7 @@ class RoomMemberHandler(BaseHandler): action, txn_id=None, remote_room_hosts=None, + third_party_signed=None, ratelimit=True, ): effective_membership_state = action @@ -406,6 +407,15 @@ class RoomMemberHandler(BaseHandler): elif action == "forget": effective_membership_state = "leave" + if third_party_signed is not None: + replication = self.hs.get_replication_layer() + yield replication.exchange_third_party_invite( + third_party_signed["sender"], + target.to_string(), + room_id, + third_party_signed, + ) + msg_handler = self.hs.get_handlers().message_handler content = {"membership": effective_membership_state} @@ -759,7 +769,7 @@ class RoomMemberHandler(BaseHandler): if room_avatar_event: room_avatar_url = room_avatar_event.content.get("url", "") - token, public_key, key_validity_url, display_name = ( + token, public_keys, fallback_public_key, display_name = ( yield self._ask_id_server_for_third_party_invite( id_server=id_server, medium=medium, @@ -774,14 +784,18 @@ class RoomMemberHandler(BaseHandler): inviter_avatar_url=inviter_avatar_url ) ) + msg_handler = self.hs.get_handlers().message_handler yield msg_handler.create_and_send_nonmember_event( { "type": EventTypes.ThirdPartyInvite, "content": { "display_name": display_name, - "key_validity_url": key_validity_url, - "public_key": public_key, + "public_keys": public_keys, + + # For backwards compatibility: + "key_validity_url": fallback_public_key["key_validity_url"], + "public_key": fallback_public_key["public_key"], }, "room_id": room_id, "sender": user.to_string(), @@ -806,6 +820,34 @@ class RoomMemberHandler(BaseHandler): inviter_display_name, inviter_avatar_url ): + """ + Asks an identity server for a third party invite. + + :param id_server (str): hostname + optional port for the identity server. + :param medium (str): The literal string "email". 
+ :param address (str): The third party address being invited. + :param room_id (str): The ID of the room to which the user is invited. + :param inviter_user_id (str): The user ID of the inviter. + :param room_alias (str): An alias for the room, for cosmetic + notifications. + :param room_avatar_url (str): The URL of the room's avatar, for cosmetic + notifications. + :param room_join_rules (str): The join rules of the email + (e.g. "public"). + :param room_name (str): The m.room.name of the room. + :param inviter_display_name (str): The current display name of the + inviter. + :param inviter_avatar_url (str): The URL of the inviter's avatar. + + :return: A deferred tuple containing: + token (str): The token which must be signed to prove authenticity. + public_keys ([{"public_key": str, "key_validity_url": str}]): + public_key is a base64-encoded ed25519 public key. + fallback_public_key: One element from public_keys. + display_name (str): A user-friendly name to represent the invited + user. + """ + is_url = "%s%s/_matrix/identity/api/v1/store-invite" % ( id_server_scheme, id_server, ) @@ -826,12 +868,21 @@ class RoomMemberHandler(BaseHandler): ) # TODO: Check for success token = data["token"] - public_key = data["public_key"] + public_keys = data.get("public_keys", []) + if "public_key" in data: + fallback_public_key = { + "public_key": data["public_key"], + "key_validity_url": "%s%s/_matrix/identity/api/v1/pubkey/isvalid" % ( + id_server_scheme, id_server, + ), + } + else: + fallback_public_key = public_keys[0] + + if not public_keys: + public_keys.append(fallback_public_key) display_name = data["display_name"] - key_validity_url = "%s%s/_matrix/identity/api/v1/pubkey/isvalid" % ( - id_server_scheme, id_server, - ) - defer.returnValue((token, public_key, key_validity_url, display_name)) + defer.returnValue((token, public_keys, fallback_public_key, display_name)) def forget(self, user, room_id): return self.store.forget(user.to_string(), room_id) diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 75bf3d13aa..35933324a4 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -19,7 +19,7 @@ logger = logging.getLogger(__name__) REQUIREMENTS = { "frozendict>=0.4": ["frozendict"], - "unpaddedbase64>=1.0.1": ["unpaddedbase64>=1.0.1"], + "unpaddedbase64>=1.1.0": ["unpaddedbase64>=1.1.0"], "canonicaljson>=1.0.0": ["canonicaljson>=1.0.0"], "signedjson>=1.0.0": ["signedjson>=1.0.0"], "pynacl==0.3.0": ["nacl==0.3.0", "nacl.bindings"], diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index e6f5c5614a..07a2a5dd82 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -228,6 +228,8 @@ class JoinRoomAliasServlet(ClientV1RestServlet): allow_guest=True, ) + content = _parse_json(request) + if RoomID.is_valid(room_identifier): room_id = room_identifier remote_room_hosts = None @@ -248,6 +250,7 @@ class JoinRoomAliasServlet(ClientV1RestServlet): action="join", txn_id=txn_id, remote_room_hosts=remote_room_hosts, + third_party_signed=content.get("third_party_signed", None), ) defer.returnValue((200, {"room_id": room_id})) @@ -451,6 +454,7 @@ class RoomMembershipRestServlet(ClientV1RestServlet): room_id=room_id, action=membership_action, txn_id=txn_id, + third_party_signed=content.get("third_party_signed", None), ) defer.returnValue((200, {})) From f1dd03548f31be2e65e9c37550b5456f4510d52c Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Tue, 23 Feb 2016 15:15:10 +0000 Subject: [PATCH 
224/349] Set WORKSPACE if not set --- jenkins.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/jenkins.sh b/jenkins.sh index ed3bdf80fa..359fd22771 100755 --- a/jenkins.sh +++ b/jenkins.sh @@ -1,5 +1,7 @@ #!/bin/bash -eu +: ${WORKSPACE:="$(pwd)"} + export PYTHONDONTWRITEBYTECODE=yep # Output test results as junit xml From b5f77eb12aa77923059d01340b569d4626fbfe49 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 23 Feb 2016 15:47:37 +0000 Subject: [PATCH 225/349] Check presence token interval is less than 100, rather than the token itself --- synapse/handlers/presence.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 8134c5185a..08e38cdd25 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -854,13 +854,15 @@ class PresenceEventSource(object): else: room_ids = set(room_ids) + max_token = self.store.get_current_presence_token() + plist = yield self.store.get_presence_list_accepted(user.localpart) friends = set(row["observed_user_id"] for row in plist) friends.add(user_id) # So that we receive our own presence user_ids_changed = set() changed = None - if from_key and from_key < 100: + if from_key and max_token - from_key < 100: # For small deltas, its quicker to get all changes and then # work out if we share a room or they're in our presence list changed = stream_change_cache.get_all_entities_changed(from_key) @@ -883,8 +885,7 @@ class PresenceEventSource(object): users = yield self.store.get_users_in_room(room_id) user_ids_to_check.update(users) - plist = yield self.store.get_presence_list_accepted(user.localpart) - user_ids_to_check.update([row["observed_user_id"] for row in plist]) + user_ids_to_check.update(friends) # Always include yourself. Only really matters for when the user is # not in any rooms, but still. 
@@ -899,7 +900,6 @@ class PresenceEventSource(object): updates = yield presence.current_state_for_users(user_ids_changed) - max_token = self.store.get_current_presence_token() now = self.clock.time_msec() defer.returnValue(([ From 6451fcd08580cc6142d15b4be5f8956ba496f178 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 23 Feb 2016 15:51:39 +0000 Subject: [PATCH 226/349] Create a new stream_id per presence update --- synapse/storage/presence.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/synapse/storage/presence.py b/synapse/storage/presence.py index 70ece56548..3ef91d34db 100644 --- a/synapse/storage/presence.py +++ b/synapse/storage/presence.py @@ -58,17 +58,20 @@ class UserPresenceState(namedtuple("UserPresenceState", class PresenceStore(SQLBaseStore): @defer.inlineCallbacks def update_presence(self, presence_states): - stream_id_manager = yield self._presence_id_gen.get_next(self) - with stream_id_manager as stream_id: + stream_ordering_manager = yield self._presence_id_gen.get_next_mult( + self, len(presence_states) + ) + + with stream_ordering_manager as stream_orderings: yield self.runInteraction( "update_presence", - self._update_presence_txn, stream_id, presence_states, + self._update_presence_txn, stream_orderings, presence_states, ) - defer.returnValue((stream_id, self._presence_id_gen.get_max_token())) + defer.returnValue((stream_orderings[-1], self._presence_id_gen.get_max_token())) - def _update_presence_txn(self, txn, stream_id, presence_states): - for state in presence_states: + def _update_presence_txn(self, txn, stream_orderings, presence_states): + for stream_id, state in zip(stream_orderings, presence_states): txn.call_after( self.presence_stream_cache.entity_has_changed, state.user_id, stream_id, From 278d6c05274288fa964bc52fb656a0f5a26a453f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 23 Feb 2016 11:54:48 +0000 Subject: [PATCH 227/349] Report size of ExpiringCache --- synapse/util/caches/expiringcache.py | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py index 62cae99649..138cbefff4 100644 --- a/synapse/util/caches/expiringcache.py +++ b/synapse/util/caches/expiringcache.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from synapse.util.caches import cache_counter, caches_by_name + import logging @@ -47,6 +49,8 @@ class ExpiringCache(object): self._cache = {} + caches_by_name[cache_name] = self._cache + def start(self): if not self._expiry_ms: # Don't bother starting the loop if things never expire @@ -72,7 +76,11 @@ class ExpiringCache(object): self._cache.pop(k) def __getitem__(self, key): - entry = self._cache[key] + try: + entry = self._cache[key] + cache_counter.inc_hits(self._cache_name) + finally: + cache_counter.inc_misses(self._cache_name) if self._reset_expiry_on_get: entry.time = self._clock.time_msec() @@ -105,9 +113,12 @@ class ExpiringCache(object): logger.debug( "[%s] _prune_cache before: %d, after len: %d", - self._cache_name, begin_length, len(self._cache.keys()) + self._cache_name, begin_length, len(self._cache) ) + def __len__(self): + return len(self._cache) + class _CacheEntry(object): def __init__(self, time, value): From 869580206daa5aa940d079fc907f75dea7770505 Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Wed, 24 Feb 2016 08:50:28 +0000 Subject: [PATCH 228/349] Ignore invalid POST bodies when joining rooms --- synapse/rest/client/v1/room.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 07a2a5dd82..f5ed4f7302 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -228,7 +228,12 @@ class JoinRoomAliasServlet(ClientV1RestServlet): allow_guest=True, ) - content = _parse_json(request) + try: + content = _parse_json(request) + except: + # Turns out we used to ignore the body entirely, and some clients + # cheekily send invalid bodies. + content = {} if RoomID.is_valid(room_identifier): room_id = room_identifier @@ -427,7 +432,12 @@ class RoomMembershipRestServlet(ClientV1RestServlet): }: raise AuthError(403, "Guest access not allowed") - content = _parse_json(request) + try: + content = _parse_json(request) + except: + # Turns out we used to ignore the body entirely, and some clients + # cheekily send invalid bodies. + content = {} if membership_action == "invite" and self._has_3pid_invite_keys(content): yield self.handlers.room_member_handler.do_3pid_invite( From 33300673b7a6f79802f691ac121e720cb44c0dfc Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Wed, 24 Feb 2016 14:41:25 +0000 Subject: [PATCH 229/349] Generate guest access token on 3pid invites This means that following the same link across multiple sessions or devices can re-use the same guest account. Note that this is somewhat of an abuse vector; we can't throw up captchas on this flow, so this is a way of registering ephemeral accounts for spam, whose sign-up we don't rate limit. 
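As an illustrative aside (not part of the patch itself), the re-use behaviour described above boils down to a get-or-create keyed on the 3pid. The sketch below assumes the two store methods this patch adds and a register() callable that returns a (user_id, access_token) pair, as the handler's own register() does:

    from twisted.internet import defer

    @defer.inlineCallbacks
    def guest_token_for_3pid(store, register, medium, address, inviter_user_id):
        # Re-use the guest account previously minted for this 3pid, if any,
        # so the same invite link maps to the same guest across sessions.
        token = yield store.get_3pid_guest_access_token(medium, address)
        if token:
            defer.returnValue(token)

        # Otherwise register a fresh guest account ...
        _, token = yield register(generate_token=True, make_guest=True)

        # ... and persist its token. If another request raced us, the unique
        # (medium, address) index means the first token stored wins and is
        # returned instead of ours.
        token = yield store.save_or_get_3pid_guest_access_token(
            medium, address, token, inviter_user_id
        )
        defer.returnValue(token)
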
--- synapse/handlers/register.py | 15 +++++++ synapse/handlers/room.py | 8 ++++ synapse/storage/registration.py | 44 +++++++++++++++++++ .../delta/30/threepid_guest_access_tokens.sql | 24 ++++++++++ 4 files changed, 91 insertions(+) create mode 100644 synapse/storage/schema/delta/30/threepid_guest_access_tokens.sql diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index f8959e5d82..6d155d57e7 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -349,3 +349,18 @@ class RegistrationHandler(BaseHandler): def auth_handler(self): return self.hs.get_handlers().auth_handler + + @defer.inlineCallbacks + def guest_access_token_for(self, medium, address, inviter_user_id): + access_token = yield self.store.get_3pid_guest_access_token(medium, address) + if access_token: + defer.returnValue(access_token) + + _, access_token = yield self.register( + generate_token=True, + make_guest=True + ) + access_token = yield self.store.save_or_get_3pid_guest_access_token( + medium, address, access_token, inviter_user_id + ) + defer.returnValue(access_token) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index eb9700a35b..d2de23a6cc 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -848,6 +848,13 @@ class RoomMemberHandler(BaseHandler): user. """ + registration_handler = self.hs.get_handlers().registration_handler + guest_access_token = yield registration_handler.guest_access_token_for( + medium=medium, + address=address, + inviter_user_id=inviter_user_id, + ) + is_url = "%s%s/_matrix/identity/api/v1/store-invite" % ( id_server_scheme, id_server, ) @@ -864,6 +871,7 @@ class RoomMemberHandler(BaseHandler): "sender": inviter_user_id, "sender_display_name": inviter_display_name, "sender_avatar_url": inviter_avatar_url, + "guest_access_token": guest_access_token, } ) # TODO: Check for success diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 967c732bda..03a9b66e4a 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -387,3 +387,47 @@ class RegistrationStore(SQLBaseStore): "find_next_generated_user_id", _find_next_generated_user_id ))) + + @defer.inlineCallbacks + def get_3pid_guest_access_token(self, medium, address): + ret = yield self._simple_select_one( + "threepid_guest_access_tokens", + { + "medium": medium, + "address": address + }, + ["guest_access_token"], True, 'get_3pid_guest_access_token' + ) + if ret: + defer.returnValue(ret["guest_access_token"]) + defer.returnValue(None) + + @defer.inlineCallbacks + def save_or_get_3pid_guest_access_token( + self, medium, address, access_token, inviter_user_id + ): + """ + Gets the 3pid's guest access token if exists, else saves access_token. + + :param medium (str): Medium of the 3pid. Must be "email". + :param address (str): 3pid address. + :param access_token (str): The access token to persist if none is + already persisted. + :param inviter_user_id (str): User ID of the inviter. + :return (deferred str): Whichever access token is persisted at the end + of this function call. 
+ """ + def insert(txn): + txn.execute( + "INSERT INTO threepid_guest_access_tokens " + "(medium, address, guest_access_token, first_inviter) " + "VALUES (?, ?, ?, ?)", + (medium, address, access_token, inviter_user_id) + ) + + try: + yield self.runInteraction("save_3pid_guest_access_token", insert) + defer.returnValue(access_token) + except self.database_engine.module.IntegrityError: + ret = yield self.get_3pid_guest_access_token(medium, address) + defer.returnValue(ret) diff --git a/synapse/storage/schema/delta/30/threepid_guest_access_tokens.sql b/synapse/storage/schema/delta/30/threepid_guest_access_tokens.sql new file mode 100644 index 0000000000..0dd2f1360c --- /dev/null +++ b/synapse/storage/schema/delta/30/threepid_guest_access_tokens.sql @@ -0,0 +1,24 @@ +/* Copyright 2016 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- Stores guest account access tokens generated for unbound 3pids. +CREATE TABLE threepid_guest_access_tokens( + medium TEXT, -- The medium of the 3pid. Must be "email". + address TEXT, -- The 3pid address. + guest_access_token TEXT, -- The access token for a guest user for this 3pid. + first_inviter TEXT -- User ID of the first user to invite this 3pid to a room. +); + +CREATE UNIQUE INDEX threepid_guest_access_tokens_index ON threepid_guest_access_tokens(medium, address); From 9892d017b25380184fb8db47ef859b07053f00f9 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Wed, 24 Feb 2016 16:31:07 +0000 Subject: [PATCH 230/349] Remove unused get_rule_attr method --- synapse/rest/client/v1/push_rule.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py index 5db2805d68..6c8f09e898 100644 --- a/synapse/rest/client/v1/push_rule.py +++ b/synapse/rest/client/v1/push_rule.py @@ -200,14 +200,6 @@ class PushRuleRestServlet(ClientV1RestServlet): else: raise UnrecognizedRequestError() - def get_rule_attr(self, user_id, namespaced_rule_id, attr): - if attr == 'enabled': - return self.hs.get_datastore().get_push_rule_enabled_by_user_rule_id( - user_id, namespaced_rule_id - ) - else: - raise UnrecognizedRequestError() - def _rule_spec_from_path(path): if len(path) < 2: From 15c2ac2cac7377e48eb0531f911c4b7ea1891457 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Thu, 25 Feb 2016 15:13:07 +0000 Subject: [PATCH 231/349] =?UTF-8?q?Make=20sure=20we=20return=20a=20JSON=20?= =?UTF-8?q?object=20when=20returning=20the=20values=20of=20specif=E2=80=A6?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit …ic keys from a push rule --- synapse/rest/client/v1/push_rule.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py index 6c8f09e898..d26e4cde3e 100644 --- a/synapse/rest/client/v1/push_rule.py +++ b/synapse/rest/client/v1/push_rule.py @@ -324,7 +324,9 @@ def _filter_ruleset_with_path(ruleset, path): attr = path[0] if attr in the_rule: - return 
the_rule[attr] + # Make sure we return a JSON object as the attribute may be a + # JSON value. + return {attr: the_rule[attr]} else: raise UnrecognizedRequestError() From 0f0b011440eba5ee5c018cb7b9d2618bccd20220 Mon Sep 17 00:00:00 2001 From: David Baker Date: Thu, 25 Feb 2016 18:12:09 +0000 Subject: [PATCH 232/349] Send the invier's member event in room invite state so the invitee has their display name and avatar. --- synapse/handlers/_base.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py index 5613bd2059..bdade98bf7 100644 --- a/synapse/handlers/_base.py +++ b/synapse/handlers/_base.py @@ -293,6 +293,12 @@ class BaseHandler(object): if event.type == EventTypes.Member: if event.content["membership"] == Membership.INVITE: + def is_inviter_member_event(e): + return ( + e.type == EventTypes.Member and + e.sender == event.sender + ) + event.unsigned["invite_room_state"] = [ { "type": e.type, @@ -306,7 +312,7 @@ class BaseHandler(object): EventTypes.CanonicalAlias, EventTypes.RoomAvatar, EventTypes.Name, - ) + ) or is_inviter_member_event(e) ] invitee = UserID.from_string(event.state_key) From a53774721a90955cfb6180d332ca9f54f9b5e58a Mon Sep 17 00:00:00 2001 From: Gergely Polonkai Date: Fri, 26 Feb 2016 10:22:35 +0100 Subject: [PATCH 233/349] Add error codes for malformed/bad JSON in /login Signed-off-by: Gergely Polonkai --- synapse/rest/client/v1/login.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index 79101106ac..a4f89aea7b 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -404,10 +404,10 @@ def _parse_json(request): try: content = json.loads(request.content.read()) if type(content) != dict: - raise SynapseError(400, "Content must be a JSON object.") + raise SynapseError(400, "Content must be a JSON object.", errcode=Codes.BAD_JSON) return content except ValueError: - raise SynapseError(400, "Content not JSON.") + raise SynapseError(400, "Content not JSON.", errcode=Codes.NOT_JSON) def register_servlets(hs, http_server): From 87acd8fb075114849ca06188ef333119ea73ad12 Mon Sep 17 00:00:00 2001 From: Gergely Polonkai Date: Fri, 26 Feb 2016 12:05:38 +0100 Subject: [PATCH 234/349] Fix to appease the PEP8 dragon --- synapse/rest/client/v1/login.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index a4f89aea7b..f13272da8e 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -404,7 +404,9 @@ def _parse_json(request): try: content = json.loads(request.content.read()) if type(content) != dict: - raise SynapseError(400, "Content must be a JSON object.", errcode=Codes.BAD_JSON) + raise SynapseError( + 400, "Content must be a JSON object.", errcode=Codes.BAD_JSON + ) return content except ValueError: raise SynapseError(400, "Content not JSON.", errcode=Codes.NOT_JSON) From de27f7fc79b785961181d13749468ae3e2019772 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Fri, 26 Feb 2016 14:28:19 +0000 Subject: [PATCH 236/349] Add support for changing the actions for default rules See matrix-org/matrix-doc#283 Works by adding dummy rules to the push rules table with a negative priority class and then using those rules to clobber the default rule actions when adding the default rules in ``list_with_base_rules`` --- synapse/push/baserules.py | 57 +++++++++++++++++++++++++---- 
synapse/rest/client/v1/push_rule.py | 31 ++++++++++++++-- synapse/storage/push_rule.py | 25 +++++++++++++ 3 files changed, 102 insertions(+), 11 deletions(-) diff --git a/synapse/push/baserules.py b/synapse/push/baserules.py index 0832c77cb4..86a2998bcc 100644 --- a/synapse/push/baserules.py +++ b/synapse/push/baserules.py @@ -13,46 +13,67 @@ # limitations under the License. from synapse.push.rulekinds import PRIORITY_CLASS_MAP, PRIORITY_CLASS_INVERSE_MAP +import copy def list_with_base_rules(rawrules): + """Combine the list of rules set by the user with the default push rules + + :param list rawrules: The rules the user has modified or set. + :returns: A new list with the rules set by the user combined with the + defaults. + """ ruleslist = [] + # Grab the base rules that the user has modified. + # The modified base rules have a priority_class of -1. + modified_base_rules = { + r['rule_id']: r for r in rawrules if r['priority_class'] < 0 + } + + # Remove the modified base rules from the list, They'll be added back + # in the default postions in the list. + rawrules = [r for r in rawrules if r['priority_class'] >= 0] + # shove the server default rules for each kind onto the end of each current_prio_class = PRIORITY_CLASS_INVERSE_MAP.keys()[-1] ruleslist.extend(make_base_prepend_rules( - PRIORITY_CLASS_INVERSE_MAP[current_prio_class] + PRIORITY_CLASS_INVERSE_MAP[current_prio_class], modified_base_rules )) for r in rawrules: if r['priority_class'] < current_prio_class: while r['priority_class'] < current_prio_class: ruleslist.extend(make_base_append_rules( - PRIORITY_CLASS_INVERSE_MAP[current_prio_class] + PRIORITY_CLASS_INVERSE_MAP[current_prio_class], + modified_base_rules, )) current_prio_class -= 1 if current_prio_class > 0: ruleslist.extend(make_base_prepend_rules( - PRIORITY_CLASS_INVERSE_MAP[current_prio_class] + PRIORITY_CLASS_INVERSE_MAP[current_prio_class], + modified_base_rules, )) ruleslist.append(r) while current_prio_class > 0: ruleslist.extend(make_base_append_rules( - PRIORITY_CLASS_INVERSE_MAP[current_prio_class] + PRIORITY_CLASS_INVERSE_MAP[current_prio_class], + modified_base_rules, )) current_prio_class -= 1 if current_prio_class > 0: ruleslist.extend(make_base_prepend_rules( - PRIORITY_CLASS_INVERSE_MAP[current_prio_class] + PRIORITY_CLASS_INVERSE_MAP[current_prio_class], + modified_base_rules, )) return ruleslist -def make_base_append_rules(kind): +def make_base_append_rules(kind, modified_base_rules): rules = [] if kind == 'override': @@ -62,15 +83,31 @@ def make_base_append_rules(kind): elif kind == 'content': rules = BASE_APPEND_CONTENT_RULES + # Copy the rules before modifying them + rules = copy.deepcopy(rules) + for r in rules: + # Only modify the actions, keep the conditions the same. + modified = modified_base_rules.get(r['rule_id']) + if modified: + r['actions'] = modified['actions'] + return rules -def make_base_prepend_rules(kind): +def make_base_prepend_rules(kind, modified_base_rules): rules = [] if kind == 'override': rules = BASE_PREPEND_OVERRIDE_RULES + # Copy the rules before modifying them + rules = copy.deepcopy(rules) + for r in rules: + # Only modify the actions, keep the conditions the same. 
+ modified = modified_base_rules.get(r['rule_id']) + if modified: + r['actions'] = modified['actions'] + return rules @@ -263,18 +300,24 @@ BASE_APPEND_UNDERRIDE_RULES = [ ] +BASE_RULE_IDS = set() + for r in BASE_APPEND_CONTENT_RULES: r['priority_class'] = PRIORITY_CLASS_MAP['content'] r['default'] = True + BASE_RULE_IDS.add(r['rule_id']) for r in BASE_PREPEND_OVERRIDE_RULES: r['priority_class'] = PRIORITY_CLASS_MAP['override'] r['default'] = True + BASE_RULE_IDS.add(r['rule_id']) for r in BASE_APPEND_OVRRIDE_RULES: r['priority_class'] = PRIORITY_CLASS_MAP['override'] r['default'] = True + BASE_RULE_IDS.add(r['rule_id']) for r in BASE_APPEND_UNDERRIDE_RULES: r['priority_class'] = PRIORITY_CLASS_MAP['underride'] r['default'] = True + BASE_RULE_IDS.add(r['rule_id']) diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py index d26e4cde3e..970a019223 100644 --- a/synapse/rest/client/v1/push_rule.py +++ b/synapse/rest/client/v1/push_rule.py @@ -22,7 +22,7 @@ from .base import ClientV1RestServlet, client_path_patterns from synapse.storage.push_rule import ( InconsistentRuleException, RuleNotFoundException ) -import synapse.push.baserules as baserules +from synapse.push.baserules import list_with_base_rules, BASE_RULE_IDS from synapse.push.rulekinds import ( PRIORITY_CLASS_MAP, PRIORITY_CLASS_INVERSE_MAP ) @@ -55,6 +55,10 @@ class PushRuleRestServlet(ClientV1RestServlet): yield self.set_rule_attr(requester.user.to_string(), spec, content) defer.returnValue((200, {})) + if spec['rule_id'].startswith('.'): + # Rule ids starting with '.' are reserved for server default rules. + raise SynapseError(400, "cannot add new rule_ids that start with '.'") + try: (conditions, actions) = _rule_tuple_from_request_object( spec['template'], @@ -128,7 +132,7 @@ class PushRuleRestServlet(ClientV1RestServlet): ruleslist.append(rule) # We're going to be mutating this a lot, so do a deep copy - ruleslist = copy.deepcopy(baserules.list_with_base_rules(ruleslist)) + ruleslist = copy.deepcopy(list_with_base_rules(ruleslist)) rules = {'global': {}, 'device': {}} @@ -197,6 +201,18 @@ class PushRuleRestServlet(ClientV1RestServlet): return self.hs.get_datastore().set_push_rule_enabled( user_id, namespaced_rule_id, val ) + elif spec['attr'] == 'actions': + actions = val.get('actions') + _check_actions(actions) + namespaced_rule_id = _namespaced_rule_id_from_spec(spec) + rule_id = spec['rule_id'] + is_default_rule = rule_id.startswith(".") + if is_default_rule: + if namespaced_rule_id not in BASE_RULE_IDS: + raise SynapseError(404, "Unknown rule %r" % (namespaced_rule_id,)) + return self.hs.get_datastore().set_push_rule_actions( + user_id, namespaced_rule_id, actions, is_default_rule + ) else: raise UnrecognizedRequestError() @@ -274,6 +290,15 @@ def _rule_tuple_from_request_object(rule_template, rule_id, req_obj): raise InvalidRuleException("No actions found") actions = req_obj['actions'] + _check_actions(actions) + + return conditions, actions + + +def _check_actions(actions): + if not isinstance(actions, list): + raise InvalidRuleException("No actions found") + for a in actions: if a in ['notify', 'dont_notify', 'coalesce']: pass @@ -282,8 +307,6 @@ def _rule_tuple_from_request_object(rule_template, rule_id, req_obj): else: raise InvalidRuleException("Unrecognised action") - return conditions, actions - def _add_empty_priority_class_arrays(d): for pc in PRIORITY_CLASS_MAP.keys(): diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py index e19a81e41f..bb5c14d912 100644 --- 
a/synapse/storage/push_rule.py +++ b/synapse/storage/push_rule.py @@ -294,6 +294,31 @@ class PushRuleStore(SQLBaseStore): self.get_push_rules_enabled_for_user.invalidate, (user_id,) ) + def set_push_rule_actions(self, user_id, rule_id, actions, is_default_rule): + actions_json = json.dumps(actions) + + def set_push_rule_actions_txn(txn): + if is_default_rule: + # Add a dummy rule to the rules table with the user specified + # actions. + priority_class = -1 + priority = 1 + self._upsert_push_rule_txn( + txn, user_id, rule_id, priority_class, priority, + "[]", actions_json + ) + else: + self._simple_update_one_txn( + txn, + "push_rules", + {'user_name': user_id, 'rule_id': rule_id}, + {'actions': actions_json}, + ) + + return self.runInteraction( + "set_push_rule_actions", set_push_rule_actions_txn, + ) + class RuleNotFoundException(Exception): pass From 9c48f1ed224e02c30d73388c5e6a5839976bbc58 Mon Sep 17 00:00:00 2001 From: Patrik Oldsberg Date: Mon, 29 Feb 2016 23:11:06 +0100 Subject: [PATCH 237/349] handlers/register: make sure another user id is generated when a collision occurs Signed-off-by: Patrik Oldsberg --- synapse/handlers/register.py | 1 + 1 file changed, 1 insertion(+) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 6d155d57e7..c5e5b28811 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -157,6 +157,7 @@ class RegistrationHandler(BaseHandler): ) except SynapseError: # if user id is taken, just generate another + user = None user_id = None token = None attempts += 1 From ff2d7551c738ea6138b7bcd245620d91e80841cf Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 1 Mar 2016 10:59:17 +0000 Subject: [PATCH 238/349] Correct cache miss detection --- synapse/util/caches/expiringcache.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py index 138cbefff4..55ba1c08c0 100644 --- a/synapse/util/caches/expiringcache.py +++ b/synapse/util/caches/expiringcache.py @@ -79,7 +79,7 @@ class ExpiringCache(object): try: entry = self._cache[key] cache_counter.inc_hits(self._cache_name) - finally: + except KeyError: cache_counter.inc_misses(self._cache_name) if self._reset_expiry_on_get: From 72165e5b779709ea6724f6dd2081027c0818276b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 1 Mar 2016 11:00:10 +0000 Subject: [PATCH 239/349] Reraise exception --- synapse/util/caches/expiringcache.py | 1 + 1 file changed, 1 insertion(+) diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py index 55ba1c08c0..e863a8f8a9 100644 --- a/synapse/util/caches/expiringcache.py +++ b/synapse/util/caches/expiringcache.py @@ -81,6 +81,7 @@ class ExpiringCache(object): cache_counter.inc_hits(self._cache_name) except KeyError: cache_counter.inc_misses(self._cache_name) + raise if self._reset_expiry_on_get: entry.time = self._clock.time_msec() From 910fc0f28f82ca7c719b5bbe98f4d3c2d0d76abf Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 1 Mar 2016 12:56:39 +0000 Subject: [PATCH 240/349] Add enviroment variable SYNAPSE_CACHE_FACTOR, default it to 0.1 --- synapse/handlers/presence.py | 4 ++++ synapse/handlers/receipts.py | 2 -- synapse/util/caches/descriptors.py | 6 ++++++ 3 files changed, 10 insertions(+), 2 deletions(-) diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 08e38cdd25..a5c363e390 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -130,6 +130,10 @@ class 
PresenceHandler(BaseHandler): for state in active_presence } + metrics.register_callback( + "user_to_current_state_size", lambda: len(self.user_to_current_state) + ) + now = self.clock.time_msec() for state in active_presence: self.wheel_timer.insert( diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py index de4c694714..935c339707 100644 --- a/synapse/handlers/receipts.py +++ b/synapse/handlers/receipts.py @@ -36,8 +36,6 @@ class ReceiptsHandler(BaseHandler): ) self.clock = self.hs.get_clock() - self._receipt_cache = None - @defer.inlineCallbacks def received_client_receipt(self, room_id, receipt_type, user_id, event_id): diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 277854ccbc..2b51ad6d04 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -28,6 +28,7 @@ from twisted.internet import defer from collections import OrderedDict +import os import functools import inspect import threading @@ -38,9 +39,14 @@ logger = logging.getLogger(__name__) _CacheSentinel = object() +CACHE_SIZE_FACTOR = float(os.environ.get("SYNAPSE_CACHE_FACTOR", 0.1)) + + class Cache(object): def __init__(self, name, max_entries=1000, keylen=1, lru=True, tree=False): + max_entries = int(max_entries * CACHE_SIZE_FACTOR) + if lru: cache_type = TreeCache if tree else dict self.cache = LruCache( From ce2cdced6136e81e182242c11c50e7b1c8a5a622 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 1 Mar 2016 13:21:46 +0000 Subject: [PATCH 241/349] Move cache size fiddling to descriptors only. Fix tests --- synapse/util/caches/descriptors.py | 4 ++-- tests/storage/test_appservice.py | 12 +++++++----- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 2b51ad6d04..35544b19fd 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -45,8 +45,6 @@ CACHE_SIZE_FACTOR = float(os.environ.get("SYNAPSE_CACHE_FACTOR", 0.1)) class Cache(object): def __init__(self, name, max_entries=1000, keylen=1, lru=True, tree=False): - max_entries = int(max_entries * CACHE_SIZE_FACTOR) - if lru: cache_type = TreeCache if tree else dict self.cache = LruCache( @@ -146,6 +144,8 @@ class CacheDescriptor(object): """ def __init__(self, orig, max_entries=1000, num_args=1, lru=True, tree=False, inlineCallbacks=False): + max_entries = int(max_entries * CACHE_SIZE_FACTOR) + self.orig = orig if inlineCallbacks: diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py index ed8af10d87..5734198121 100644 --- a/tests/storage/test_appservice.py +++ b/tests/storage/test_appservice.py @@ -35,7 +35,8 @@ class ApplicationServiceStoreTestCase(unittest.TestCase): def setUp(self): self.as_yaml_files = [] config = Mock( - app_service_config_files=self.as_yaml_files + app_service_config_files=self.as_yaml_files, + event_cache_size=1, ) hs = yield setup_test_homeserver(config=config) @@ -109,7 +110,8 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase): self.as_yaml_files = [] config = Mock( - app_service_config_files=self.as_yaml_files + app_service_config_files=self.as_yaml_files, + event_cache_size=1, ) hs = yield setup_test_homeserver(config=config) self.db_pool = hs.get_db_pool() @@ -438,7 +440,7 @@ class ApplicationServiceStoreConfigTestCase(unittest.TestCase): f1 = self._write_config(suffix="1") f2 = self._write_config(suffix="2") - config = Mock(app_service_config_files=[f1, f2]) + config = 
Mock(app_service_config_files=[f1, f2], event_cache_size=1) hs = yield setup_test_homeserver(config=config, datastore=Mock()) ApplicationServiceStore(hs) @@ -448,7 +450,7 @@ class ApplicationServiceStoreConfigTestCase(unittest.TestCase): f1 = self._write_config(id="id", suffix="1") f2 = self._write_config(id="id", suffix="2") - config = Mock(app_service_config_files=[f1, f2]) + config = Mock(app_service_config_files=[f1, f2], event_cache_size=1) hs = yield setup_test_homeserver(config=config, datastore=Mock()) with self.assertRaises(ConfigError) as cm: @@ -464,7 +466,7 @@ class ApplicationServiceStoreConfigTestCase(unittest.TestCase): f1 = self._write_config(as_token="as_token", suffix="1") f2 = self._write_config(as_token="as_token", suffix="2") - config = Mock(app_service_config_files=[f1, f2]) + config = Mock(app_service_config_files=[f1, f2], event_cache_size=1) hs = yield setup_test_homeserver(config=config, datastore=Mock()) with self.assertRaises(ConfigError) as cm: From 374f9b2f073a8d7832916cf1b17d6aacba235066 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 1 Mar 2016 13:30:15 +0000 Subject: [PATCH 242/349] Limit stream change cache size too --- synapse/util/caches/stream_change_cache.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py index 970488a19c..a1aec7aa55 100644 --- a/synapse/util/caches/stream_change_cache.py +++ b/synapse/util/caches/stream_change_cache.py @@ -18,11 +18,15 @@ from synapse.util.caches import cache_counter, caches_by_name from blist import sorteddict import logging +import os logger = logging.getLogger(__name__) +CACHE_SIZE_FACTOR = float(os.environ.get("SYNAPSE_CACHE_FACTOR", 0.1)) + + class StreamChangeCache(object): """Keeps track of the stream positions of the latest change in a set of entities. @@ -33,7 +37,7 @@ class StreamChangeCache(object): old then the cache will simply return all given entities. """ def __init__(self, name, current_stream_pos, max_size=10000, prefilled_cache={}): - self._max_size = max_size + self._max_size = int(max_size * CACHE_SIZE_FACTOR) self._entity_to_key = {} self._cache = sorteddict() self._earliest_known_stream_pos = current_stream_pos From 54172924c834a954fcbbc6224318140b9e95aa7d Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Tue, 1 Mar 2016 14:32:56 +0000 Subject: [PATCH 243/349] Load the current id in the IdGenerator constructor Rather than loading them lazily. This allows us to remove all the yield statements and spurious arguments for the get_next methods. It also allows us to replace all instances of get_next_txn with get_next since get_next no longer needs to access the db. 
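To illustrate the shape of the change (a sketch only, not the code in this patch; the real implementation lives in synapse/storage/util/id_generators.py below): reading MAX(id) once in the constructor makes get_next a purely in-memory, lock-protected operation, so callers no longer need to yield or pass the store through.

    import threading

    class EagerIdGenerator(object):
        """Hands out integer ids, seeded from the table's current maximum."""

        def __init__(self, db_conn, table, column):
            # Read the current maximum once, up front, instead of lazily on
            # first use inside a transaction.
            cur = db_conn.cursor()
            cur.execute("SELECT MAX(%s) FROM %s" % (column, table))
            val, = cur.fetchone()
            cur.close()
            self._lock = threading.Lock()
            self._next_id = (val or 0) + 1

        def get_next(self):
            # Synchronous: no deferred, no transaction argument.
            with self._lock:
                i = self._next_id
                self._next_id += 1
                return i

Call sites then change from next_id = yield self._pushers_id_gen.get_next() to next_id = self._pushers_id_gen.get_next(), as the pusher store hunk below shows.
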
--- synapse/storage/__init__.py | 14 +++--- synapse/storage/account_data.py | 8 ++-- synapse/storage/events.py | 6 +-- synapse/storage/presence.py | 4 +- synapse/storage/push_rule.py | 4 +- synapse/storage/pusher.py | 2 +- synapse/storage/receipts.py | 4 +- synapse/storage/registration.py | 6 +-- synapse/storage/state.py | 2 +- synapse/storage/tags.py | 8 ++-- synapse/storage/transactions.py | 2 +- synapse/storage/util/id_generators.py | 69 +++++++++------------------ 12 files changed, 52 insertions(+), 77 deletions(-) diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 9be1d12fac..f257721ea3 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -115,13 +115,13 @@ class DataStore(RoomMemberStore, RoomStore, db_conn, "presence_stream", "stream_id" ) - self._transaction_id_gen = IdGenerator("sent_transactions", "id", self) - self._state_groups_id_gen = IdGenerator("state_groups", "id", self) - self._access_tokens_id_gen = IdGenerator("access_tokens", "id", self) - self._refresh_tokens_id_gen = IdGenerator("refresh_tokens", "id", self) - self._pushers_id_gen = IdGenerator("pushers", "id", self) - self._push_rule_id_gen = IdGenerator("push_rules", "id", self) - self._push_rules_enable_id_gen = IdGenerator("push_rules_enable", "id", self) + self._transaction_id_gen = IdGenerator(db_conn, "sent_transactions", "id") + self._state_groups_id_gen = IdGenerator(db_conn, "state_groups", "id") + self._access_tokens_id_gen = IdGenerator(db_conn, "access_tokens", "id") + self._refresh_tokens_id_gen = IdGenerator(db_conn, "refresh_tokens", "id") + self._pushers_id_gen = IdGenerator(db_conn, "pushers", "id") + self._push_rule_id_gen = IdGenerator(db_conn, "push_rules", "id") + self._push_rules_enable_id_gen = IdGenerator(db_conn, "push_rules_enable", "id") events_max = self._stream_id_gen.get_max_token() event_cache_prefill, min_event_val = self._get_cache_dict( diff --git a/synapse/storage/account_data.py b/synapse/storage/account_data.py index 91cbf399b6..21a3240d9d 100644 --- a/synapse/storage/account_data.py +++ b/synapse/storage/account_data.py @@ -163,12 +163,12 @@ class AccountDataStore(SQLBaseStore): ) self._update_max_stream_id(txn, next_id) - with (yield self._account_data_id_gen.get_next(self)) as next_id: + with self._account_data_id_gen.get_next() as next_id: yield self.runInteraction( "add_room_account_data", add_account_data_txn, next_id ) - result = yield self._account_data_id_gen.get_max_token() + result = self._account_data_id_gen.get_max_token() defer.returnValue(result) @defer.inlineCallbacks @@ -202,12 +202,12 @@ class AccountDataStore(SQLBaseStore): ) self._update_max_stream_id(txn, next_id) - with (yield self._account_data_id_gen.get_next(self)) as next_id: + with self._account_data_id_gen.get_next() as next_id: yield self.runInteraction( "add_user_account_data", add_account_data_txn, next_id ) - result = yield self._account_data_id_gen.get_max_token() + result = self._account_data_id_gen.get_max_token() defer.returnValue(result) def _update_max_stream_id(self, txn, next_id): diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 1dd3236829..73a152bc07 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -75,8 +75,8 @@ class EventsStore(SQLBaseStore): yield stream_orderings stream_ordering_manager = stream_ordering_manager() else: - stream_ordering_manager = yield self._stream_id_gen.get_next_mult( - self, len(events_and_contexts) + stream_ordering_manager = self._stream_id_gen.get_next_mult( + 
len(events_and_contexts) ) with stream_ordering_manager as stream_orderings: @@ -109,7 +109,7 @@ class EventsStore(SQLBaseStore): stream_ordering = self.min_stream_token if stream_ordering is None: - stream_ordering_manager = yield self._stream_id_gen.get_next(self) + stream_ordering_manager = self._stream_id_gen.get_next() else: @contextmanager def stream_ordering_manager(): diff --git a/synapse/storage/presence.py b/synapse/storage/presence.py index 3ef91d34db..eece7f8961 100644 --- a/synapse/storage/presence.py +++ b/synapse/storage/presence.py @@ -58,8 +58,8 @@ class UserPresenceState(namedtuple("UserPresenceState", class PresenceStore(SQLBaseStore): @defer.inlineCallbacks def update_presence(self, presence_states): - stream_ordering_manager = yield self._presence_id_gen.get_next_mult( - self, len(presence_states) + stream_ordering_manager = self._presence_id_gen.get_next_mult( + len(presence_states) ) with stream_ordering_manager as stream_orderings: diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py index bb5c14d912..56e69495b1 100644 --- a/synapse/storage/push_rule.py +++ b/synapse/storage/push_rule.py @@ -226,7 +226,7 @@ class PushRuleStore(SQLBaseStore): if txn.rowcount == 0: # We didn't update a row with the given rule_id so insert one - push_rule_id = self._push_rule_id_gen.get_next_txn(txn) + push_rule_id = self._push_rule_id_gen.get_next() self._simple_insert_txn( txn, @@ -279,7 +279,7 @@ class PushRuleStore(SQLBaseStore): defer.returnValue(ret) def _set_push_rule_enabled_txn(self, txn, user_id, rule_id, enabled): - new_id = self._push_rules_enable_id_gen.get_next_txn(txn) + new_id = self._push_rules_enable_id_gen.get_next() self._simple_upsert_txn( txn, "push_rules_enable", diff --git a/synapse/storage/pusher.py b/synapse/storage/pusher.py index c23648cdbc..7693ab9082 100644 --- a/synapse/storage/pusher.py +++ b/synapse/storage/pusher.py @@ -84,7 +84,7 @@ class PusherStore(SQLBaseStore): app_display_name, device_display_name, pushkey, pushkey_ts, lang, data, profile_tag=""): try: - next_id = yield self._pushers_id_gen.get_next() + next_id = self._pushers_id_gen.get_next() yield self._simple_upsert( "pushers", dict( diff --git a/synapse/storage/receipts.py b/synapse/storage/receipts.py index a7343c97f7..cd6dca4901 100644 --- a/synapse/storage/receipts.py +++ b/synapse/storage/receipts.py @@ -330,7 +330,7 @@ class ReceiptsStore(SQLBaseStore): "insert_receipt_conv", graph_to_linear ) - stream_id_manager = yield self._receipts_id_gen.get_next(self) + stream_id_manager = self._receipts_id_gen.get_next() with stream_id_manager as stream_id: have_persisted = yield self.runInteraction( "insert_linearized_receipt", @@ -347,7 +347,7 @@ class ReceiptsStore(SQLBaseStore): room_id, receipt_type, user_id, event_ids, data ) - max_persisted_id = yield self._stream_id_gen.get_max_token() + max_persisted_id = self._stream_id_gen.get_max_token() defer.returnValue((stream_id, max_persisted_id)) diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 03a9b66e4a..ad1157f979 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -40,7 +40,7 @@ class RegistrationStore(SQLBaseStore): Raises: StoreError if there was a problem adding this. """ - next_id = yield self._access_tokens_id_gen.get_next() + next_id = self._access_tokens_id_gen.get_next() yield self._simple_insert( "access_tokens", @@ -62,7 +62,7 @@ class RegistrationStore(SQLBaseStore): Raises: StoreError if there was a problem adding this. 
""" - next_id = yield self._refresh_tokens_id_gen.get_next() + next_id = self._refresh_tokens_id_gen.get_next() yield self._simple_insert( "refresh_tokens", @@ -99,7 +99,7 @@ class RegistrationStore(SQLBaseStore): def _register(self, txn, user_id, token, password_hash, was_guest, make_guest): now = int(self.clock.time()) - next_id = self._access_tokens_id_gen.get_next_txn(txn) + next_id = self._access_tokens_id_gen.get_next() try: if was_guest: diff --git a/synapse/storage/state.py b/synapse/storage/state.py index 372b540002..8ed8a21b0a 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -83,7 +83,7 @@ class StateStore(SQLBaseStore): if event.is_state(): state_events[(event.type, event.state_key)] = event - state_group = self._state_groups_id_gen.get_next_txn(txn) + state_group = self._state_groups_id_gen.get_next() self._simple_insert_txn( txn, table="state_groups", diff --git a/synapse/storage/tags.py b/synapse/storage/tags.py index 9551aa9739..1127b0bd7e 100644 --- a/synapse/storage/tags.py +++ b/synapse/storage/tags.py @@ -142,12 +142,12 @@ class TagsStore(SQLBaseStore): ) self._update_revision_txn(txn, user_id, room_id, next_id) - with (yield self._account_data_id_gen.get_next(self)) as next_id: + with self._account_data_id_gen.get_next() as next_id: yield self.runInteraction("add_tag", add_tag_txn, next_id) self.get_tags_for_user.invalidate((user_id,)) - result = yield self._account_data_id_gen.get_max_token() + result = self._account_data_id_gen.get_max_token() defer.returnValue(result) @defer.inlineCallbacks @@ -164,12 +164,12 @@ class TagsStore(SQLBaseStore): txn.execute(sql, (user_id, room_id, tag)) self._update_revision_txn(txn, user_id, room_id, next_id) - with (yield self._account_data_id_gen.get_next(self)) as next_id: + with self._account_data_id_gen.get_next() as next_id: yield self.runInteraction("remove_tag", remove_tag_txn, next_id) self.get_tags_for_user.invalidate((user_id,)) - result = yield self._account_data_id_gen.get_max_token() + result = self._account_data_id_gen.get_max_token() defer.returnValue(result) def _update_revision_txn(self, txn, user_id, room_id, next_id): diff --git a/synapse/storage/transactions.py b/synapse/storage/transactions.py index 4475c451c1..d338dfcf0a 100644 --- a/synapse/storage/transactions.py +++ b/synapse/storage/transactions.py @@ -117,7 +117,7 @@ class TransactionStore(SQLBaseStore): def _prep_send_transaction(self, txn, transaction_id, destination, origin_server_ts): - next_id = self._transaction_id_gen.get_next_txn(txn) + next_id = self._transaction_id_gen.get_next() # First we find out what the prev_txns should be. # Since we know that we are only sending one transaction at a time, diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index ef5e4a4668..efe3f68e6e 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -13,51 +13,30 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from twisted.internet import defer - from collections import deque import contextlib import threading class IdGenerator(object): - def __init__(self, table, column, store): + def __init__(self, db_conn, table, column): self.table = table self.column = column - self.store = store self._lock = threading.Lock() - self._next_id = None + cur = db_conn.cursor() + self._next_id = self._load_next_id(cur) + cur.close() + + def _load_next_id(self, txn): + txn.execute("SELECT MAX(%s) FROM %s" % (self.column, self.table,)) + val, = txn.fetchone() + return val + 1 if val else 1 - @defer.inlineCallbacks def get_next(self): - if self._next_id is None: - yield self.store.runInteraction( - "IdGenerator_%s" % (self.table,), - self.get_next_txn, - ) - with self._lock: i = self._next_id self._next_id += 1 - defer.returnValue(i) - - def get_next_txn(self, txn): - with self._lock: - if self._next_id: - i = self._next_id - self._next_id += 1 - return i - else: - txn.execute( - "SELECT MAX(%s) FROM %s" % (self.column, self.table,) - ) - - val, = txn.fetchone() - cur = val or 0 - cur += 1 - self._next_id = cur + 1 - - return cur + return i class StreamIdGenerator(object): @@ -69,7 +48,7 @@ class StreamIdGenerator(object): persistence of events can complete out of order. Usage: - with stream_id_gen.get_next_txn(txn) as stream_id: + with stream_id_gen.get_next() as stream_id: # ... persist event ... """ def __init__(self, db_conn, table, column): @@ -79,15 +58,21 @@ class StreamIdGenerator(object): self._lock = threading.Lock() cur = db_conn.cursor() - self._current_max = self._get_or_compute_current_max(cur) + self._current_max = self._load_current_max(cur) cur.close() self._unfinished_ids = deque() - def get_next(self, store): + def _load_current_max(self, txn): + txn.execute("SELECT MAX(%s) FROM %s" % (self.column, self.table)) + rows = txn.fetchall() + val, = rows[0] + return int(val) if val else 1 + + def get_next(self): """ Usage: - with yield stream_id_gen.get_next as stream_id: + with stream_id_gen.get_next() as stream_id: # ... persist event ... """ with self._lock: @@ -106,10 +91,10 @@ class StreamIdGenerator(object): return manager() - def get_next_mult(self, store, n): + def get_next_mult(self, n): """ Usage: - with yield stream_id_gen.get_next(store, n) as stream_ids: + with stream_id_gen.get_next(n) as stream_ids: # ... persist events ... 
""" with self._lock: @@ -139,13 +124,3 @@ class StreamIdGenerator(object): return self._unfinished_ids[0] - 1 return self._current_max - - def _get_or_compute_current_max(self, txn): - with self._lock: - txn.execute("SELECT MAX(%s) FROM %s" % (self.column, self.table)) - rows = txn.fetchall() - val, = rows[0] - - self._current_max = int(val) if val else 1 - - return self._current_max From f9af8962f8ea6201ed3910eb248b8668f1262fef Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 1 Mar 2016 14:46:31 +0000 Subject: [PATCH 244/349] Allow alias creators to delete aliases --- synapse/handlers/directory.py | 27 ++++++++++++++----- synapse/rest/client/v1/directory.py | 3 --- synapse/storage/directory.py | 15 ++++++++++- .../storage/schema/delta/30/alias_creator.sql | 16 +++++++++++ 4 files changed, 51 insertions(+), 10 deletions(-) create mode 100644 synapse/storage/schema/delta/30/alias_creator.sql diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index e0a778e7ff..cce6f76f0e 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -17,9 +17,9 @@ from twisted.internet import defer from ._base import BaseHandler -from synapse.api.errors import SynapseError, Codes, CodeMessageException +from synapse.api.errors import SynapseError, Codes, CodeMessageException, AuthError from synapse.api.constants import EventTypes -from synapse.types import RoomAlias +from synapse.types import RoomAlias, UserID import logging import string @@ -38,7 +38,7 @@ class DirectoryHandler(BaseHandler): ) @defer.inlineCallbacks - def _create_association(self, room_alias, room_id, servers=None): + def _create_association(self, room_alias, room_id, servers=None, creator=None): # general association creation for both human users and app services for wchar in string.whitespace: @@ -60,7 +60,8 @@ class DirectoryHandler(BaseHandler): yield self.store.create_room_alias_association( room_alias, room_id, - servers + servers, + creator=creator, ) @defer.inlineCallbacks @@ -77,7 +78,7 @@ class DirectoryHandler(BaseHandler): 400, "This alias is reserved by an application service.", errcode=Codes.EXCLUSIVE ) - yield self._create_association(room_alias, room_id, servers) + yield self._create_association(room_alias, room_id, servers, creator=user_id) @defer.inlineCallbacks def create_appservice_association(self, service, room_alias, room_id, @@ -95,7 +96,11 @@ class DirectoryHandler(BaseHandler): def delete_association(self, user_id, room_alias): # association deletion for human users - # TODO Check if server admin + can_delete = yield self._user_can_delete_alias(room_alias, user_id) + if not can_delete: + raise AuthError( + 403, "You don't have permission to delete the alias.", + ) can_delete = yield self.can_modify_alias( room_alias, @@ -257,3 +262,13 @@ class DirectoryHandler(BaseHandler): return # either no interested services, or no service with an exclusive lock defer.returnValue(True) + + @defer.inlineCallbacks + def _user_can_delete_alias(self, alias, user_id): + creator = yield self.store.get_room_alias_creator(alias.to_string()) + + if creator and creator == user_id: + defer.returnValue(True) + + is_admin = yield self.auth.is_server_admin(UserID.from_string(user_id)) + defer.returnValue(is_admin) diff --git a/synapse/rest/client/v1/directory.py b/synapse/rest/client/v1/directory.py index 74ec1e50e0..55c22000fd 100644 --- a/synapse/rest/client/v1/directory.py +++ b/synapse/rest/client/v1/directory.py @@ -118,9 +118,6 @@ class ClientDirectoryServer(ClientV1RestServlet): 
requester = yield self.auth.get_user_by_req(request) user = requester.user - is_admin = yield self.auth.is_server_admin(user) - if not is_admin: - raise AuthError(403, "You need to be a server admin") room_alias = RoomAlias.from_string(room_alias) diff --git a/synapse/storage/directory.py b/synapse/storage/directory.py index 1556619d5e..012a0b414a 100644 --- a/synapse/storage/directory.py +++ b/synapse/storage/directory.py @@ -70,13 +70,14 @@ class DirectoryStore(SQLBaseStore): ) @defer.inlineCallbacks - def create_room_alias_association(self, room_alias, room_id, servers): + def create_room_alias_association(self, room_alias, room_id, servers, creator=None): """ Creates an associatin between a room alias and room_id/servers Args: room_alias (RoomAlias) room_id (str) servers (list) + creator (str): Optional user_id of creator. Returns: Deferred @@ -87,6 +88,7 @@ class DirectoryStore(SQLBaseStore): { "room_alias": room_alias.to_string(), "room_id": room_id, + "creator": creator, }, desc="create_room_alias_association", ) @@ -107,6 +109,17 @@ class DirectoryStore(SQLBaseStore): ) self.get_aliases_for_room.invalidate((room_id,)) + def get_room_alias_creator(self, room_alias): + return self._simple_select_one_onecol( + table="room_aliases", + keyvalues={ + "room_alias": room_alias, + }, + retcol="creator", + desc="get_room_alias_creator", + allow_none=True + ) + @defer.inlineCallbacks def delete_room_alias(self, room_alias): room_id = yield self.runInteraction( diff --git a/synapse/storage/schema/delta/30/alias_creator.sql b/synapse/storage/schema/delta/30/alias_creator.sql new file mode 100644 index 0000000000..c9d0dde638 --- /dev/null +++ b/synapse/storage/schema/delta/30/alias_creator.sql @@ -0,0 +1,16 @@ +/* Copyright 2016 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +ALTER TABLE room_aliases ADD COLUMN creator TEXT; From 60a0f81c7a2da86bf959227a440e3f7a2b727bb5 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Tue, 1 Mar 2016 14:49:41 +0000 Subject: [PATCH 245/349] Add a /replication API for extracting the updates that happened on synapse This is necessary for replicating the data in synapse to be visible to a separate service because presence and typing notifications aren't stored in a database so won't be visible to another process. This API can be used to either get the raw data by requesting the tables themselves or to just receive notifications for updates by following the streams meta-stream. Returns updates for each table requested a JSON array of arrays with a row for each row in the table. Each table is prefixed by a header row with the: name of the table, current stream_id position for the table, number of rows, number of columns and the names of the columns. This is followed by the rows that have been added to the server since the requester last asked. The API has a timeout and is hooked up to the notifier so that a slave can long poll for updates. 
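As a hedged sketch of how a consumer might drive this API (it condenses what the new scripts-dev/tail-synapse.py below does; the server URL and timeout are placeholder values):

    import requests

    SERVER = "http://localhost:8008"  # placeholder

    def poll(params):
        return requests.get(SERVER + "/_synapse/replication", params=params).json()

    # Ask the "streams" meta-stream (from position "-1", i.e. the start) for
    # the name and current position of every other stream.
    meta = poll({"streams": "-1"})["streams"]
    rows = [dict(zip(meta["field_names"], row)) for row in meta["rows"]]
    positions = {row["name"]: row["position"] for row in rows}

    # Long-poll: the request blocks (for up to "timeout" milliseconds) until
    # at least one of the requested streams has rows newer than our positions.
    positions["timeout"] = 30000
    updates = poll(positions)
    for name, update in updates.items():
        positions[name] = update["position"]  # resume point for the next poll
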
--- scripts-dev/tail-synapse.py | 67 ++++++ synapse/app/homeserver.py | 4 + synapse/handlers/presence.py | 19 ++ synapse/handlers/typing.py | 14 ++ synapse/notifier.py | 48 +++++ synapse/replication/__init__.py | 14 ++ synapse/replication/resource.py | 320 +++++++++++++++++++++++++++++ synapse/storage/account_data.py | 36 +++- synapse/storage/events.py | 45 ++++ synapse/storage/presence.py | 16 ++ synapse/storage/receipts.py | 16 ++ synapse/storage/tags.py | 53 +++++ tests/replication/__init__.py | 14 ++ tests/replication/test_resource.py | 179 ++++++++++++++++ tests/utils.py | 5 +- 15 files changed, 846 insertions(+), 4 deletions(-) create mode 100644 scripts-dev/tail-synapse.py create mode 100644 synapse/replication/__init__.py create mode 100644 synapse/replication/resource.py create mode 100644 tests/replication/__init__.py create mode 100644 tests/replication/test_resource.py diff --git a/scripts-dev/tail-synapse.py b/scripts-dev/tail-synapse.py new file mode 100644 index 0000000000..18be711e92 --- /dev/null +++ b/scripts-dev/tail-synapse.py @@ -0,0 +1,67 @@ +import requests +import collections +import sys +import time +import json + +Entry = collections.namedtuple("Entry", "name position rows") + +ROW_TYPES = {} + + +def row_type_for_columns(name, column_names): + column_names = tuple(column_names) + row_type = ROW_TYPES.get((name, column_names)) + if row_type is None: + row_type = collections.namedtuple(name, column_names) + ROW_TYPES[(name, column_names)] = row_type + return row_type + + +def parse_response(content): + streams = json.loads(content) + result = {} + for name, value in streams.items(): + row_type = row_type_for_columns(name, value["field_names"]) + position = value["position"] + rows = [row_type(*row) for row in value["rows"]] + result[name] = Entry(name, position, rows) + return result + + +def replicate(server, streams): + return parse_response(requests.get( + server + "/_synapse/replication", + verify=False, + params=streams + ).content) + + +def main(): + server = sys.argv[1] + + streams = None + while not streams: + try: + streams = { + row.name: row.position + for row in replicate(server, {"streams":"-1"})["streams"].rows + } + except requests.exceptions.ConnectionError as e: + time.sleep(0.1) + + while True: + try: + results = replicate(server, streams) + except: + sys.stdout.write("connection_lost("+ repr(streams) + ")\n") + break + for update in results.values(): + for row in update.rows: + sys.stdout.write(repr(row) + "\n") + streams[update.name] = update.position + + + +if __name__=='__main__': + main() diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 2b4be7bdd0..de5ee988f1 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -63,6 +63,7 @@ from synapse.config.homeserver import HomeServerConfig from synapse.crypto import context_factory from synapse.util.logcontext import LoggingContext from synapse.metrics.resource import MetricsResource, METRICS_PREFIX +from synapse.replication.resource import ReplicationResource, REPLICATION_PREFIX from synapse.federation.transport.server import TransportLayerServer from synapse import events @@ -169,6 +170,9 @@ class SynapseHomeServer(HomeServer): if name == "metrics" and self.get_config().enable_metrics: resources[METRICS_PREFIX] = MetricsResource(self) + if name == "replication": + resources[REPLICATION_PREFIX] = ReplicationResource(self) + root_resource = create_resource_tree(resources) if tls: reactor.listenSSL( diff --git a/synapse/handlers/presence.py 
b/synapse/handlers/presence.py index 08e38cdd25..d98e80086e 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -774,6 +774,25 @@ class PresenceHandler(BaseHandler): defer.returnValue(observer_user.to_string() in accepted_observers) + @defer.inlineCallbacks + def get_all_presence_updates(self, last_id, current_id): + """ + Gets a list of presence update rows from between the given stream ids. + Each row has: + - stream_id(str) + - user_id(str) + - state(str) + - last_active_ts(int) + - last_federation_update_ts(int) + - last_user_sync_ts(int) + - status_msg(int) + - currently_active(int) + """ + # TODO(markjh): replicate the unpersisted changes. + # This could use the in-memory stores for recent changes. + rows = yield self.store.get_all_presence_updates(last_id, current_id) + defer.returnValue(rows) + def should_notify(old_state, new_state): """Decides if a presence state change should be sent to interested parties. diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index b16d0017df..8ce27f49ec 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -25,6 +25,7 @@ from synapse.types import UserID import logging from collections import namedtuple +import ujson as json logger = logging.getLogger(__name__) @@ -219,6 +220,19 @@ class TypingNotificationHandler(BaseHandler): "typing_key", self._latest_room_serial, rooms=[room_id] ) + def get_all_typing_updates(self, last_id, current_id): + # TODO: Work out a way to do this without scanning the entire state. + rows = [] + for room_id, serial in self._room_serials.items(): + if last_id < serial and serial <= current_id: + typing = self._room_typing[room_id] + typing_bytes = json.dumps([ + u.to_string() for u in typing + ], ensure_ascii=False) + rows.append((serial, room_id, typing_bytes)) + rows.sort() + return rows + class TypingNotificationEventSource(object): def __init__(self, hs): diff --git a/synapse/notifier.py b/synapse/notifier.py index 560866b26e..3c36a20868 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -159,6 +159,8 @@ class Notifier(object): self.remove_expired_streams, self.UNUSED_STREAM_EXPIRY_MS ) + self.replication_deferred = ObservableDeferred(defer.Deferred()) + # This is not a very cheap test to perform, but it's only executed # when rendering the metrics page, which is likely once per minute at # most when scraping it. @@ -207,6 +209,8 @@ class Notifier(object): )) self._notify_pending_new_room_events(max_room_stream_id) + self.notify_replication() + def _notify_pending_new_room_events(self, max_room_stream_id): """Notify for the room events that were queued waiting for a previous event to be persisted. @@ -276,6 +280,8 @@ class Notifier(object): except: logger.exception("Failed to notify listener") + self.notify_replication() + @defer.inlineCallbacks def wait_for_events(self, user_id, timeout, callback, room_ids=None, from_token=StreamToken("s0", "0", "0", "0", "0")): @@ -479,3 +485,45 @@ class Notifier(object): room_streams = self.room_to_user_streams.setdefault(room_id, set()) room_streams.add(new_user_stream) new_user_stream.rooms.add(room_id) + + def notify_replication(self): + """Notify the any replication listeners that there's a new event""" + with PreserveLoggingContext(): + deferred = self.replication_deferred + self.replication_deferred = ObservableDeferred(defer.Deferred()) + deferred.callback(None) + + @defer.inlineCallbacks + def wait_for_replication(self, callback, timeout): + """Wait for an event to happen. 
+ + :param callback: + Gets called whenever an event happens. If this returns a truthy + value then ``wait_for_replication`` returns, otherwise it waits + for another event. + :param int timeout: + How many milliseconds to wait for callback return a truthy value. + :returns: + A deferred that resolves with the value returned by the callback. + """ + listener = _NotificationListener(None) + + def timed_out(): + listener.deferred.cancel() + + timer = self.clock.call_later(timeout / 1000., timed_out) + while True: + listener.deferred = self.replication_deferred.observe() + result = yield callback() + if result: + break + + try: + with PreserveLoggingContext(): + yield listener.deferred + except defer.CancelledError: + break + + self.clock.cancel_call_later(timer, ignore_errs=True) + + defer.returnValue(result) diff --git a/synapse/replication/__init__.py b/synapse/replication/__init__.py new file mode 100644 index 0000000000..b7df13c9ee --- /dev/null +++ b/synapse/replication/__init__.py @@ -0,0 +1,14 @@ +# -*- coding: utf-8 -*- +# Copyright 2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/synapse/replication/resource.py b/synapse/replication/resource.py new file mode 100644 index 0000000000..e0d039518d --- /dev/null +++ b/synapse/replication/resource.py @@ -0,0 +1,320 @@ +# -*- coding: utf-8 -*- +# Copyright 2015 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from synapse.http.servlet import parse_integer, parse_string +from synapse.http.server import request_handler, finish_request + +from twisted.web.resource import Resource +from twisted.web.server import NOT_DONE_YET +from twisted.internet import defer + +import ujson as json + +import collections +import logging + +logger = logging.getLogger(__name__) + +REPLICATION_PREFIX = "/_synapse/replication" + +STREAM_NAMES = ( + ("events",), + ("presence",), + ("typing",), + ("receipts",), + ("user_account_data", "room_account_data", "tag_account_data",), + ("backfill",), +) + + +class ReplicationResource(Resource): + """ + HTTP endpoint for extracting data from synapse. + + The streams of data returned by the endpoint are controlled by the + parameters given to the API. To return a given stream pass a query + parameter with a position in the stream to return data from or the + special value "-1" to return data from the start of the stream. + + If there is no data for any of the supplied streams after the given + position then the request will block until there is data for one + of the streams. 
This allows clients to long-poll this API. + + The possible streams are: + + * "streams": A special stream returing the positions of other streams. + * "events": The new events seen on the server. + * "presence": Presence updates. + * "typing": Typing updates. + * "receipts": Receipt updates. + * "user_account_data": Top-level per user account data. + * "room_account_data: Per room per user account data. + * "tag_account_data": Per room per user tags. + * "backfill": Old events that have been backfilled from other servers. + + The API takes two additional query parameters: + + * "timeout": How long to wait before returning an empty response. + * "limit": The maximum number of rows to return for the selected streams. + + The response is a JSON object with keys for each stream with updates. Under + each key is a JSON object with: + + * "postion": The current position of the stream. + * "field_names": The names of the fields in each row. + * "rows": The updates as an array of arrays. + + There are a number of ways this API could be used: + + 1) To replicate the contents of the backing database to another database. + 2) To be notified when the contents of a shared backing database changes. + 3) To "tail" the activity happening on a server for debugging. + + In the first case the client would track all of the streams and store it's + own copy of the data. + + In the second case the client might theoretically just be able to follow + the "streams" stream to track where the other streams are. However in + practise it will probably need to get the contents of the streams in + order to expire the any in-memory caches. Whether it gets the contents + of the streams from this replication API or directly from the backing + store is a matter of taste. + + In the third case the client would use the "streams" stream to find what + streams are available and their current positions. Then it can start + long-polling this replication API for new data on those streams. 
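As a rough sketch of such a consumer (illustrative only, written in the style of the scripts-dev/tail-synapse.py script added in this patch; handle() is a placeholder, not a real function):

    import requests

    def poll_once(server, positions, timeout=30000, limit=100):
        # positions maps stream name -> last position seen, e.g.
        # {"events": "100", "receipts": "12"}; "-1" asks for the start
        # of the stream.
        params = dict(positions, timeout=timeout, limit=limit)
        response = requests.get(server + "/_synapse/replication", params=params)
        for name, update in response.json().items():
            fields = update["field_names"]
            for row in update["rows"]:
                handle(name, dict(zip(fields, row)))  # placeholder consumer
            positions[name] = update["position"]
        return positions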
+ """ + + isLeaf = True + + def __init__(self, hs): + Resource.__init__(self) # Resource is old-style, so no super() + + self.version_string = hs.version_string + self.store = hs.get_datastore() + self.sources = hs.get_event_sources() + self.presence_handler = hs.get_handlers().presence_handler + self.typing_handler = hs.get_handlers().typing_notification_handler + self.notifier = hs.notifier + + def render_GET(self, request): + self._async_render_GET(request) + return NOT_DONE_YET + + @defer.inlineCallbacks + def current_replication_token(self): + stream_token = yield self.sources.get_current_token() + backfill_token = yield self.store.get_current_backfill_token() + + defer.returnValue(_ReplicationToken( + stream_token.room_stream_id, + int(stream_token.presence_key), + int(stream_token.typing_key), + int(stream_token.receipt_key), + int(stream_token.account_data_key), + backfill_token, + )) + + @request_handler + @defer.inlineCallbacks + def _async_render_GET(self, request): + limit = parse_integer(request, "limit", 100) + timeout = parse_integer(request, "timeout", 10 * 1000) + + request.setHeader(b"Content-Type", b"application/json") + writer = _Writer(request) + + @defer.inlineCallbacks + def replicate(): + current_token = yield self.current_replication_token() + logger.info("Replicating up to %r", current_token) + + yield self.account_data(writer, current_token, limit) + yield self.events(writer, current_token, limit) + yield self.presence(writer, current_token) # TODO: implement limit + yield self.typing(writer, current_token) # TODO: implement limit + yield self.receipts(writer, current_token, limit) + self.streams(writer, current_token) + + logger.info("Replicated %d rows", writer.total) + defer.returnValue(writer.total) + + yield self.notifier.wait_for_replication(replicate, timeout) + + writer.finish() + + def streams(self, writer, current_token): + request_token = parse_string(writer.request, "streams") + + streams = [] + + if request_token is not None: + if request_token == "-1": + for names, position in zip(STREAM_NAMES, current_token): + streams.extend((name, position) for name in names) + else: + items = zip( + STREAM_NAMES, + current_token, + _ReplicationToken(request_token) + ) + for names, current_id, last_id in items: + if last_id < current_id: + streams.extend((name, current_id) for name in names) + + if streams: + writer.write_header_and_rows( + "streams", streams, ("name", "position"), + position=str(current_token) + ) + + @defer.inlineCallbacks + def events(self, writer, current_token, limit): + request_events = parse_integer(writer.request, "events") + request_backfill = parse_integer(writer.request, "backfill") + + if request_events is not None or request_backfill is not None: + if request_events is None: + request_events = current_token.events + if request_backfill is None: + request_backfill = current_token.backfill + events_rows, backfill_rows = yield self.store.get_all_new_events( + request_backfill, request_events, + current_token.backfill, current_token.events, + limit + ) + writer.write_header_and_rows( + "events", events_rows, ("position", "internal", "json") + ) + writer.write_header_and_rows( + "backfill", backfill_rows, ("position", "internal", "json") + ) + + @defer.inlineCallbacks + def presence(self, writer, current_token): + current_position = current_token.presence + + request_presence = parse_integer(writer.request, "presence") + + if request_presence is not None: + presence_rows = yield self.presence_handler.get_all_presence_updates( + 
request_presence, current_position + ) + writer.write_header_and_rows("presence", presence_rows, ( + "position", "user_id", "state", "last_active_ts", + "last_federation_update_ts", "last_user_sync_ts", + "status_msg", "currently_active", + )) + + @defer.inlineCallbacks + def typing(self, writer, current_token): + current_position = current_token.presence + + request_typing = parse_integer(writer.request, "typing") + + if request_typing is not None: + typing_rows = yield self.typing_handler.get_all_typing_updates( + request_typing, current_position + ) + writer.write_header_and_rows("typing", typing_rows, ( + "position", "room_id", "typing" + )) + + @defer.inlineCallbacks + def receipts(self, writer, current_token, limit): + current_position = current_token.receipts + + request_receipts = parse_integer(writer.request, "receipts") + + if request_receipts is not None: + receipts_rows = yield self.store.get_all_updated_receipts( + request_receipts, current_position, limit + ) + writer.write_header_and_rows("receipts", receipts_rows, ( + "position", "room_id", "receipt_type", "user_id", "event_id", "data" + )) + + @defer.inlineCallbacks + def account_data(self, writer, current_token, limit): + current_position = current_token.account_data + + user_account_data = parse_integer(writer.request, "user_account_data") + room_account_data = parse_integer(writer.request, "room_account_data") + tag_account_data = parse_integer(writer.request, "tag_account_data") + + if user_account_data is not None or room_account_data is not None: + if user_account_data is None: + user_account_data = current_position + if room_account_data is None: + room_account_data = current_position + user_rows, room_rows = yield self.store.get_all_updated_account_data( + user_account_data, room_account_data, current_position, limit + ) + writer.write_header_and_rows("user_account_data", user_rows, ( + "position", "user_id", "type", "content" + )) + writer.write_header_and_rows("room_account_data", room_rows, ( + "position", "user_id", "room_id", "type", "content" + )) + + if tag_account_data is not None: + tag_rows = yield self.store.get_all_updated_tags( + tag_account_data, current_position, limit + ) + writer.write_header_and_rows("tag_account_data", tag_rows, ( + "position", "user_id", "room_id", "tags" + )) + + +class _Writer(object): + """Writes the streams as a JSON object as the response to the request""" + def __init__(self, request): + self.streams = {} + self.request = request + self.total = 0 + + def write_header_and_rows(self, name, rows, fields, position=None): + if not rows: + return + + if position is None: + position = rows[-1][0] + + self.streams[name] = { + "position": str(position), + "field_names": fields, + "rows": rows, + } + + self.total += len(rows) + + def finish(self): + self.request.write(json.dumps(self.streams, ensure_ascii=False)) + finish_request(self.request) + + +class _ReplicationToken(collections.namedtuple("_ReplicationToken", ( + "events", "presence", "typing", "receipts", "account_data", "backfill", +))): + __slots__ = [] + + def __new__(cls, *args): + if len(args) == 1: + return cls(*(int(value) for value in args[0].split("_"))) + else: + return super(_ReplicationToken, cls).__new__(cls, *args) + + def __str__(self): + return "_".join(str(value) for value in self) diff --git a/synapse/storage/account_data.py b/synapse/storage/account_data.py index 91cbf399b6..05f98a9a29 100644 --- a/synapse/storage/account_data.py +++ b/synapse/storage/account_data.py @@ -83,8 +83,40 @@ class 
AccountDataStore(SQLBaseStore): "get_account_data_for_room", get_account_data_for_room_txn ) - def get_updated_account_data_for_user(self, user_id, stream_id, room_ids=None): - """Get all the client account_data for a that's changed. + def get_all_updated_account_data(self, last_global_id, last_room_id, + current_id, limit): + """Get all the client account_data that has changed on the server + Args: + last_global_id(int): The position to fetch from for top level data + last_room_id(int): The position to fetch from for per room data + current_id(int): The position to fetch up to. + Returns: + A deferred pair of lists of tuples of stream_id int, user_id string, + room_id string, type string, and content string. + """ + def get_updated_account_data_txn(txn): + sql = ( + "SELECT stream_id, user_id, account_data_type, content" + " FROM account_data WHERE ? < stream_id AND stream_id <= ?" + " ORDER BY stream_id ASC LIMIT ?" + ) + txn.execute(sql, (last_global_id, current_id, limit)) + global_results = txn.fetchall() + + sql = ( + "SELECT stream_id, user_id, room_id, account_data_type, content" + " FROM room_account_data WHERE ? < stream_id AND stream_id <= ?" + " ORDER BY stream_id ASC LIMIT ?" + ) + txn.execute(sql, (last_room_id, current_id, limit)) + room_results = txn.fetchall() + return (global_results, room_results) + return self.runInteraction( + "get_all_updated_account_data_txn", get_updated_account_data_txn + ) + + def get_updated_account_data_for_user(self, user_id, stream_id): + """Get all the client account_data for a that's changed for a user Args: user_id(str): The user to get the account_data for. diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 1dd3236829..c0872dd7e2 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -1064,3 +1064,48 @@ class EventsStore(SQLBaseStore): yield self._end_background_update(self.EVENT_ORIGIN_SERVER_TS_NAME) defer.returnValue(result) + + def get_current_backfill_token(self): + """The current minimum token that backfilled events have reached""" + + # TODO: Fix race with the persit_event txn by using one of the + # stream id managers + return -self.min_stream_token + + def get_all_new_events(self, last_backfill_id, last_forward_id, + current_backfill_id, current_forward_id, limit): + """Get all the new events that have arrived at the server either as + new events or as backfilled events""" + def get_all_new_events_txn(txn): + sql = ( + "SELECT e.stream_ordering, ej.internal_metadata, ej.json" + " FROM events as e" + " JOIN event_json as ej" + " ON e.event_id = ej.event_id AND e.room_id = ej.room_id" + " WHERE ? < e.stream_ordering AND e.stream_ordering <= ?" + " ORDER BY e.stream_ordering ASC" + " LIMIT ?" + ) + if last_forward_id != current_forward_id: + txn.execute(sql, (last_forward_id, current_forward_id, limit)) + new_forward_events = txn.fetchall() + else: + new_forward_events = [] + + sql = ( + "SELECT -e.stream_ordering, ej.internal_metadata, ej.json" + " FROM events as e" + " JOIN event_json as ej" + " ON e.event_id = ej.event_id AND e.room_id = ej.room_id" + " WHERE ? > e.stream_ordering AND e.stream_ordering >= ?" + " ORDER BY e.stream_ordering DESC" + " LIMIT ?" 
+ ) + if last_backfill_id != current_backfill_id: + txn.execute(sql, (-last_backfill_id, -current_backfill_id, limit)) + new_backfill_events = txn.fetchall() + else: + new_backfill_events = [] + + return (new_forward_events, new_backfill_events) + return self.runInteraction("get_all_new_events", get_all_new_events_txn) diff --git a/synapse/storage/presence.py b/synapse/storage/presence.py index 3ef91d34db..de15741893 100644 --- a/synapse/storage/presence.py +++ b/synapse/storage/presence.py @@ -115,6 +115,22 @@ class PresenceStore(SQLBaseStore): args ) + def get_all_presence_updates(self, last_id, current_id): + def get_all_presence_updates_txn(txn): + sql = ( + "SELECT stream_id, user_id, state, last_active_ts," + " last_federation_update_ts, last_user_sync_ts, status_msg," + " currently_active" + " FROM presence_stream" + " WHERE ? < stream_id AND stream_id <= ?" + ) + txn.execute(sql, (last_id, current_id)) + return txn.fetchall() + + return self.runInteraction( + "get_all_presence_updates", get_all_presence_updates_txn + ) + @defer.inlineCallbacks def get_presence_for_users(self, user_ids): rows = yield self._simple_select_many_batch( diff --git a/synapse/storage/receipts.py b/synapse/storage/receipts.py index a7343c97f7..6567fa844f 100644 --- a/synapse/storage/receipts.py +++ b/synapse/storage/receipts.py @@ -390,3 +390,19 @@ class ReceiptsStore(SQLBaseStore): "data": json.dumps(data), } ) + + def get_all_updated_receipts(self, last_id, current_id, limit): + def get_all_updated_receipts_txn(txn): + sql = ( + "SELECT stream_id, room_id, receipt_type, user_id, event_id, data" + " FROM receipts_linearized" + " WHERE ? < stream_id AND stream_id <= ?" + " ORDER BY stream_id ASC" + " LIMIT ?" + ) + txn.execute(sql, (last_id, current_id, limit)) + + return txn.fetchall() + return self.runInteraction( + "get_all_updated_receipts", get_all_updated_receipts_txn + ) diff --git a/synapse/storage/tags.py b/synapse/storage/tags.py index 9551aa9739..b225f508a5 100644 --- a/synapse/storage/tags.py +++ b/synapse/storage/tags.py @@ -58,6 +58,59 @@ class TagsStore(SQLBaseStore): return deferred + @defer.inlineCallbacks + def get_all_updated_tags(self, last_id, current_id, limit): + """Get all the client tags that have changed on the server + Args: + last_id(int): The position to fetch from. + current_id(int): The position to fetch up to. + Returns: + A deferred list of tuples of stream_id int, user_id string, + room_id string, tag string and content string. + """ + def get_all_updated_tags_txn(txn): + sql = ( + "SELECT stream_id, user_id, room_id" + " FROM room_tags_revisions as r" + " WHERE ? < stream_id AND stream_id <= ?" + " ORDER BY stream_id ASC LIMIT ?" + ) + txn.execute(sql, (last_id, current_id, limit)) + return txn.fetchall() + + tag_ids = yield self.runInteraction( + "get_all_updated_tags", get_all_updated_tags_txn + ) + + def get_tag_content(txn, tag_ids): + sql = ( + "SELECT tag, content" + " FROM room_tags" + " WHERE user_id=? AND room_id=?" 
+ ) + results = [] + for stream_id, user_id, room_id in tag_ids: + txn.execute(sql, (user_id, room_id)) + tags = [] + for tag, content in txn.fetchall(): + tags.append(json.dumps(tag) + ":" + content) + tag_json = "{" + ",".join(tags) + "}" + results.append((stream_id, user_id, room_id, tag_json)) + + return results + + batch_size = 50 + results = [] + for i in xrange(0, len(tag_ids), batch_size): + tags = yield self.runInteraction( + "get_all_updated_tag_content", + get_tag_content, + tag_ids[i:i + batch_size], + ) + results.extend(tags) + + defer.returnValue(results) + @defer.inlineCallbacks def get_updated_tags(self, user_id, stream_id): """Get all the tags for the rooms where the tags have changed since the diff --git a/tests/replication/__init__.py b/tests/replication/__init__.py new file mode 100644 index 0000000000..b7df13c9ee --- /dev/null +++ b/tests/replication/__init__.py @@ -0,0 +1,14 @@ +# -*- coding: utf-8 -*- +# Copyright 2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/replication/test_resource.py b/tests/replication/test_resource.py new file mode 100644 index 0000000000..38daaf87e2 --- /dev/null +++ b/tests/replication/test_resource.py @@ -0,0 +1,179 @@ +# -*- coding: utf-8 -*- +# Copyright 2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
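One note on the tag stream added above, before moving on to the tests: the "tags" value that get_all_updated_tags hands to replication is a single JSON object assembled by string concatenation, mapping tag name to tag content. A consumer of the tag_account_data stream can therefore decode each row with a plain json.loads; an illustrative sketch, not part of the patch:

    import json

    def parse_tag_row(row):
        # Row layout from get_all_updated_tags:
        # (stream_id, user_id, room_id, tag_json)
        stream_id, user_id, room_id, tag_json = row
        tags = json.loads(tag_json)  # {tag_name: tag_content}
        return stream_id, user_id, room_id, tags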
+ +from synapse.replication.resource import ReplicationResource +from synapse.types import Requester, UserID + +from twisted.internet import defer +from tests import unittest +from tests.utils import setup_test_homeserver +from mock import Mock, NonCallableMock +import json +import contextlib + + +class ReplicationResourceCase(unittest.TestCase): + @defer.inlineCallbacks + def setUp(self): + self.hs = yield setup_test_homeserver( + "red", + http_client=None, + replication_layer=Mock(), + ratelimiter=NonCallableMock(spec_set=[ + "send_message", + ]), + ) + self.user = UserID.from_string("@seeing:red") + + self.hs.get_ratelimiter().send_message.return_value = (True, 0) + + self.resource = ReplicationResource(self.hs) + + @defer.inlineCallbacks + def test_streams(self): + # Passing "-1" returns the current stream positions + code, body = yield self.get(streams="-1") + self.assertEquals(code, 200) + self.assertEquals(body["streams"]["field_names"], ["name", "position"]) + position = body["streams"]["position"] + # Passing the current position returns an empty response after the + # timeout + get = self.get(streams=str(position), timeout="0") + self.hs.clock.advance_time_msec(1) + code, body = yield get + self.assertEquals(code, 200) + self.assertEquals(body, {}) + + @defer.inlineCallbacks + def test_events(self): + get = self.get(events="-1", timeout="0") + yield self.hs.get_handlers().room_creation_handler.create_room( + Requester(self.user, "", False), {} + ) + code, body = yield get + self.assertEquals(code, 200) + self.assertEquals(body["events"]["field_names"], [ + "position", "internal", "json" + ]) + + @defer.inlineCallbacks + def test_presence(self): + get = self.get(presence="-1") + yield self.hs.get_handlers().presence_handler.set_state( + self.user, {"presence": "online"} + ) + code, body = yield get + self.assertEquals(code, 200) + self.assertEquals(body["presence"]["field_names"], [ + "position", "user_id", "state", "last_active_ts", + "last_federation_update_ts", "last_user_sync_ts", + "status_msg", "currently_active", + ]) + + @defer.inlineCallbacks + def test_typing(self): + room_id = yield self.create_room() + get = self.get(typing="-1") + yield self.hs.get_handlers().typing_notification_handler.started_typing( + self.user, self.user, room_id, timeout=2 + ) + code, body = yield get + self.assertEquals(code, 200) + self.assertEquals(body["typing"]["field_names"], [ + "position", "room_id", "typing" + ]) + + @defer.inlineCallbacks + def test_receipts(self): + room_id = yield self.create_room() + event_id = yield self.send_text_message(room_id, "Hello, World") + get = self.get(receipts="-1") + yield self.hs.get_handlers().receipts_handler.received_client_receipt( + room_id, "m.read", self.user.to_string(), event_id + ) + code, body = yield get + self.assertEquals(code, 200) + self.assertEquals(body["receipts"]["field_names"], [ + "position", "room_id", "receipt_type", "user_id", "event_id", "data" + ]) + + def _test_timeout(stream): + """Check that a request for the given stream timesout""" + @defer.inlineCallbacks + def test_timeout(self): + get = self.get(**{stream: "-1", "timeout": "0"}) + self.hs.clock.advance_time_msec(1) + code, body = yield get + self.assertEquals(code, 200) + self.assertEquals(body, {}) + test_timeout.__name__ = "test_timeout_%s" % (stream) + return test_timeout + + test_timeout_events = _test_timeout("events") + test_timeout_presence = _test_timeout("presence") + test_timeout_typing = _test_timeout("typing") + test_timeout_receipts = 
_test_timeout("receipts") + test_timeout_user_account_data = _test_timeout("user_account_data") + test_timeout_room_account_data = _test_timeout("room_account_data") + test_timeout_tag_account_data = _test_timeout("tag_account_data") + test_timeout_backfill = _test_timeout("backfill") + + @defer.inlineCallbacks + def send_text_message(self, room_id, message): + handler = self.hs.get_handlers().message_handler + event = yield handler.create_and_send_nonmember_event({ + "type": "m.room.message", + "content": {"body": "message", "msgtype": "m.text"}, + "room_id": room_id, + "sender": self.user.to_string(), + }) + defer.returnValue(event.event_id) + + @defer.inlineCallbacks + def create_room(self): + result = yield self.hs.get_handlers().room_creation_handler.create_room( + Requester(self.user, "", False), {} + ) + defer.returnValue(result["room_id"]) + + @defer.inlineCallbacks + def get(self, **params): + request = NonCallableMock(spec_set=[ + "write", "finish", "setResponseCode", "setHeader", "args", + "method", "processing" + ]) + + request.method = "GET" + request.args = {k: [v] for k, v in params.items()} + + @contextlib.contextmanager + def processing(): + yield + request.processing = processing + + yield self.resource._async_render_GET(request) + self.assertTrue(request.finish.called) + + if request.setResponseCode.called: + response_code = request.setResponseCode.call_args[0][0] + else: + response_code = 200 + + response_json = "".join( + call[0][0] for call in request.write.call_args_list + ) + response_body = json.loads(response_json) + + defer.returnValue((response_code, response_body)) diff --git a/tests/utils.py b/tests/utils.py index bf7a31ff9e..dfbee5c23a 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -239,9 +239,10 @@ class MockClock(object): def looping_call(self, function, interval): pass - def cancel_call_later(self, timer): + def cancel_call_later(self, timer, ignore_errs=False): if timer[2]: - raise Exception("Cannot cancel an expired timer") + if not ignore_errs: + raise Exception("Cannot cancel an expired timer") timer[2] = True self.timers = [t for t in self.timers if t != timer] From 8a1d3b86af5a6b23bd45ed137a4eb2b53c274297 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 1 Mar 2016 17:27:22 +0000 Subject: [PATCH 246/349] Handle rejections of invites from local users locally Slightly hacky fix to SYN-642, which avoids the federation codepath when trying to reject invites from local users. --- synapse/handlers/_base.py | 16 +++++++++++++--- synapse/handlers/room.py | 23 ++++++++++++++++++----- 2 files changed, 31 insertions(+), 8 deletions(-) diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py index bdade98bf7..51bd1ee4de 100644 --- a/synapse/handlers/_base.py +++ b/synapse/handlers/_base.py @@ -199,8 +199,7 @@ class BaseHandler(object): # events in the room, because we don't know enough about the graph # fragment we received to treat it like a graph, so the above returned # no relevant events. It may have returned some events (if we have - # joined and left the room), but not useful ones, like the invite. So we - # forcibly set our context to the invite we received over federation. + # joined and left the room), but not useful ones, like the invite. 
if ( not self.is_host_in_room(context.current_state) and builder.type == EventTypes.Member @@ -208,7 +207,18 @@ class BaseHandler(object): prev_member_event = yield self.store.get_room_member( builder.sender, builder.room_id ) - if prev_member_event: + + # if we have the prev_member_event in context, we didn't receive + # the invite over federation. (More likely is that the inviting + # user, and all other local users, have left, making + # is_host_in_room return false). + # + context_events = (e.event_id for e in context.current_state.values()) + + if prev_member_event and not prev_member_event.event_id in context_events: + # The prev_member_event is missing from context, so it must + # have arrived over federation and is an outlier. We forcibly + # set our context to the invite we received over federation builder.prev_events = ( prev_member_event.event_id, prev_member_event.prev_events diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index d2de23a6cc..714bff1750 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -521,8 +521,24 @@ class RoomMemberHandler(BaseHandler): action = "remote_join" elif event.membership == Membership.LEAVE: is_host_in_room = self.is_host_in_room(context.current_state) + if not is_host_in_room: - action = "remote_reject" + # perhaps we've been invited + inviter = self.get_inviter(target_user.to_string(), context.current_state) + if not inviter: + raise SynapseError(404, "Not a known room") + + if inviter.domain == self.server_name: + # the inviter was on our server, but has now left. Carry on + # with the normal rejection codepath. + # + # This is a bit of a hack, because the room might still be + # active on other servers. + pass + else: + # send the rejection to the inviter's HS. + remote_room_hosts = [inviter.domain] + action = "remote_reject" federation_handler = self.hs.get_handlers().federation_handler @@ -541,11 +557,8 @@ class RoomMemberHandler(BaseHandler): event.content, ) elif action == "remote_reject": - inviter = self.get_inviter(target_user.to_string(), context.current_state) - if not inviter: - raise SynapseError(404, "No known servers") yield federation_handler.do_remotely_reject_invite( - [inviter.domain], + remote_room_hosts, room_id, event.user_id ) From 05ea111c47efaec7f24cbe4c9bfae972d4b1f87f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 1 Mar 2016 17:45:24 +0000 Subject: [PATCH 247/349] Fix pyflakes warning --- synapse/handlers/_base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py index 51bd1ee4de..c1f40a4ec0 100644 --- a/synapse/handlers/_base.py +++ b/synapse/handlers/_base.py @@ -215,7 +215,7 @@ class BaseHandler(object): # context_events = (e.event_id for e in context.current_state.values()) - if prev_member_event and not prev_member_event.event_id in context_events: + if prev_member_event and prev_member_event.event_id not in context_events: # The prev_member_event is missing from context, so it must # have arrived over federation and is an outlier. 
We forcibly # set our context to the invite we received over federation From a1cf9e3bf343c3e5adb8dce7923726aa9b09115e Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Tue, 1 Mar 2016 13:35:37 +0000 Subject: [PATCH 248/349] Add a stream for push rule updates --- synapse/storage/__init__.py | 5 +- synapse/storage/_base.py | 25 ++- synapse/storage/push_rule.py | 173 ++++++++++++++---- .../schema/delta/30/push_rule_stream.sql | 38 ++++ synapse/storage/util/id_generators.py | 84 ++++++--- 5 files changed, 251 insertions(+), 74 deletions(-) create mode 100644 synapse/storage/schema/delta/30/push_rule_stream.sql diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index f257721ea3..e2d7b52569 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -45,7 +45,7 @@ from .search import SearchStore from .tags import TagsStore from .account_data import AccountDataStore -from util.id_generators import IdGenerator, StreamIdGenerator +from util.id_generators import IdGenerator, StreamIdGenerator, ChainedIdGenerator from synapse.api.constants import PresenceState from synapse.util.caches.stream_change_cache import StreamChangeCache @@ -122,6 +122,9 @@ class DataStore(RoomMemberStore, RoomStore, self._pushers_id_gen = IdGenerator(db_conn, "pushers", "id") self._push_rule_id_gen = IdGenerator(db_conn, "push_rules", "id") self._push_rules_enable_id_gen = IdGenerator(db_conn, "push_rules_enable", "id") + self._push_rules_stream_id_gen = ChainedIdGenerator( + self._stream_id_gen, db_conn, "push_rules_stream", "stream_id" + ) events_max = self._stream_id_gen.get_max_token() event_cache_prefill, min_event_val = self._get_cache_dict( diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 2e97ac84a8..7dc67ecd57 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -766,6 +766,19 @@ class SQLBaseStore(object): """Executes a DELETE query on the named table, expecting to delete a single row. + Args: + table : string giving the table name + keyvalues : dict of column names and values to select the row with + """ + return self.runInteraction( + desc, self._simple_delete_one_txn, table, keyvalues + ) + + @staticmethod + def _simple_delete_one_txn(txn, table, keyvalues): + """Executes a DELETE query on the named table, expecting to delete a + single row. + Args: table : string giving the table name keyvalues : dict of column names and values to select the row with @@ -775,13 +788,11 @@ class SQLBaseStore(object): " AND ".join("%s = ?" 
% (k, ) for k in keyvalues) ) - def func(txn): - txn.execute(sql, keyvalues.values()) - if txn.rowcount == 0: - raise StoreError(404, "No row found") - if txn.rowcount > 1: - raise StoreError(500, "more than one row matched") - return self.runInteraction(desc, func) + txn.execute(sql, keyvalues.values()) + if txn.rowcount == 0: + raise StoreError(404, "No row found") + if txn.rowcount > 1: + raise StoreError(500, "more than one row matched") @staticmethod def _simple_delete_txn(txn, table, keyvalues): diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py index 56e69495b1..f3ebd49492 100644 --- a/synapse/storage/push_rule.py +++ b/synapse/storage/push_rule.py @@ -99,30 +99,31 @@ class PushRuleStore(SQLBaseStore): results.setdefault(row['user_name'], {})[row['rule_id']] = row['enabled'] defer.returnValue(results) + @defer.inlineCallbacks def add_push_rule( self, user_id, rule_id, priority_class, conditions, actions, before=None, after=None ): conditions_json = json.dumps(conditions) actions_json = json.dumps(actions) - - if before or after: - return self.runInteraction( - "_add_push_rule_relative_txn", - self._add_push_rule_relative_txn, - user_id, rule_id, priority_class, - conditions_json, actions_json, before, after, - ) - else: - return self.runInteraction( - "_add_push_rule_highest_priority_txn", - self._add_push_rule_highest_priority_txn, - user_id, rule_id, priority_class, - conditions_json, actions_json, - ) + with self._push_rules_stream_id_gen.get_next() as (stream_id, stream_ordering): + if before or after: + yield self.runInteraction( + "_add_push_rule_relative_txn", + self._add_push_rule_relative_txn, + stream_id, stream_ordering, user_id, rule_id, priority_class, + conditions_json, actions_json, before, after, + ) + else: + yield self.runInteraction( + "_add_push_rule_highest_priority_txn", + self._add_push_rule_highest_priority_txn, + stream_id, stream_ordering, user_id, rule_id, priority_class, + conditions_json, actions_json, + ) def _add_push_rule_relative_txn( - self, txn, user_id, rule_id, priority_class, + self, txn, stream_id, stream_ordering, user_id, rule_id, priority_class, conditions_json, actions_json, before, after ): # Lock the table since otherwise we'll have annoying races between the @@ -174,12 +175,12 @@ class PushRuleStore(SQLBaseStore): txn.execute(sql, (user_id, priority_class, new_rule_priority)) self._upsert_push_rule_txn( - txn, user_id, rule_id, priority_class, new_rule_priority, - conditions_json, actions_json, + txn, stream_id, stream_ordering, user_id, rule_id, priority_class, + new_rule_priority, conditions_json, actions_json, ) def _add_push_rule_highest_priority_txn( - self, txn, user_id, rule_id, priority_class, + self, txn, stream_id, stream_ordering, user_id, rule_id, priority_class, conditions_json, actions_json ): # Lock the table since otherwise we'll have annoying races between the @@ -201,13 +202,13 @@ class PushRuleStore(SQLBaseStore): self._upsert_push_rule_txn( txn, - user_id, rule_id, priority_class, new_prio, + stream_id, stream_ordering, user_id, rule_id, priority_class, new_prio, conditions_json, actions_json, ) def _upsert_push_rule_txn( - self, txn, user_id, rule_id, priority_class, - priority, conditions_json, actions_json + self, txn, stream_id, stream_ordering, user_id, rule_id, priority_class, + priority, conditions_json, actions_json, update_stream=True ): """Specialised version of _simple_upsert_txn that picks a push_rule_id using the _push_rule_id_gen if it needs to insert the rule. 
It assumes @@ -242,6 +243,23 @@ class PushRuleStore(SQLBaseStore): }, ) + if update_stream: + self._simple_insert_txn( + txn, + table="push_rules_stream", + values={ + "stream_id": stream_id, + "stream_ordering": stream_ordering, + "user_id": user_id, + "rule_id": rule_id, + "op": "ADD", + "priority_class": priority_class, + "priority": priority, + "conditions": conditions_json, + "actions": actions_json, + } + ) + txn.call_after( self.get_push_rules_for_user.invalidate, (user_id,) ) @@ -260,25 +278,47 @@ class PushRuleStore(SQLBaseStore): user_id (str): The matrix ID of the push rule owner rule_id (str): The rule_id of the rule to be deleted """ - yield self._simple_delete_one( - "push_rules", - {'user_name': user_id, 'rule_id': rule_id}, - desc="delete_push_rule", - ) + def delete_push_rule_txn(txn, stream_id, stream_ordering): + self._simple_delete_one_txn( + txn, + "push_rules", + {'user_name': user_id, 'rule_id': rule_id}, + ) + self._simple_insert_txn( + txn, + table="push_rules_stream", + values={ + "stream_id": stream_id, + "stream_ordering": stream_ordering, + "user_id": user_id, + "rule_id": rule_id, + "op": "DELETE", + } + ) + txn.call_after( + self.get_push_rules_for_user.invalidate, (user_id,) + ) + txn.call_after( + self.get_push_rules_enabled_for_user.invalidate, (user_id,) + ) - self.get_push_rules_for_user.invalidate((user_id,)) - self.get_push_rules_enabled_for_user.invalidate((user_id,)) + with self._push_rules_stream_id_gen.get_next() as (stream_id, stream_ordering): + yield self.runInteraction( + "delete_push_rule", delete_push_rule_txn, stream_id, stream_ordering + ) @defer.inlineCallbacks def set_push_rule_enabled(self, user_id, rule_id, enabled): - ret = yield self.runInteraction( - "_set_push_rule_enabled_txn", - self._set_push_rule_enabled_txn, - user_id, rule_id, enabled - ) - defer.returnValue(ret) + with self._push_rules_stream_id_gen.get_next() as (stream_id, stream_ordering): + yield self.runInteraction( + "_set_push_rule_enabled_txn", + self._set_push_rule_enabled_txn, + stream_id, stream_ordering, user_id, rule_id, enabled + ) - def _set_push_rule_enabled_txn(self, txn, user_id, rule_id, enabled): + def _set_push_rule_enabled_txn( + self, txn, stream_id, stream_ordering, user_id, rule_id, enabled + ): new_id = self._push_rules_enable_id_gen.get_next() self._simple_upsert_txn( txn, @@ -287,6 +327,19 @@ class PushRuleStore(SQLBaseStore): {'enabled': 1 if enabled else 0}, {'id': new_id}, ) + + self._simple_insert_txn( + txn, + "push_rules_stream", + values={ + "stream_id": stream_id, + "stream_ordering": stream_ordering, + "user_id": user_id, + "rule_id": rule_id, + "op": "ENABLE" if enabled else "DISABLE", + } + ) + txn.call_after( self.get_push_rules_for_user.invalidate, (user_id,) ) @@ -294,18 +347,20 @@ class PushRuleStore(SQLBaseStore): self.get_push_rules_enabled_for_user.invalidate, (user_id,) ) + @defer.inlineCallbacks def set_push_rule_actions(self, user_id, rule_id, actions, is_default_rule): actions_json = json.dumps(actions) - def set_push_rule_actions_txn(txn): + def set_push_rule_actions_txn(txn, stream_id, stream_ordering): if is_default_rule: # Add a dummy rule to the rules table with the user specified # actions. 
priority_class = -1 priority = 1 self._upsert_push_rule_txn( - txn, user_id, rule_id, priority_class, priority, - "[]", actions_json + txn, stream_id, stream_ordering, user_id, rule_id, + priority_class, priority, "[]", actions_json, + update_stream=False ) else: self._simple_update_one_txn( @@ -315,8 +370,46 @@ class PushRuleStore(SQLBaseStore): {'actions': actions_json}, ) + self._simple_insert_txn( + txn, + "push_rules_stream", + values={ + "stream_id": stream_id, + "stream_ordering": stream_ordering, + "user_id": user_id, + "rule_id": rule_id, + "op": "ACTIONS", + "actions": actions_json, + } + ) + + txn.call_after( + self.get_push_rules_for_user.invalidate, (user_id,) + ) + txn.call_after( + self.get_push_rules_enabled_for_user.invalidate, (user_id,) + ) + + with self._push_rules_stream_id_gen.get_next() as (stream_id, stream_ordering): + yield self.runInteraction( + "set_push_rule_actions", set_push_rule_actions_txn, + stream_id, stream_ordering + ) + + def get_all_push_rule_updates(self, last_id, current_id, limit): + """Get all the push rules changes that have happend on the server""" + def get_all_push_rule_updates_txn(txn): + sql = ( + "SELECT stream_id, stream_ordering, user_id, rule_id," + " op, priority_class, priority, conditions, actions" + " FROM push_rules_stream" + " WHERE ? < stream_id and stream_id <= ?" + " ORDER BY stream_id ASC LIMIT ?" + ) + txn.execute(sql, (last_id, current_id, limit)) + return txn.fetchall() return self.runInteraction( - "set_push_rule_actions", set_push_rule_actions_txn, + "get_all_push_rule_updates", get_all_push_rule_updates_txn ) diff --git a/synapse/storage/schema/delta/30/push_rule_stream.sql b/synapse/storage/schema/delta/30/push_rule_stream.sql new file mode 100644 index 0000000000..e8418bb35f --- /dev/null +++ b/synapse/storage/schema/delta/30/push_rule_stream.sql @@ -0,0 +1,38 @@ +/* Copyright 2016 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + + +CREATE TABLE push_rules_stream( + stream_id BIGINT NOT NULL, + stream_ordering BIGINT NOT NULL, + user_id TEXT NOT NULL, + rule_id TEXT NOT NULL, + op TEXT NOT NULL, -- One of "ENABLE", "DISABLE", "ACTIONS", "ADD", "DELETE" + priority_class SMALLINT, + priority INTEGER, + conditions TEXT, + actions TEXT +); + +-- The extra data for each operation is: +-- * ENABLE, DISABLE, DELETE: [] +-- * ACTIONS: ["actions"] +-- * ADD: ["priority_class", "priority", "actions", "conditions"] + +-- Index for replication queries. +CREATE INDEX push_rules_stream_id ON push_rules_stream(stream_id); +-- Index for /sync queries. 
+CREATE INDEX push_rules_stream_user_stream_id on push_rules_stream(user_id, stream_id); diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index efe3f68e6e..af425ba9a4 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -20,23 +20,21 @@ import threading class IdGenerator(object): def __init__(self, db_conn, table, column): - self.table = table - self.column = column self._lock = threading.Lock() - cur = db_conn.cursor() - self._next_id = self._load_next_id(cur) - cur.close() - - def _load_next_id(self, txn): - txn.execute("SELECT MAX(%s) FROM %s" % (self.column, self.table,)) - val, = txn.fetchone() - return val + 1 if val else 1 + self._next_id = _load_max_id(db_conn, table, column) def get_next(self): with self._lock: - i = self._next_id self._next_id += 1 - return i + return self._next_id + + +def _load_max_id(db_conn, table, column): + cur = db_conn.cursor() + cur.execute("SELECT MAX(%s) FROM %s" % (column, table,)) + val, = cur.fetchone() + cur.close() + return val if val else 1 class StreamIdGenerator(object): @@ -52,23 +50,10 @@ class StreamIdGenerator(object): # ... persist event ... """ def __init__(self, db_conn, table, column): - self.table = table - self.column = column - self._lock = threading.Lock() - - cur = db_conn.cursor() - self._current_max = self._load_current_max(cur) - cur.close() - + self._current_max = _load_max_id(db_conn, table, column) self._unfinished_ids = deque() - def _load_current_max(self, txn): - txn.execute("SELECT MAX(%s) FROM %s" % (self.column, self.table)) - rows = txn.fetchall() - val, = rows[0] - return int(val) if val else 1 - def get_next(self): """ Usage: @@ -124,3 +109,50 @@ class StreamIdGenerator(object): return self._unfinished_ids[0] - 1 return self._current_max + + +class ChainedIdGenerator(object): + """Used to generate new stream ids where the stream must be kept in sync + with another stream. It generates pairs of IDs, the first element is an + integer ID for this stream, the second element is the ID for the stream + that this stream needs to be kept in sync with.""" + + def __init__(self, chained_generator, db_conn, table, column): + self.chained_generator = chained_generator + self._lock = threading.Lock() + self._current_max = _load_max_id(db_conn, table, column) + self._unfinished_ids = deque() + + def get_next(self): + """ + Usage: + with stream_id_gen.get_next() as (stream_id, chained_id): + # ... persist event ... + """ + with self._lock: + self._current_max += 1 + next_id = self._current_max + chained_id = self.chained_generator.get_max_token() + + self._unfinished_ids.append((next_id, chained_id)) + + @contextlib.contextmanager + def manager(): + try: + yield (next_id, chained_id) + finally: + with self._lock: + self._unfinished_ids.remove((next_id, chained_id)) + + return manager() + + def get_max_token(self): + """Returns the maximum stream id such that all stream ids less than or + equal to it have been successfully persisted. 
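As an illustration drawn from the push rule storage changes in this patch (not new behaviour): writers allocate ids with

        with self._push_rules_stream_id_gen.get_next() as (stream_id, stream_ordering):
            # stream_id:       position in the push_rules stream
            # stream_ordering: the room stream position this change is chained to
            ...

    and get_max_token() then returns the (push_rules_stream_id, room_stream_id) pair that get_push_rules_stream_token() and the replication resource report as the stream position.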
+ """ + with self._lock: + if self._unfinished_ids: + stream_id, chained_id = self._unfinished_ids[0] + return (stream_id - 1, chained_id) + + return (self._current_max, self.chained_generator.get_max_token()) From dda2058d90a3ad96f2d9f8fb36fc57f8eec55680 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 2 Mar 2016 14:03:10 +0000 Subject: [PATCH 249/349] Add SYNAPSE_CACHE_FACTOR to README --- README.rst | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/README.rst b/README.rst index 39a338c790..8a745259bf 100644 --- a/README.rst +++ b/README.rst @@ -565,4 +565,21 @@ sphinxcontrib-napoleon:: Building internal API documentation:: python setup.py build_sphinx - \ No newline at end of file + + + +Halp!! Synapse eats all my RAM! +=============================== + +Synapse's architecture is quite RAM hungry currently - we deliberately +cache a lot of recent room data and metadata in RAM in order to speed up +common requests. We'll improve this in future, but for now the easiest +way to either reduce the RAM usage (at the risk of slowing things down) +is to set the almost-undocumented ``SYNAPSE_CACHE_FACTOR`` environment +variable. Roughly speaking, a SYNAPSE_CACHE_FACTOR of 1.0 will max out +at around 3-4GB of resident memory - this is what we currently run the +matrix.org on. The default setting is currently 0.1, which is probably +around a ~700MB footprint. You can dial it down further to 0.02 if +desired, which targets roughly ~512MB. Conversely you can dial it up if +you need performance for lots of users and have a box with a lot of RAM. + From 27185de75255191dcc9bf30b1bde9b6fc9a27100 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 2 Mar 2016 14:30:50 +0000 Subject: [PATCH 250/349] Set SYNAPSE_CACHE_FACTOR=1 in jenkins --- jenkins.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/jenkins.sh b/jenkins.sh index 359fd22771..9646ac0b01 100755 --- a/jenkins.sh +++ b/jenkins.sh @@ -3,6 +3,7 @@ : ${WORKSPACE:="$(pwd)"} export PYTHONDONTWRITEBYTECODE=yep +export SYNAPSE_CACHE_FACTOR=1 # Output test results as junit xml export TRIAL_FLAGS="--reporter=subunit" From 9ff940a0ef22304732f31fb3d2256afc2aca6bc4 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 2 Mar 2016 15:40:30 +0000 Subject: [PATCH 251/349] Address review comments --- synapse/handlers/_base.py | 20 ++++++++++++++------ synapse/handlers/room.py | 5 +++-- 2 files changed, 17 insertions(+), 8 deletions(-) diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py index c1f40a4ec0..363b10e3b7 100644 --- a/synapse/handlers/_base.py +++ b/synapse/handlers/_base.py @@ -208,14 +208,22 @@ class BaseHandler(object): builder.sender, builder.room_id ) - # if we have the prev_member_event in context, we didn't receive - # the invite over federation. (More likely is that the inviting - # user, and all other local users, have left, making - # is_host_in_room return false). + # The prev_member_event may already be in context.current_state, + # despite us not being present in the room; in particular, if + # inviting user, and all other local users, have already left. # - context_events = (e.event_id for e in context.current_state.values()) + # In that case, we have all the information we need, and we don't + # want to drop "context" - not least because we may need to handle + # the invite locally, which will require us to have the whole + # context (not just prev_member_event) to auth it. 
+ # + context_event_ids = ( + e.event_id for e in context.current_state.values() + ) - if prev_member_event and prev_member_event.event_id not in context_events: + if (prev_member_event and + prev_member_event.event_id not in context_event_ids + ): # The prev_member_event is missing from context, so it must # have arrived over federation and is an outlier. We forcibly # set our context to the invite we received over federation diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 714bff1750..de72398588 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -528,7 +528,7 @@ class RoomMemberHandler(BaseHandler): if not inviter: raise SynapseError(404, "Not a known room") - if inviter.domain == self.server_name: + if self.hs.is_mine(inviter): # the inviter was on our server, but has now left. Carry on # with the normal rejection codepath. # @@ -537,7 +537,8 @@ class RoomMemberHandler(BaseHandler): pass else: # send the rejection to the inviter's HS. - remote_room_hosts = [inviter.domain] + remote_room_hosts = remote_room_hosts or [] + remote_room_hosts.append(inviter.domain) action = "remote_reject" federation_handler = self.hs.get_handlers().federation_handler From 863d3f26b326b1129cbf7edfb8c5344f3e25a849 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 2 Mar 2016 15:52:50 +0000 Subject: [PATCH 252/349] fix pyflakes quibble --- synapse/handlers/_base.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py index 363b10e3b7..a6f890e0b6 100644 --- a/synapse/handlers/_base.py +++ b/synapse/handlers/_base.py @@ -221,7 +221,8 @@ class BaseHandler(object): e.event_id for e in context.current_state.values() ) - if (prev_member_event and + if ( + prev_member_event and prev_member_event.event_id not in context_event_ids ): # The prev_member_event is missing from context, so it must From 47c361d2f88879d5e4b042a49282c93d7e38f9fb Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Wed, 2 Mar 2016 15:57:54 +0000 Subject: [PATCH 253/349] add 800x600 thumbnails to make vector look prettier (and anyone else who likes big thumbnails) --- synapse/config/repository.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/synapse/config/repository.py b/synapse/config/repository.py index 2fcf872449..2e96c09013 100644 --- a/synapse/config/repository.py +++ b/synapse/config/repository.py @@ -97,4 +97,7 @@ class ContentRepositoryConfig(Config): - width: 640 height: 480 method: scale + - width: 800 + height: 600 + method: scale """ % locals() From fc1f932cc04ed6675fb839b58d81d5ac6c23b4c5 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 2 Mar 2016 16:44:14 +0000 Subject: [PATCH 254/349] Move arg default to the start of the function Also don't overwrite the list that gets passed in. --- synapse/handlers/room.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index de72398588..9ef3219e39 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -493,6 +493,8 @@ class RoomMemberHandler(BaseHandler): Raises: SynapseError if there was a problem changing the membership. """ + remote_room_hosts = remote_room_hosts or [] + target_user = UserID.from_string(event.state_key) room_id = event.room_id @@ -537,8 +539,7 @@ class RoomMemberHandler(BaseHandler): pass else: # send the rejection to the inviter's HS. 
- remote_room_hosts = remote_room_hosts or [] - remote_room_hosts.append(inviter.domain) + remote_room_hosts = remote_room_hosts + [inviter.doman] action = "remote_reject" federation_handler = self.hs.get_handlers().federation_handler From 2223204ebaf7624f4d640f2c56d3a4eb7ff6d98e Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Wed, 2 Mar 2016 17:26:20 +0000 Subject: [PATCH 255/349] Hook push rules up to the replication API --- synapse/replication/resource.py | 28 ++++++++++++++++++++++++++-- synapse/storage/push_rule.py | 6 ++++++ tests/replication/test_resource.py | 6 ++++-- 3 files changed, 36 insertions(+), 4 deletions(-) diff --git a/synapse/replication/resource.py b/synapse/replication/resource.py index e0d039518d..15b7898a45 100644 --- a/synapse/replication/resource.py +++ b/synapse/replication/resource.py @@ -36,6 +36,7 @@ STREAM_NAMES = ( ("receipts",), ("user_account_data", "room_account_data", "tag_account_data",), ("backfill",), + ("push_rules",), ) @@ -63,6 +64,7 @@ class ReplicationResource(Resource): * "room_account_data: Per room per user account data. * "tag_account_data": Per room per user tags. * "backfill": Old events that have been backfilled from other servers. + * "push_rules": Per user changes to push rules. The API takes two additional query parameters: @@ -117,14 +119,16 @@ class ReplicationResource(Resource): def current_replication_token(self): stream_token = yield self.sources.get_current_token() backfill_token = yield self.store.get_current_backfill_token() + push_rules_token, room_stream_token = self.store.get_push_rules_stream_token() defer.returnValue(_ReplicationToken( - stream_token.room_stream_id, + room_stream_token, int(stream_token.presence_key), int(stream_token.typing_key), int(stream_token.receipt_key), int(stream_token.account_data_key), backfill_token, + push_rules_token, )) @request_handler @@ -146,6 +150,7 @@ class ReplicationResource(Resource): yield self.presence(writer, current_token) # TODO: implement limit yield self.typing(writer, current_token) # TODO: implement limit yield self.receipts(writer, current_token, limit) + yield self.push_rules(writer, current_token, limit) self.streams(writer, current_token) logger.info("Replicated %d rows", writer.total) @@ -277,6 +282,21 @@ class ReplicationResource(Resource): "position", "user_id", "room_id", "tags" )) + @defer.inlineCallbacks + def push_rules(self, writer, current_token, limit): + current_position = current_token.push_rules + + push_rules = parse_integer(writer.request, "push_rules") + + if push_rules is not None: + rows = yield self.store.get_all_push_rule_updates( + push_rules, current_position, limit + ) + writer.write_header_and_rows("push_rules", rows, ( + "position", "stream_ordering", "user_id", "rule_id", "op", + "priority_class", "priority", "conditions", "actions" + )) + class _Writer(object): """Writes the streams as a JSON object as the response to the request""" @@ -307,12 +327,16 @@ class _Writer(object): class _ReplicationToken(collections.namedtuple("_ReplicationToken", ( "events", "presence", "typing", "receipts", "account_data", "backfill", + "push_rules" ))): __slots__ = [] def __new__(cls, *args): if len(args) == 1: - return cls(*(int(value) for value in args[0].split("_"))) + streams = [int(value) for value in args[0].split("_")] + if len(streams) < len(cls._fields): + streams.extend([0] * (len(cls._fields) - len(streams))) + return cls(*streams) else: return super(_ReplicationToken, cls).__new__(cls, *args) diff --git a/synapse/storage/push_rule.py 
b/synapse/storage/push_rule.py index f3ebd49492..e034024108 100644 --- a/synapse/storage/push_rule.py +++ b/synapse/storage/push_rule.py @@ -412,6 +412,12 @@ class PushRuleStore(SQLBaseStore): "get_all_push_rule_updates", get_all_push_rule_updates_txn ) + def get_push_rules_stream_token(self): + """Get the position of the push rules stream. + Returns a pair of a stream id for the push_rules stream and the + room stream ordering it corresponds to.""" + return self._push_rules_stream_id_gen.get_max_token() + class RuleNotFoundException(Exception): pass diff --git a/tests/replication/test_resource.py b/tests/replication/test_resource.py index 38daaf87e2..a30d59a865 100644 --- a/tests/replication/test_resource.py +++ b/tests/replication/test_resource.py @@ -35,7 +35,8 @@ class ReplicationResourceCase(unittest.TestCase): "send_message", ]), ) - self.user = UserID.from_string("@seeing:red") + self.user_id = "@seeing:red" + self.user = UserID.from_string(self.user_id) self.hs.get_ratelimiter().send_message.return_value = (True, 0) @@ -101,7 +102,7 @@ class ReplicationResourceCase(unittest.TestCase): event_id = yield self.send_text_message(room_id, "Hello, World") get = self.get(receipts="-1") yield self.hs.get_handlers().receipts_handler.received_client_receipt( - room_id, "m.read", self.user.to_string(), event_id + room_id, "m.read", self.user_id, event_id ) code, body = yield get self.assertEquals(code, 200) @@ -129,6 +130,7 @@ class ReplicationResourceCase(unittest.TestCase): test_timeout_room_account_data = _test_timeout("room_account_data") test_timeout_tag_account_data = _test_timeout("tag_account_data") test_timeout_backfill = _test_timeout("backfill") + test_timeout_push_rules = _test_timeout("push_rules") @defer.inlineCallbacks def send_text_message(self, room_id, message): From ff8b87118dcfb153d972e29c2b77b195244d5ddc Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 2 Mar 2016 18:06:45 +0000 Subject: [PATCH 256/349] Stop using checkpw as it seems to have vanished from bcrypt. Use `bcrypt.hashpw(password, hashed) == hashed` as per the bcrypt README. --- synapse/handlers/auth.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 62e82a2570..7a4afe446d 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -477,4 +477,4 @@ class AuthHandler(BaseHandler): Returns: Whether self.hash(password) == stored_hash (bool). """ - return bcrypt.checkpw(password, stored_hash) + return bcrypt.hashpw(password, stored_hash) == stored_hash From 74cd80e53026d9f603e9e4f9a41cef54de239e16 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 3 Mar 2016 10:28:58 +0000 Subject: [PATCH 257/349] Fix typo --- synapse/handlers/room.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 9ef3219e39..ad7c83f477 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -539,7 +539,7 @@ class RoomMemberHandler(BaseHandler): pass else: # send the rejection to the inviter's HS. 
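On the checkpw replacement in the previous patch: hashpw() accepts an existing
hash in place of a salt, and re-hashing the candidate password that way
reproduces the stored hash only if the password matches, which is what the
one-line check relies on. A small sketch of the same idea in isolation
(validate_hash is an illustrative name; with the Python 2 bcrypt of the time
plain strings work, newer releases expect bytes):

    import bcrypt

    def validate_hash(password, stored_hash):
        # An existing bcrypt hash embeds its own salt and work factor, so
        # hashing the candidate password "with" it yields the same string
        # iff the password is correct.
        return bcrypt.hashpw(password, stored_hash) == stored_hash

    stored = bcrypt.hashpw("s3kr1t", bcrypt.gensalt())
    assert validate_hash("s3kr1t", stored)
    assert not validate_hash("wrong password", stored)

Note that a plain == is not a constant-time comparison, which is part of why
checkpw(), where available, is generally preferred.
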
- remote_room_hosts = remote_room_hosts + [inviter.doman] + remote_room_hosts = remote_room_hosts + [inviter.domain] action = "remote_reject" federation_handler = self.hs.get_handlers().federation_handler From b139e510412430cb7decd58a098871f7ded9ef0c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 3 Mar 2016 11:38:36 +0000 Subject: [PATCH 258/349] jenkins.sh: set -x Also move the options from the shebang line to the body of the script, so that they take effect even if somebody explicitly runs "bash jenkins.sh" --- jenkins.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/jenkins.sh b/jenkins.sh index 9646ac0b01..b826d510c9 100755 --- a/jenkins.sh +++ b/jenkins.sh @@ -1,4 +1,6 @@ -#!/bin/bash -eu +#!/bin/bash + +set -eux : ${WORKSPACE:="$(pwd)"} From ddf9e7b3027eee61086ebfb447c5fa33e9b640fe Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Thu, 3 Mar 2016 14:57:45 +0000 Subject: [PATCH 259/349] Hook up the push rules to the notifier --- synapse/handlers/message.py | 4 +-- synapse/notifier.py | 2 +- synapse/rest/client/v1/push_rule.py | 44 +++++++++++++++++++---------- synapse/streams/events.py | 4 +++ synapse/types.py | 7 +++++ 5 files changed, 43 insertions(+), 18 deletions(-) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index afa7c9c36c..2fa12c8f2b 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -647,8 +647,8 @@ class MessageHandler(BaseHandler): user_id, messages, is_peeking=is_peeking ) - start_token = StreamToken(token[0], 0, 0, 0, 0) - end_token = StreamToken(token[1], 0, 0, 0, 0) + start_token = StreamToken.START.copy_and_replace("room_key", token[0]) + end_token = StreamToken.START.copy_and_replace("room_key", token[1]) time_now = self.clock.time_msec() diff --git a/synapse/notifier.py b/synapse/notifier.py index 3c36a20868..9b69b0333a 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -284,7 +284,7 @@ class Notifier(object): @defer.inlineCallbacks def wait_for_events(self, user_id, timeout, callback, room_ids=None, - from_token=StreamToken("s0", "0", "0", "0", "0")): + from_token=StreamToken.START): """Wait until the callback returns a non empty response or the timeout fires. 
""" diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py index 970a019223..cf68725ca1 100644 --- a/synapse/rest/client/v1/push_rule.py +++ b/synapse/rest/client/v1/push_rule.py @@ -36,6 +36,11 @@ class PushRuleRestServlet(ClientV1RestServlet): SLIGHTLY_PEDANTIC_TRAILING_SLASH_ERROR = ( "Unrecognised request: You probably wanted a trailing slash") + def __init__(self, hs): + super(PushRuleRestServlet, self).__init__(hs) + self.store = hs.get_datastore() + self.notifier = hs.get_notifier() + @defer.inlineCallbacks def on_PUT(self, request): spec = _rule_spec_from_path(request.postpath) @@ -51,8 +56,11 @@ class PushRuleRestServlet(ClientV1RestServlet): content = _parse_json(request) + user_id = requester.user.to_string() + if 'attr' in spec: - yield self.set_rule_attr(requester.user.to_string(), spec, content) + yield self.set_rule_attr(user_id, spec, content) + self.notify_user(user_id) defer.returnValue((200, {})) if spec['rule_id'].startswith('.'): @@ -77,8 +85,8 @@ class PushRuleRestServlet(ClientV1RestServlet): after = _namespaced_rule_id(spec, after[0]) try: - yield self.hs.get_datastore().add_push_rule( - user_id=requester.user.to_string(), + yield self.store.add_push_rule( + user_id=user_id, rule_id=_namespaced_rule_id_from_spec(spec), priority_class=priority_class, conditions=conditions, @@ -86,6 +94,7 @@ class PushRuleRestServlet(ClientV1RestServlet): before=before, after=after ) + self.notify_user(user_id) except InconsistentRuleException as e: raise SynapseError(400, e.message) except RuleNotFoundException as e: @@ -98,13 +107,15 @@ class PushRuleRestServlet(ClientV1RestServlet): spec = _rule_spec_from_path(request.postpath) requester = yield self.auth.get_user_by_req(request) + user_id = requester.user.to_string() namespaced_rule_id = _namespaced_rule_id_from_spec(spec) try: - yield self.hs.get_datastore().delete_push_rule( - requester.user.to_string(), namespaced_rule_id + yield self.store.delete_push_rule( + user_id, namespaced_rule_id ) + self.notify_user(user_id) defer.returnValue((200, {})) except StoreError as e: if e.code == 404: @@ -115,14 +126,12 @@ class PushRuleRestServlet(ClientV1RestServlet): @defer.inlineCallbacks def on_GET(self, request): requester = yield self.auth.get_user_by_req(request) - user = requester.user + user_id = requester.user.to_string() # we build up the full structure and then decide which bits of it # to send which means doing unnecessary work sometimes but is # is probably not going to make a whole lot of difference - rawrules = yield self.hs.get_datastore().get_push_rules_for_user( - user.to_string() - ) + rawrules = yield self.store.get_push_rules_for_user(user_id) ruleslist = [] for rawrule in rawrules: @@ -138,8 +147,7 @@ class PushRuleRestServlet(ClientV1RestServlet): rules['global'] = _add_empty_priority_class_arrays(rules['global']) - enabled_map = yield self.hs.get_datastore().\ - get_push_rules_enabled_for_user(user.to_string()) + enabled_map = yield self.store.get_push_rules_enabled_for_user(user_id) for r in ruleslist: rulearray = None @@ -152,9 +160,9 @@ class PushRuleRestServlet(ClientV1RestServlet): pattern_type = c.pop("pattern_type", None) if pattern_type == "user_id": - c["pattern"] = user.to_string() + c["pattern"] = user_id elif pattern_type == "user_localpart": - c["pattern"] = user.localpart + c["pattern"] = requester.user.localpart rulearray = rules['global'][template_name] @@ -188,6 +196,12 @@ class PushRuleRestServlet(ClientV1RestServlet): def on_OPTIONS(self, _): return 200, {} + def 
notify_user(self, user_id): + stream_id = self.store.get_push_rules_stream_token() + self.notifier.on_new_event( + "push_rules_key", stream_id, users=[user_id] + ) + def set_rule_attr(self, user_id, spec, val): if spec['attr'] == 'enabled': if isinstance(val, dict) and "enabled" in val: @@ -198,7 +212,7 @@ class PushRuleRestServlet(ClientV1RestServlet): # bools directly, so let's not break them. raise SynapseError(400, "Value for 'enabled' must be boolean") namespaced_rule_id = _namespaced_rule_id_from_spec(spec) - return self.hs.get_datastore().set_push_rule_enabled( + return self.store.set_push_rule_enabled( user_id, namespaced_rule_id, val ) elif spec['attr'] == 'actions': @@ -210,7 +224,7 @@ class PushRuleRestServlet(ClientV1RestServlet): if is_default_rule: if namespaced_rule_id not in BASE_RULE_IDS: raise SynapseError(404, "Unknown rule %r" % (namespaced_rule_id,)) - return self.hs.get_datastore().set_push_rule_actions( + return self.store.set_push_rule_actions( user_id, namespaced_rule_id, actions, is_default_rule ) else: diff --git a/synapse/streams/events.py b/synapse/streams/events.py index 5ddf4e988b..d4c0bb6732 100644 --- a/synapse/streams/events.py +++ b/synapse/streams/events.py @@ -38,9 +38,12 @@ class EventSources(object): name: cls(hs) for name, cls in EventSources.SOURCE_TYPES.items() } + self.store = hs.get_datastore() @defer.inlineCallbacks def get_current_token(self, direction='f'): + push_rules_key, _ = self.store.get_push_rules_stream_token() + token = StreamToken( room_key=( yield self.sources["room"].get_current_key(direction) @@ -57,5 +60,6 @@ class EventSources(object): account_data_key=( yield self.sources["account_data"].get_current_key() ), + push_rules_key=push_rules_key, ) defer.returnValue(token) diff --git a/synapse/types.py b/synapse/types.py index d5bd95cbd3..5b166835bd 100644 --- a/synapse/types.py +++ b/synapse/types.py @@ -115,6 +115,7 @@ class StreamToken( "typing_key", "receipt_key", "account_data_key", + "push_rules_key", )) ): _SEPARATOR = "_" @@ -150,6 +151,7 @@ class StreamToken( or (int(other.typing_key) < int(self.typing_key)) or (int(other.receipt_key) < int(self.receipt_key)) or (int(other.account_data_key) < int(self.account_data_key)) + or (int(other.push_rules_key) < int(self.push_rules_key)) ) def copy_and_advance(self, key, new_value): @@ -174,6 +176,11 @@ class StreamToken( return StreamToken(**d) +StreamToken.START = StreamToken( + *(["s0"] + ["0"] * (len(StreamToken._fields) - 1)) +) + + class RoomStreamToken(namedtuple("_StreamToken", "topological stream")): """Tokens are positions between events. The token "s1" comes after event 1. From 3406eba4ef40de888ebb5b22c0ea4925b2dddeb1 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Thu, 3 Mar 2016 16:11:59 +0000 Subject: [PATCH 260/349] Move the code for formatting push rules into a separate function --- synapse/push/clientformat.py | 112 ++++++++++++++++++++++++++++ synapse/rest/client/v1/push_rule.py | 90 +--------------------- 2 files changed, 116 insertions(+), 86 deletions(-) create mode 100644 synapse/push/clientformat.py diff --git a/synapse/push/clientformat.py b/synapse/push/clientformat.py new file mode 100644 index 0000000000..ae9db9ec2f --- /dev/null +++ b/synapse/push/clientformat.py @@ -0,0 +1,112 @@ +# -*- coding: utf-8 -*- +# Copyright 2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from synapse.push.baserules import list_with_base_rules + +from synapse.push.rulekinds import ( + PRIORITY_CLASS_MAP, PRIORITY_CLASS_INVERSE_MAP +) + +import copy +import simplejson as json + + +def format_push_rules_for_user(user, rawrules, enabled_map): + """Converts a list of rawrules and a enabled map into nested dictionaries + to match the Matrix client-server format for push rules""" + + ruleslist = [] + for rawrule in rawrules: + rule = dict(rawrule) + rule["conditions"] = json.loads(rawrule["conditions"]) + rule["actions"] = json.loads(rawrule["actions"]) + ruleslist.append(rule) + + # We're going to be mutating this a lot, so do a deep copy + ruleslist = copy.deepcopy(list_with_base_rules(ruleslist)) + + rules = {'global': {}, 'device': {}} + + rules['global'] = _add_empty_priority_class_arrays(rules['global']) + + for r in ruleslist: + rulearray = None + + template_name = _priority_class_to_template_name(r['priority_class']) + + # Remove internal stuff. + for c in r["conditions"]: + c.pop("_id", None) + + pattern_type = c.pop("pattern_type", None) + if pattern_type == "user_id": + c["pattern"] = user.to_string() + elif pattern_type == "user_localpart": + c["pattern"] = user.localpart + + rulearray = rules['global'][template_name] + + template_rule = _rule_to_template(r) + if template_rule: + if r['rule_id'] in enabled_map: + template_rule['enabled'] = enabled_map[r['rule_id']] + elif 'enabled' in r: + template_rule['enabled'] = r['enabled'] + else: + template_rule['enabled'] = True + rulearray.append(template_rule) + + return rules + + +def _add_empty_priority_class_arrays(d): + for pc in PRIORITY_CLASS_MAP.keys(): + d[pc] = [] + return d + + +def _rule_to_template(rule): + unscoped_rule_id = None + if 'rule_id' in rule: + unscoped_rule_id = _rule_id_from_namespaced(rule['rule_id']) + + template_name = _priority_class_to_template_name(rule['priority_class']) + if template_name in ['override', 'underride']: + templaterule = {k: rule[k] for k in ["conditions", "actions"]} + elif template_name in ["sender", "room"]: + templaterule = {'actions': rule['actions']} + unscoped_rule_id = rule['conditions'][0]['pattern'] + elif template_name == 'content': + if len(rule["conditions"]) != 1: + return None + thecond = rule["conditions"][0] + if "pattern" not in thecond: + return None + templaterule = {'actions': rule['actions']} + templaterule["pattern"] = thecond["pattern"] + + if unscoped_rule_id: + templaterule['rule_id'] = unscoped_rule_id + if 'default' in rule: + templaterule['default'] = rule['default'] + return templaterule + + +def _rule_id_from_namespaced(in_rule_id): + return in_rule_id.split('/')[-1] + + +def _priority_class_to_template_name(pc): + return PRIORITY_CLASS_INVERSE_MAP[pc] diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py index cf68725ca1..edfe28c79b 100644 --- a/synapse/rest/client/v1/push_rule.py +++ b/synapse/rest/client/v1/push_rule.py @@ -22,12 +22,10 @@ from .base import ClientV1RestServlet, client_path_patterns from synapse.storage.push_rule import ( InconsistentRuleException, RuleNotFoundException ) -from 
synapse.push.baserules import list_with_base_rules, BASE_RULE_IDS -from synapse.push.rulekinds import ( - PRIORITY_CLASS_MAP, PRIORITY_CLASS_INVERSE_MAP -) +from synapse.push.clientformat import format_push_rules_for_user +from synapse.push.baserules import BASE_RULE_IDS +from synapse.push.rulekinds import PRIORITY_CLASS_MAP -import copy import simplejson as json @@ -133,48 +131,9 @@ class PushRuleRestServlet(ClientV1RestServlet): # is probably not going to make a whole lot of difference rawrules = yield self.store.get_push_rules_for_user(user_id) - ruleslist = [] - for rawrule in rawrules: - rule = dict(rawrule) - rule["conditions"] = json.loads(rawrule["conditions"]) - rule["actions"] = json.loads(rawrule["actions"]) - ruleslist.append(rule) - - # We're going to be mutating this a lot, so do a deep copy - ruleslist = copy.deepcopy(list_with_base_rules(ruleslist)) - - rules = {'global': {}, 'device': {}} - - rules['global'] = _add_empty_priority_class_arrays(rules['global']) - enabled_map = yield self.store.get_push_rules_enabled_for_user(user_id) - for r in ruleslist: - rulearray = None - - template_name = _priority_class_to_template_name(r['priority_class']) - - # Remove internal stuff. - for c in r["conditions"]: - c.pop("_id", None) - - pattern_type = c.pop("pattern_type", None) - if pattern_type == "user_id": - c["pattern"] = user_id - elif pattern_type == "user_localpart": - c["pattern"] = requester.user.localpart - - rulearray = rules['global'][template_name] - - template_rule = _rule_to_template(r) - if template_rule: - if r['rule_id'] in enabled_map: - template_rule['enabled'] = enabled_map[r['rule_id']] - elif 'enabled' in r: - template_rule['enabled'] = r['enabled'] - else: - template_rule['enabled'] = True - rulearray.append(template_rule) + rules = format_push_rules_for_user(requester.user, rawrules, enabled_map) path = request.postpath[1:] @@ -322,12 +281,6 @@ def _check_actions(actions): raise InvalidRuleException("Unrecognised action") -def _add_empty_priority_class_arrays(d): - for pc in PRIORITY_CLASS_MAP.keys(): - d[pc] = [] - return d - - def _filter_ruleset_with_path(ruleset, path): if path == []: raise UnrecognizedRequestError( @@ -376,37 +329,6 @@ def _priority_class_from_spec(spec): return pc -def _priority_class_to_template_name(pc): - return PRIORITY_CLASS_INVERSE_MAP[pc] - - -def _rule_to_template(rule): - unscoped_rule_id = None - if 'rule_id' in rule: - unscoped_rule_id = _rule_id_from_namespaced(rule['rule_id']) - - template_name = _priority_class_to_template_name(rule['priority_class']) - if template_name in ['override', 'underride']: - templaterule = {k: rule[k] for k in ["conditions", "actions"]} - elif template_name in ["sender", "room"]: - templaterule = {'actions': rule['actions']} - unscoped_rule_id = rule['conditions'][0]['pattern'] - elif template_name == 'content': - if len(rule["conditions"]) != 1: - return None - thecond = rule["conditions"][0] - if "pattern" not in thecond: - return None - templaterule = {'actions': rule['actions']} - templaterule["pattern"] = thecond["pattern"] - - if unscoped_rule_id: - templaterule['rule_id'] = unscoped_rule_id - if 'default' in rule: - templaterule['default'] = rule['default'] - return templaterule - - def _namespaced_rule_id_from_spec(spec): return _namespaced_rule_id(spec, spec['rule_id']) @@ -415,10 +337,6 @@ def _namespaced_rule_id(spec, rule_id): return "global/%s/%s" % (spec['template'], rule_id) -def _rule_id_from_namespaced(in_rule_id): - return in_rule_id.split('/')[-1] - - class 
InvalidRuleException(Exception): pass From 690596b770c75838d0a64c7b522369d61a9a66c7 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 3 Mar 2016 14:47:40 +0000 Subject: [PATCH 261/349] Add jenkins-sqlite.sh --- jenkins-sqlite.sh | 61 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 61 insertions(+) create mode 100755 jenkins-sqlite.sh diff --git a/jenkins-sqlite.sh b/jenkins-sqlite.sh new file mode 100755 index 0000000000..1e5a1dbaea --- /dev/null +++ b/jenkins-sqlite.sh @@ -0,0 +1,61 @@ +#!/bin/bash -eu + +: ${WORKSPACE:="$(pwd)"} + +export PYTHONDONTWRITEBYTECODE=yep +export SYNAPSE_CACHE_FACTOR=1 + +# Output test results as junit xml +export TRIAL_FLAGS="--reporter=subunit" +export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml" +# Write coverage reports to a separate file for each process +export COVERAGE_OPTS="-p" +export DUMP_COVERAGE_COMMAND="coverage help" + +# Output flake8 violations to violations.flake8.log +# Don't exit with non-0 status code on Jenkins, +# so that the build steps continue and a later step can decided whether to +# UNSTABLE or FAILURE this build. +export PEP8SUFFIX="--output-file=violations.flake8.log || echo flake8 finished with status code \$?" + +rm .coverage* || echo "No coverage files to remove" + +tox --notest + +: ${GIT_BRANCH:="origin/$(git rev-parse --abbrev-ref HEAD)"} + +TOX_BIN=$WORKSPACE/.tox/py27/bin + +if [[ ! -e .sytest-base ]]; then + git clone https://github.com/matrix-org/sytest.git .sytest-base --mirror +else + (cd .sytest-base; git fetch -p) +fi + +rm -rf sytest +git clone .sytest-base sytest --shared +cd sytest + +git checkout "${GIT_BRANCH}" || (echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop" ; git checkout develop) + +: ${PERL5LIB:=$WORKSPACE/perl5/lib/perl5} +: ${PERL_MB_OPT:=--install_base=$WORKSPACE/perl5} +: ${PERL_MM_OPT:=INSTALL_BASE=$WORKSPACE/perl5} +export PERL5LIB PERL_MB_OPT PERL_MM_OPT + +./install-deps.pl + +: ${PORT_BASE:=8000} + +echo >&2 "Running sytest with SQLite3"; +./run-tests.pl --coverage -O tap --synapse-directory $WORKSPACE \ + --python $TOX_BIN/python --all --port-base $PORT_BASE > results-sqlite3.tap + +cd .. +cp sytest/.coverage.* . 
+ +# Combine the coverage reports +echo "Combining:" .coverage.* +$TOX_BIN/python -m coverage combine +# Output coverage to coverage.xml +$TOX_BIN/coverage xml -o coverage.xml From 91f4ac602bbf63fcdd88eb4f5668f998dfd6c599 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 3 Mar 2016 14:51:09 +0000 Subject: [PATCH 262/349] Exclude all jenkins build scripts --- MANIFEST.in | 1 + 1 file changed, 1 insertion(+) diff --git a/MANIFEST.in b/MANIFEST.in index 5668665db7..211bde2fc4 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -21,5 +21,6 @@ recursive-include synapse/static *.html recursive-include synapse/static *.js exclude jenkins.sh +exclude jenkins*.sh prune demo/etc From 6789b63131eb36532a0be0aa7a376c1683037afa Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 3 Mar 2016 15:02:07 +0000 Subject: [PATCH 263/349] Use different PORT_BASE --- jenkins-sqlite.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/jenkins-sqlite.sh b/jenkins-sqlite.sh index 1e5a1dbaea..20209cf7b6 100755 --- a/jenkins-sqlite.sh +++ b/jenkins-sqlite.sh @@ -45,7 +45,7 @@ export PERL5LIB PERL_MB_OPT PERL_MM_OPT ./install-deps.pl -: ${PORT_BASE:=8000} +: ${PORT_BASE:=9000} echo >&2 "Running sytest with SQLite3"; ./run-tests.pl --coverage -O tap --synapse-directory $WORKSPACE \ From 246b8c6e4a7e2f568e81f994ed2d0662db952732 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 3 Mar 2016 15:16:19 +0000 Subject: [PATCH 264/349] Change port-base in jenkins-sqlite.sh --- jenkins-sqlite.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/jenkins-sqlite.sh b/jenkins-sqlite.sh index 20209cf7b6..2d98a0af93 100755 --- a/jenkins-sqlite.sh +++ b/jenkins-sqlite.sh @@ -45,7 +45,7 @@ export PERL5LIB PERL_MB_OPT PERL_MM_OPT ./install-deps.pl -: ${PORT_BASE:=9000} +: ${PORT_BASE:=8500} echo >&2 "Running sytest with SQLite3"; ./run-tests.pl --coverage -O tap --synapse-directory $WORKSPACE \ From fc9c7b6cbc27b17aa08cc77832249a00cdbe0338 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 3 Mar 2016 15:26:07 +0000 Subject: [PATCH 265/349] Add jenkins-postgres.sh --- jenkins-postgres.sh | 87 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 87 insertions(+) create mode 100755 jenkins-postgres.sh diff --git a/jenkins-postgres.sh b/jenkins-postgres.sh new file mode 100755 index 0000000000..7dca840e39 --- /dev/null +++ b/jenkins-postgres.sh @@ -0,0 +1,87 @@ +#!/bin/bash -eu + +: ${WORKSPACE:="$(pwd)"} + +export PYTHONDONTWRITEBYTECODE=yep +export SYNAPSE_CACHE_FACTOR=1 + +# Output test results as junit xml +export TRIAL_FLAGS="--reporter=subunit" +export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml" +# Write coverage reports to a separate file for each process +export COVERAGE_OPTS="-p" +export DUMP_COVERAGE_COMMAND="coverage help" + +# Output flake8 violations to violations.flake8.log +# Don't exit with non-0 status code on Jenkins, +# so that the build steps continue and a later step can decided whether to +# UNSTABLE or FAILURE this build. +export PEP8SUFFIX="--output-file=violations.flake8.log || echo flake8 finished with status code \$?" + +rm .coverage* || echo "No coverage files to remove" + +tox --notest + +: ${GIT_BRANCH:="origin/$(git rev-parse --abbrev-ref HEAD)"} + +TOX_BIN=$WORKSPACE/.tox/py27/bin + +if [[ ! 
-e .sytest-base ]]; then + git clone https://github.com/matrix-org/sytest.git .sytest-base --mirror +else + (cd .sytest-base; git fetch -p) +fi + +rm -rf sytest +git clone .sytest-base sytest --shared +cd sytest + +git checkout "${GIT_BRANCH}" || (echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop" ; git checkout develop) + +: ${PERL5LIB:=$WORKSPACE/perl5/lib/perl5} +: ${PERL_MB_OPT:=--install_base=$WORKSPACE/perl5} +: ${PERL_MM_OPT:=INSTALL_BASE=$WORKSPACE/perl5} +export PERL5LIB PERL_MB_OPT PERL_MM_OPT + +./install-deps.pl + +: ${PORT_BASE:=8000} + + +if [[ -z "$POSTGRES_DB_1" ]]; then + echo >&2 "Variable POSTGRES_DB_1 not set" + exit 1 +fi + +if [[ -z "$POSTGRES_DB_2" ]]; then + echo >&2 "Variable POSTGRES_DB_2 not set" + exit 1 +fi + +cat > localhost-$(($PORT_BASE + 1))/database.yaml << EOF +name: psycopg2 +args: + database: $POSTGRES_DB_1 +EOF + +cat > localhost-$(($PORT_BASE + 2))/database.yaml << EOF +name: psycopg2 +args: + database: $POSTGRES_DB_2 +EOF + + +# Run if both postgresql databases exist +echo >&2 "Running sytest with PostgreSQL"; +$TOX_BIN/pip install psycopg2 +./run-tests.pl --coverage -O tap --synapse-directory $WORKSPACE \ + --python $TOX_BIN/python --all --port-base $PORT_BASE > results-postgresql.tap + +cd .. +cp sytest/.coverage.* . + +# Combine the coverage reports +echo "Combining:" .coverage.* +$TOX_BIN/python -m coverage combine +# Output coverage to coverage.xml +$TOX_BIN/coverage xml -o coverage.xml From 7678ec3f9b9a529c7d8c8aa8a38572c5cf2283d6 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 3 Mar 2016 15:42:07 +0000 Subject: [PATCH 266/349] Mkdir --- jenkins-postgres.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/jenkins-postgres.sh b/jenkins-postgres.sh index 7dca840e39..1708cbfaaa 100755 --- a/jenkins-postgres.sh +++ b/jenkins-postgres.sh @@ -58,6 +58,9 @@ if [[ -z "$POSTGRES_DB_2" ]]; then exit 1 fi +mkdir -p "localhost-$(($PORT_BASE + 1))" +mkdir -p "localhost-$(($PORT_BASE + 2))" + cat > localhost-$(($PORT_BASE + 1))/database.yaml << EOF name: psycopg2 args: From c037170faa6fcda689fdd8c3777cd9ef64c24016 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 3 Mar 2016 16:12:45 +0000 Subject: [PATCH 267/349] Split up jenkins tests --- jenkins-flake8.sh | 20 ++++++++++++++++++++ jenkins-unittests.sh | 23 +++++++++++++++++++++++ 2 files changed, 43 insertions(+) create mode 100755 jenkins-flake8.sh create mode 100755 jenkins-unittests.sh diff --git a/jenkins-flake8.sh b/jenkins-flake8.sh new file mode 100755 index 0000000000..cbcb0ae4c0 --- /dev/null +++ b/jenkins-flake8.sh @@ -0,0 +1,20 @@ +#!/bin/bash -eu + +: ${WORKSPACE:="$(pwd)"} + +export PYTHONDONTWRITEBYTECODE=yep +export SYNAPSE_CACHE_FACTOR=1 + +# Output test results as junit xml +export TRIAL_FLAGS="--reporter=subunit" +export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml" +# Write coverage reports to a separate file for each process +export COVERAGE_OPTS="-p" +export DUMP_COVERAGE_COMMAND="coverage help" + +# Output flake8 violations to violations.flake8.log +export PEP8SUFFIX="--output-file=violations.flake8.log" + +rm .coverage* || echo "No coverage files to remove" + +tox -e packaging -e pep8 diff --git a/jenkins-unittests.sh b/jenkins-unittests.sh new file mode 100755 index 0000000000..2fa2f1b1de --- /dev/null +++ b/jenkins-unittests.sh @@ -0,0 +1,23 @@ +#!/bin/bash -eu + +: ${WORKSPACE:="$(pwd)"} + +export PYTHONDONTWRITEBYTECODE=yep +export SYNAPSE_CACHE_FACTOR=1 + +# Output test results as junit xml +export 
TRIAL_FLAGS="--reporter=subunit" +export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml" +# Write coverage reports to a separate file for each process +export COVERAGE_OPTS="-p" +export DUMP_COVERAGE_COMMAND="coverage help" + +# Output flake8 violations to violations.flake8.log +# Don't exit with non-0 status code on Jenkins, +# so that the build steps continue and a later step can decided whether to +# UNSTABLE or FAILURE this build. +export PEP8SUFFIX="--output-file=violations.flake8.log || echo flake8 finished with status code \$?" + +rm .coverage* || echo "No coverage files to remove" + +tox -e py27 From b4022cc487921ec46942a6a72fb174bb7aa1e459 Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Thu, 3 Mar 2016 16:43:42 +0000 Subject: [PATCH 268/349] Pass whole requester to ratelimiting This will enable more detailed decisions --- synapse/handlers/_base.py | 15 ++++-- synapse/handlers/directory.py | 20 +++++--- synapse/handlers/federation.py | 4 +- synapse/handlers/message.py | 8 +-- synapse/handlers/profile.py | 17 ++++--- synapse/handlers/room.py | 76 ++++++++++++++++------------ synapse/rest/client/v1/directory.py | 6 ++- synapse/rest/client/v1/profile.py | 4 +- synapse/rest/client/v1/room.py | 8 +-- tests/handlers/test_profile.py | 16 ++++-- tests/replication/test_resource.py | 17 ++++--- tests/rest/client/v1/test_profile.py | 4 +- tests/utils.py | 5 ++ 13 files changed, 124 insertions(+), 76 deletions(-) diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py index bdade98bf7..2333fc0c09 100644 --- a/synapse/handlers/_base.py +++ b/synapse/handlers/_base.py @@ -160,10 +160,10 @@ class BaseHandler(object): ) defer.returnValue(res.get(user_id, [])) - def ratelimit(self, user_id): + def ratelimit(self, requester): time_now = self.clock.time() allowed, time_allowed = self.ratelimiter.send_message( - user_id, time_now, + requester.user.to_string(), time_now, msg_rate_hz=self.hs.config.rc_messages_per_second, burst_count=self.hs.config.rc_message_burst_count, ) @@ -263,11 +263,18 @@ class BaseHandler(object): return False @defer.inlineCallbacks - def handle_new_client_event(self, event, context, ratelimit=True, extra_users=[]): + def handle_new_client_event( + self, + requester, + event, + context, + ratelimit=True, + extra_users=[] + ): # We now need to go and hit out to wherever we need to hit out to. 
if ratelimit: - self.ratelimit(event.sender) + self.ratelimit(requester) self.auth.check(event, auth_events=context.current_state) diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index e0a778e7ff..88166f0187 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -212,17 +212,21 @@ class DirectoryHandler(BaseHandler): ) @defer.inlineCallbacks - def send_room_alias_update_event(self, user_id, room_id): + def send_room_alias_update_event(self, requester, user_id, room_id): aliases = yield self.store.get_aliases_for_room(room_id) msg_handler = self.hs.get_handlers().message_handler - yield msg_handler.create_and_send_nonmember_event({ - "type": EventTypes.Aliases, - "state_key": self.hs.hostname, - "room_id": room_id, - "sender": user_id, - "content": {"aliases": aliases}, - }, ratelimit=False) + yield msg_handler.create_and_send_nonmember_event( + requester, + { + "type": EventTypes.Aliases, + "state_key": self.hs.hostname, + "room_id": room_id, + "sender": user_id, + "content": {"aliases": aliases}, + }, + ratelimit=False + ) @defer.inlineCallbacks def get_association_from_room_alias(self, room_alias): diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 3655b9e5e2..6e50b0963e 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1657,7 +1657,7 @@ class FederationHandler(BaseHandler): self.auth.check(event, context.current_state) yield self._check_signature(event, auth_events=context.current_state) member_handler = self.hs.get_handlers().room_member_handler - yield member_handler.send_membership_event(event, context, from_client=False) + yield member_handler.send_membership_event(None, event, context) else: destinations = set(x.split(":", 1)[-1] for x in (sender_user_id, room_id)) yield self.replication_layer.forward_third_party_invite( @@ -1686,7 +1686,7 @@ class FederationHandler(BaseHandler): # TODO: Make sure the signatures actually are correct. event.signatures.update(returned_invite.signatures) member_handler = self.hs.get_handlers().room_member_handler - yield member_handler.send_membership_event(event, context, from_client=False) + yield member_handler.send_membership_event(None, event, context) @defer.inlineCallbacks def add_display_name_to_third_party_invite(self, event_dict, event, context): diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index afa7c9c36c..cace1cb82a 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -215,7 +215,7 @@ class MessageHandler(BaseHandler): defer.returnValue((event, context)) @defer.inlineCallbacks - def send_nonmember_event(self, event, context, ratelimit=True): + def send_nonmember_event(self, requester, event, context, ratelimit=True): """ Persists and notifies local clients and federation of an event. 
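Since this patch threads a Requester through every handler entry point, call
sites with no client request behind them (federation, tests) need to build one
by hand. A small sketch mirroring the helpers added further down in this patch
(requester_for_user in tests/utils.py and the Requester(target_user, None,
False) fallback in send_membership_event); it assumes the three-field
Requester used at this point in the series:

    from synapse.types import Requester, UserID

    def requester_for_user(user):
        # No access token and not a guest: the minimal Requester.
        return Requester(user, None, False)

    requester = requester_for_user(UserID.from_string("@alice:example.org"))
    assert requester.access_token_id is None
    assert not requester.is_guest
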
@@ -241,6 +241,7 @@ class MessageHandler(BaseHandler): defer.returnValue(prev_state) yield self.handle_new_client_event( + requester=requester, event=event, context=context, ratelimit=ratelimit, @@ -268,9 +269,9 @@ class MessageHandler(BaseHandler): @defer.inlineCallbacks def create_and_send_nonmember_event( self, + requester, event_dict, ratelimit=True, - token_id=None, txn_id=None ): """ @@ -280,10 +281,11 @@ class MessageHandler(BaseHandler): """ event, context = yield self.create_event( event_dict, - token_id=token_id, + token_id=requester.access_token_id, txn_id=txn_id ) yield self.send_nonmember_event( + requester, event, context, ratelimit=ratelimit, diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index c9ad5944e6..b45eafbb49 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -89,13 +89,13 @@ class ProfileHandler(BaseHandler): defer.returnValue(result["displayname"]) @defer.inlineCallbacks - def set_displayname(self, target_user, auth_user, new_displayname): + def set_displayname(self, target_user, requester, new_displayname): """target_user is the user whose displayname is to be changed; auth_user is the user attempting to make this change.""" if not self.hs.is_mine(target_user): raise SynapseError(400, "User is not hosted on this Home Server") - if target_user != auth_user: + if target_user != requester.user: raise AuthError(400, "Cannot set another user's displayname") if new_displayname == '': @@ -109,7 +109,7 @@ class ProfileHandler(BaseHandler): "displayname": new_displayname, }) - yield self._update_join_states(target_user) + yield self._update_join_states(requester) @defer.inlineCallbacks def get_avatar_url(self, target_user): @@ -139,13 +139,13 @@ class ProfileHandler(BaseHandler): defer.returnValue(result["avatar_url"]) @defer.inlineCallbacks - def set_avatar_url(self, target_user, auth_user, new_avatar_url): + def set_avatar_url(self, target_user, requester, new_avatar_url): """target_user is the user whose avatar_url is to be changed; auth_user is the user attempting to make this change.""" if not self.hs.is_mine(target_user): raise SynapseError(400, "User is not hosted on this Home Server") - if target_user != auth_user: + if target_user != requester.user: raise AuthError(400, "Cannot set another user's avatar_url") yield self.store.set_profile_avatar_url( @@ -156,7 +156,7 @@ class ProfileHandler(BaseHandler): "avatar_url": new_avatar_url, }) - yield self._update_join_states(target_user) + yield self._update_join_states(requester) @defer.inlineCallbacks def collect_presencelike_data(self, user, state): @@ -199,11 +199,12 @@ class ProfileHandler(BaseHandler): defer.returnValue(response) @defer.inlineCallbacks - def _update_join_states(self, user): + def _update_join_states(self, requester): + user = requester.user if not self.hs.is_mine(user): return - self.ratelimit(user.to_string()) + self.ratelimit(requester) joins = yield self.store.get_rooms_for_user( user.to_string(), diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index d2de23a6cc..91fe306cf4 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -18,7 +18,7 @@ from twisted.internet import defer from ._base import BaseHandler -from synapse.types import UserID, RoomAlias, RoomID, RoomStreamToken +from synapse.types import UserID, RoomAlias, RoomID, RoomStreamToken, Requester from synapse.api.constants import ( EventTypes, Membership, JoinRules, RoomCreationPreset, ) @@ -90,7 +90,7 @@ class RoomCreationHandler(BaseHandler): """ 
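The commit message for this change notes that passing the whole requester
"will enable more detailed decisions". Nothing in the patch acts on that yet;
as a purely hypothetical illustration of what such a decision could look like,
here is a toy limiter (GuestAwareLimiter is made up and is not Synapse's
Ratelimiter API) that keys off more than the user id:

    class GuestAwareLimiter(object):
        """Toy limiter: guests get a smaller burst than registered users."""

        def __init__(self, registered_burst=10, guest_burst=2):
            self._burst = {True: guest_burst, False: registered_burst}
            self._sent = {}

        def can_send(self, requester):
            key = requester.user.to_string()
            self._sent[key] = self._sent.get(key, 0) + 1
            return self._sent[key] <= self._burst[requester.is_guest]
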
user_id = requester.user.to_string() - self.ratelimit(user_id) + self.ratelimit(requester) if "room_alias_name" in config: for wchar in string.whitespace: @@ -185,23 +185,29 @@ class RoomCreationHandler(BaseHandler): if "name" in config: name = config["name"] - yield msg_handler.create_and_send_nonmember_event({ - "type": EventTypes.Name, - "room_id": room_id, - "sender": user_id, - "state_key": "", - "content": {"name": name}, - }, ratelimit=False) + yield msg_handler.create_and_send_nonmember_event( + requester, + { + "type": EventTypes.Name, + "room_id": room_id, + "sender": user_id, + "state_key": "", + "content": {"name": name}, + }, + ratelimit=False) if "topic" in config: topic = config["topic"] - yield msg_handler.create_and_send_nonmember_event({ - "type": EventTypes.Topic, - "room_id": room_id, - "sender": user_id, - "state_key": "", - "content": {"topic": topic}, - }, ratelimit=False) + yield msg_handler.create_and_send_nonmember_event( + requester, + { + "type": EventTypes.Topic, + "room_id": room_id, + "sender": user_id, + "state_key": "", + "content": {"topic": topic}, + }, + ratelimit=False) for invitee in invite_list: room_member_handler.update_membership( @@ -231,7 +237,7 @@ class RoomCreationHandler(BaseHandler): if room_alias: result["room_alias"] = room_alias.to_string() yield directory_handler.send_room_alias_update_event( - user_id, room_id + requester, user_id, room_id ) defer.returnValue(result) @@ -263,7 +269,11 @@ class RoomCreationHandler(BaseHandler): @defer.inlineCallbacks def send(etype, content, **kwargs): event = create(etype, content, **kwargs) - yield msg_handler.create_and_send_nonmember_event(event, ratelimit=False) + yield msg_handler.create_and_send_nonmember_event( + creator, + event, + ratelimit=False + ) config = RoomCreationHandler.PRESETS_DICT[preset_config] @@ -454,12 +464,11 @@ class RoomMemberHandler(BaseHandler): member_handler = self.hs.get_handlers().room_member_handler yield member_handler.send_membership_event( + requester, event, context, - is_guest=requester.is_guest, ratelimit=ratelimit, remote_room_hosts=remote_room_hosts, - from_client=True, ) if action == "forget": @@ -468,17 +477,19 @@ class RoomMemberHandler(BaseHandler): @defer.inlineCallbacks def send_membership_event( self, + requester, event, context, - is_guest=False, remote_room_hosts=None, ratelimit=True, - from_client=True, ): """ Change the membership status of a user in a room. Args: + requester (Requester): The local user who requested the membership + event. If None, certain checks, like whether this homeserver can + act as the sender, will be skipped. event (SynapseEvent): The membership event. context: The context of the event. is_guest (bool): Whether the sender is a guest. @@ -486,19 +497,21 @@ class RoomMemberHandler(BaseHandler): the room, and could be danced with in order to join this homeserver for the first time. ratelimit (bool): Whether to rate limit this request. - from_client (bool): Whether this request is the result of a local - client request (rather than over federation). If so, we will - perform extra checks, like that this homeserver can act as this - client. Raises: SynapseError if there was a problem changing the membership. 
""" target_user = UserID.from_string(event.state_key) room_id = event.room_id - if from_client: + if requester is not None: sender = UserID.from_string(event.sender) + assert sender == requester.user, ( + "Sender (%s) must be same as requester (%s)" % + (sender, requester.user) + ) assert self.hs.is_mine(sender), "Sender must be our own: %s" % (sender,) + else: + requester = Requester(target_user, None, False) message_handler = self.hs.get_handlers().message_handler prev_event = message_handler.deduplicate_state_event(event, context) @@ -508,7 +521,7 @@ class RoomMemberHandler(BaseHandler): action = "send" if event.membership == Membership.JOIN: - if is_guest and not self._can_guest_join(context.current_state): + if requester.is_guest and not self._can_guest_join(context.current_state): # This should be an auth check, but guests are a local concept, # so don't really fit into the general auth process. raise AuthError(403, "Guest access not allowed") @@ -551,6 +564,7 @@ class RoomMemberHandler(BaseHandler): ) else: yield self.handle_new_client_event( + requester, event, context, extra_users=[target_user], @@ -669,12 +683,12 @@ class RoomMemberHandler(BaseHandler): ) else: yield self._make_and_store_3pid_invite( + requester, id_server, medium, address, room_id, inviter, - requester.access_token_id, txn_id=txn_id ) @@ -732,12 +746,12 @@ class RoomMemberHandler(BaseHandler): @defer.inlineCallbacks def _make_and_store_3pid_invite( self, + requester, id_server, medium, address, room_id, user, - token_id, txn_id ): room_state = yield self.hs.get_state_handler().get_current_state(room_id) @@ -787,6 +801,7 @@ class RoomMemberHandler(BaseHandler): msg_handler = self.hs.get_handlers().message_handler yield msg_handler.create_and_send_nonmember_event( + requester, { "type": EventTypes.ThirdPartyInvite, "content": { @@ -801,7 +816,6 @@ class RoomMemberHandler(BaseHandler): "sender": user.to_string(), "state_key": token, }, - token_id=token_id, txn_id=txn_id, ) diff --git a/synapse/rest/client/v1/directory.py b/synapse/rest/client/v1/directory.py index 74ec1e50e0..8c1a2614a0 100644 --- a/synapse/rest/client/v1/directory.py +++ b/synapse/rest/client/v1/directory.py @@ -75,7 +75,11 @@ class ClientDirectoryServer(ClientV1RestServlet): yield dir_handler.create_association( user_id, room_alias, room_id, servers ) - yield dir_handler.send_room_alias_update_event(user_id, room_id) + yield dir_handler.send_room_alias_update_event( + requester, + user_id, + room_id + ) except SynapseError as e: raise e except: diff --git a/synapse/rest/client/v1/profile.py b/synapse/rest/client/v1/profile.py index 3c5a212920..953764bd8e 100644 --- a/synapse/rest/client/v1/profile.py +++ b/synapse/rest/client/v1/profile.py @@ -51,7 +51,7 @@ class ProfileDisplaynameRestServlet(ClientV1RestServlet): defer.returnValue((400, "Unable to parse name")) yield self.handlers.profile_handler.set_displayname( - user, requester.user, new_name) + user, requester, new_name) defer.returnValue((200, {})) @@ -88,7 +88,7 @@ class ProfileAvatarURLRestServlet(ClientV1RestServlet): defer.returnValue((400, "Unable to parse name")) yield self.handlers.profile_handler.set_avatar_url( - user, requester.user, new_name) + user, requester, new_name) defer.returnValue((200, {})) diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index f5ed4f7302..cbf3673eff 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -158,12 +158,12 @@ class RoomStateEventRestServlet(ClientV1RestServlet): if event_type == 
EventTypes.Member: yield self.handlers.room_member_handler.send_membership_event( + requester, event, context, - is_guest=requester.is_guest, ) else: - yield msg_handler.send_nonmember_event(event, context) + yield msg_handler.send_nonmember_event(requester, event, context) defer.returnValue((200, {"event_id": event.event_id})) @@ -183,13 +183,13 @@ class RoomSendEventRestServlet(ClientV1RestServlet): msg_handler = self.handlers.message_handler event = yield msg_handler.create_and_send_nonmember_event( + requester, { "type": event_type, "content": content, "room_id": room_id, "sender": requester.user.to_string(), }, - token_id=requester.access_token_id, txn_id=txn_id, ) @@ -504,6 +504,7 @@ class RoomRedactEventRestServlet(ClientV1RestServlet): msg_handler = self.handlers.message_handler event = yield msg_handler.create_and_send_nonmember_event( + requester, { "type": EventTypes.Redaction, "content": content, @@ -511,7 +512,6 @@ class RoomRedactEventRestServlet(ClientV1RestServlet): "sender": requester.user.to_string(), "redacts": event_id, }, - token_id=requester.access_token_id, txn_id=txn_id, ) diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py index a87703bbfd..4f2c14e4ff 100644 --- a/tests/handlers/test_profile.py +++ b/tests/handlers/test_profile.py @@ -23,7 +23,7 @@ from synapse.api.errors import AuthError from synapse.handlers.profile import ProfileHandler from synapse.types import UserID -from tests.utils import setup_test_homeserver +from tests.utils import setup_test_homeserver, requester_for_user class ProfileHandlers(object): @@ -84,7 +84,11 @@ class ProfileTestCase(unittest.TestCase): @defer.inlineCallbacks def test_set_my_name(self): - yield self.handler.set_displayname(self.frank, self.frank, "Frank Jr.") + yield self.handler.set_displayname( + self.frank, + requester_for_user(self.frank), + "Frank Jr." + ) self.assertEquals( (yield self.store.get_profile_displayname(self.frank.localpart)), @@ -93,7 +97,11 @@ class ProfileTestCase(unittest.TestCase): @defer.inlineCallbacks def test_set_my_name_noauth(self): - d = self.handler.set_displayname(self.frank, self.bob, "Frank Jr.") + d = self.handler.set_displayname( + self.frank, + requester_for_user(self.bob), + "Frank Jr." 
+ ) yield self.assertFailure(d, AuthError) @@ -136,7 +144,7 @@ class ProfileTestCase(unittest.TestCase): @defer.inlineCallbacks def test_set_my_avatar(self): yield self.handler.set_avatar_url( - self.frank, self.frank, "http://my.server/pic.gif" + self.frank, requester_for_user(self.frank), "http://my.server/pic.gif" ) self.assertEquals( diff --git a/tests/replication/test_resource.py b/tests/replication/test_resource.py index 38daaf87e2..daabc563b4 100644 --- a/tests/replication/test_resource.py +++ b/tests/replication/test_resource.py @@ -18,7 +18,7 @@ from synapse.types import Requester, UserID from twisted.internet import defer from tests import unittest -from tests.utils import setup_test_homeserver +from tests.utils import setup_test_homeserver, requester_for_user from mock import Mock, NonCallableMock import json import contextlib @@ -133,12 +133,15 @@ class ReplicationResourceCase(unittest.TestCase): @defer.inlineCallbacks def send_text_message(self, room_id, message): handler = self.hs.get_handlers().message_handler - event = yield handler.create_and_send_nonmember_event({ - "type": "m.room.message", - "content": {"body": "message", "msgtype": "m.text"}, - "room_id": room_id, - "sender": self.user.to_string(), - }) + event = yield handler.create_and_send_nonmember_event( + requester_for_user(self.user), + { + "type": "m.room.message", + "content": {"body": "message", "msgtype": "m.text"}, + "room_id": room_id, + "sender": self.user.to_string(), + } + ) defer.returnValue(event.event_id) @defer.inlineCallbacks diff --git a/tests/rest/client/v1/test_profile.py b/tests/rest/client/v1/test_profile.py index 0785965de2..1d210f9bf8 100644 --- a/tests/rest/client/v1/test_profile.py +++ b/tests/rest/client/v1/test_profile.py @@ -86,7 +86,7 @@ class ProfileTestCase(unittest.TestCase): self.assertEquals(200, code) self.assertEquals(mocked_set.call_args[0][0].localpart, "1234ABCD") - self.assertEquals(mocked_set.call_args[0][1].localpart, "1234ABCD") + self.assertEquals(mocked_set.call_args[0][1].user.localpart, "1234ABCD") self.assertEquals(mocked_set.call_args[0][2], "Frank Jr.") @defer.inlineCallbacks @@ -155,5 +155,5 @@ class ProfileTestCase(unittest.TestCase): self.assertEquals(200, code) self.assertEquals(mocked_set.call_args[0][0].localpart, "1234ABCD") - self.assertEquals(mocked_set.call_args[0][1].localpart, "1234ABCD") + self.assertEquals(mocked_set.call_args[0][1].user.localpart, "1234ABCD") self.assertEquals(mocked_set.call_args[0][2], "http://my.server/pic.gif") diff --git a/tests/utils.py b/tests/utils.py index c67fa1ca35..291b549053 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -20,6 +20,7 @@ from synapse.storage.prepare_database import prepare_database from synapse.storage.engines import create_engine from synapse.server import HomeServer from synapse.federation.transport import server +from synapse.types import Requester from synapse.util.ratelimitutils import FederationRateLimiter from synapse.util.logcontext import LoggingContext @@ -510,3 +511,7 @@ class DeferredMockCallable(object): "call(%s)" % _format_call(c[0], c[1]) for c in calls ]) ) + + +def requester_for_user(user): + return Requester(user, None, False) From 62d808beccaf4ef94e743c57b9570bea99071eb7 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 3 Mar 2016 11:38:36 +0000 Subject: [PATCH 270/349] jenkins-*.sh: set -x Also move the options from the shebang line to the body of the script, so that they take effect even if somebody explicitly runs "bash jenkins.sh" --- jenkins-flake8.sh | 4 +++- 
jenkins-postgres.sh | 4 +++- jenkins-sqlite.sh | 4 +++- jenkins-unittests.sh | 4 +++- 4 files changed, 12 insertions(+), 4 deletions(-) diff --git a/jenkins-flake8.sh b/jenkins-flake8.sh index cbcb0ae4c0..11f1cab6c8 100755 --- a/jenkins-flake8.sh +++ b/jenkins-flake8.sh @@ -1,4 +1,6 @@ -#!/bin/bash -eu +#!/bin/bash + +set -eux : ${WORKSPACE:="$(pwd)"} diff --git a/jenkins-postgres.sh b/jenkins-postgres.sh index 1708cbfaaa..d1fed590a2 100755 --- a/jenkins-postgres.sh +++ b/jenkins-postgres.sh @@ -1,4 +1,6 @@ -#!/bin/bash -eu +#!/bin/bash + +set -eux : ${WORKSPACE:="$(pwd)"} diff --git a/jenkins-sqlite.sh b/jenkins-sqlite.sh index 2d98a0af93..57fd8de54d 100755 --- a/jenkins-sqlite.sh +++ b/jenkins-sqlite.sh @@ -1,4 +1,6 @@ -#!/bin/bash -eu +#!/bin/bash + +set -eux : ${WORKSPACE:="$(pwd)"} diff --git a/jenkins-unittests.sh b/jenkins-unittests.sh index 2fa2f1b1de..104d511994 100755 --- a/jenkins-unittests.sh +++ b/jenkins-unittests.sh @@ -1,4 +1,6 @@ -#!/bin/bash -eu +#!/bin/bash + +set -eux : ${WORKSPACE:="$(pwd)"} From a92b4ea76fab9475483f345d998206d00f64b20f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 4 Mar 2016 00:06:03 +0000 Subject: [PATCH 271/349] Make sure we add all invited members before returning from createRoom add a missing yield. --- synapse/handlers/room.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 91fe306cf4..2fb417b0c5 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -210,7 +210,7 @@ class RoomCreationHandler(BaseHandler): ratelimit=False) for invitee in invite_list: - room_member_handler.update_membership( + yield room_member_handler.update_membership( requester, UserID.from_string(invitee), room_id, From 5fc59f009cca676ed8c9c932abf6cddc6614eae9 Mon Sep 17 00:00:00 2001 From: Patrik Oldsberg Date: Tue, 23 Feb 2016 14:22:07 +0100 Subject: [PATCH 272/349] config,handlers/_base: added homeserver config for what state is included in a room invite Signed-off-by: Patrik Oldsberg --- synapse/config/api.py | 40 ++++++++++++++++++++++++++++++++++++ synapse/config/homeserver.py | 3 ++- synapse/handlers/_base.py | 8 ++------ 3 files changed, 44 insertions(+), 7 deletions(-) create mode 100644 synapse/config/api.py diff --git a/synapse/config/api.py b/synapse/config/api.py new file mode 100644 index 0000000000..20ba33226a --- /dev/null +++ b/synapse/config/api.py @@ -0,0 +1,40 @@ +# Copyright 2015, 2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from ._base import Config + +from synapse.api.constants import EventTypes + + +class ApiConfig(Config): + + def read_config(self, config): + self.room_invite_state_types = config.get("room_invite_state_types", [ + EventTypes.JoinRules, + EventTypes.CanonicalAlias, + EventTypes.RoomAvatar, + EventTypes.Name, + ]) + + def default_config(cls, **kwargs): + return """\ + ## API Configuration ## + + # A list of event types that will be included in the room_invite_state + room_invite_state_types: + - "{JoinRules}" + - "{CanonicalAlias}" + - "{RoomAvatar}" + - "{Name}" + """.format(**vars(EventTypes)) diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py index 3c333b4172..a08c170f1d 100644 --- a/synapse/config/homeserver.py +++ b/synapse/config/homeserver.py @@ -23,6 +23,7 @@ from .captcha import CaptchaConfig from .voip import VoipConfig from .registration import RegistrationConfig from .metrics import MetricsConfig +from .api import ApiConfig from .appservice import AppServiceConfig from .key import KeyConfig from .saml2 import SAML2Config @@ -32,7 +33,7 @@ from .password import PasswordConfig class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig, RatelimitConfig, ContentRepositoryConfig, CaptchaConfig, - VoipConfig, RegistrationConfig, MetricsConfig, + VoipConfig, RegistrationConfig, MetricsConfig, ApiConfig, AppServiceConfig, KeyConfig, SAML2Config, CasConfig, PasswordConfig,): pass diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py index c6a74b0e3d..884572df97 100644 --- a/synapse/handlers/_base.py +++ b/synapse/handlers/_base.py @@ -333,12 +333,8 @@ class BaseHandler(object): "sender": e.sender, } for k, e in context.current_state.items() - if e.type in ( - EventTypes.JoinRules, - EventTypes.CanonicalAlias, - EventTypes.RoomAvatar, - EventTypes.Name, - ) or is_inviter_member_event(e) + if e.type in self.hs.config.room_invite_state_types + or is_inviter_member_event(e) ] invitee = UserID.from_string(event.state_key) From bb0e82fff186b509e82184ba679b7b8eb6db26c5 Mon Sep 17 00:00:00 2001 From: Patrik Oldsberg Date: Tue, 23 Feb 2016 14:48:12 +0100 Subject: [PATCH 273/349] tests/utils: added room_invite_state_types to test config Signed-off-by: Patrik Oldsberg --- tests/utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/utils.py b/tests/utils.py index 291b549053..52405502e9 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -51,6 +51,7 @@ def setup_test_homeserver(name="test", datastore=None, config=None, **kargs): config.macaroon_secret_key = "not even a little secret" config.server_name = "server.under.test" config.trusted_third_party_id_servers = [] + config.room_invite_state_types = [] config.database_config = {"name": "sqlite3"} From ed61a491692635191b93f075db207a121eb37b66 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 4 Mar 2016 14:29:58 +0000 Subject: [PATCH 274/349] Add profile information to invites --- synapse/federation/federation_client.py | 2 +- synapse/federation/transport/client.py | 1 + synapse/handlers/message.py | 17 +++++++++++++++-- 3 files changed, 17 insertions(+), 3 deletions(-) diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index e30e2da58d..83c1f46586 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -114,7 +114,7 @@ class FederationClient(FederationBase): @log_function def make_query(self, destination, query_type, args, - retry_on_dns_fail=True): + retry_on_dns_fail=False): """Sends a 
federation Query to a remote homeserver of the given type and arguments. diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 2b5d40ea7f..2237e3413c 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -160,6 +160,7 @@ class TransportLayerClient(object): path=path, args=args, retry_on_dns_fail=retry_on_dns_fail, + timeout=10000, ) defer.returnValue(content) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index cace1cb82a..2b1cf1c3af 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -196,12 +196,25 @@ class MessageHandler(BaseHandler): if builder.type == EventTypes.Member: membership = builder.content.get("membership", None) + target = UserID.from_string(builder.state_key) + if membership == Membership.JOIN: - joinee = UserID.from_string(builder.state_key) # If event doesn't include a display name, add one. yield collect_presencelike_data( - self.distributor, joinee, builder.content + self.distributor, target, builder.content ) + elif membership == Membership.INVITE: + profile = self.hs.get_handlers().profile_handler + content = builder.content + + try: + content["displayname"] = yield profile.get_displayname(target) + content["avatar_url"] = yield profile.get_avatar_url(target) + except Exception as e: + logger.info( + "Failed to get profile infomration for %r: %s", + target, e + ) if token_id is not None: builder.internal_metadata.token_id = token_id From 1b4f4a936fb416d81203fcd66be690f9a04b2b62 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Fri, 4 Mar 2016 14:44:01 +0000 Subject: [PATCH 275/349] Hook up the push rules stream to account_data in /sync --- synapse/handlers/sync.py | 22 +++++ synapse/rest/client/v1/push_rule.py | 2 +- synapse/storage/__init__.py | 5 ++ synapse/storage/push_rule.py | 125 +++++++++++++--------------- 4 files changed, 85 insertions(+), 69 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index fded6e4009..92eab20c7c 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -20,6 +20,7 @@ from synapse.api.constants import Membership, EventTypes from synapse.util import unwrapFirstError from synapse.util.logcontext import LoggingContext, preserve_fn from synapse.util.metrics import Measure +from synapse.push.clientformat import format_push_rules_for_user from twisted.internet import defer @@ -224,6 +225,10 @@ class SyncHandler(BaseHandler): ) ) + account_data['m.push_rules'] = yield self.push_rules_for_user( + sync_config.user + ) + tags_by_room = yield self.store.get_tags_for_user( sync_config.user.to_string() ) @@ -322,6 +327,14 @@ class SyncHandler(BaseHandler): defer.returnValue(room_sync) + @defer.inlineCallbacks + def push_rules_for_user(self, user): + user_id = user.to_string() + rawrules = yield self.store.get_push_rules_for_user(user_id) + enabled_map = yield self.store.get_push_rules_enabled_for_user(user_id) + rules = format_push_rules_for_user(user, rawrules, enabled_map) + defer.returnValue(rules) + def account_data_for_user(self, account_data): account_data_events = [] @@ -481,6 +494,15 @@ class SyncHandler(BaseHandler): ) ) + push_rules_changed = yield self.store.have_push_rules_changed_for_user( + user_id, int(since_token.push_rules_key) + ) + + if push_rules_changed: + account_data["m.push_rules"] = yield self.push_rules_for_user( + sync_config.user + ) + # Get a list of membership change events that have happened. 
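The incremental-sync gate a few lines up (have_push_rules_changed_for_user
against since_token.push_rules_key) leans on the per-user stream change cache
that the rest of this patch wires up: most /sync requests can skip the
database because the in-memory cache already knows nothing changed for that
user. A simplified stand-in for that cache's semantics (ToyStreamChangeCache
is illustrative, not the real StreamChangeCache class):

    class ToyStreamChangeCache(object):
        """Remembers the latest stream position at which each entity changed."""

        def __init__(self, current_pos):
            self._latest = {}               # entity -> last position it changed at
            self._earliest_known = current_pos

        def entity_has_changed(self, entity, pos):
            self._latest[entity] = max(pos, self._latest.get(entity, 0))

        def has_entity_changed(self, entity, since_pos):
            if since_pos < self._earliest_known:
                return True                 # too far back to know: assume changed
            return self._latest.get(entity, 0) > since_pos

    cache = ToyStreamChangeCache(current_pos=10)
    cache.entity_has_changed("@alice:example.org", 12)
    assert cache.has_entity_changed("@alice:example.org", since_pos=10)
    assert not cache.has_entity_changed("@bob:example.org", since_pos=10)
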
rooms_changed = yield self.store.get_membership_changes_for_user( user_id, since_token.room_key, now_token.room_key diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py index edfe28c79b..981d7708db 100644 --- a/synapse/rest/client/v1/push_rule.py +++ b/synapse/rest/client/v1/push_rule.py @@ -156,7 +156,7 @@ class PushRuleRestServlet(ClientV1RestServlet): return 200, {} def notify_user(self, user_id): - stream_id = self.store.get_push_rules_stream_token() + stream_id, _ = self.store.get_push_rules_stream_token() self.notifier.on_new_event( "push_rules_key", stream_id, users=[user_id] ) diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index e2d7b52569..7b7b03d052 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -160,6 +160,11 @@ class DataStore(RoomMemberStore, RoomStore, prefilled_cache=presence_cache_prefill ) + self.push_rules_stream_cache = StreamChangeCache( + "PushRulesStreamChangeCache", + self._push_rules_stream_id_gen.get_max_token()[0], + ) + super(DataStore, self).__init__(hs) def take_presence_startup_info(self): diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py index e034024108..792fcbdf5b 100644 --- a/synapse/storage/push_rule.py +++ b/synapse/storage/push_rule.py @@ -244,15 +244,10 @@ class PushRuleStore(SQLBaseStore): ) if update_stream: - self._simple_insert_txn( - txn, - table="push_rules_stream", - values={ - "stream_id": stream_id, - "stream_ordering": stream_ordering, - "user_id": user_id, - "rule_id": rule_id, - "op": "ADD", + self._insert_push_rules_update_txn( + txn, stream_id, stream_ordering, user_id, rule_id, + op="ADD", + data={ "priority_class": priority_class, "priority": priority, "conditions": conditions_json, @@ -260,13 +255,6 @@ class PushRuleStore(SQLBaseStore): } ) - txn.call_after( - self.get_push_rules_for_user.invalidate, (user_id,) - ) - txn.call_after( - self.get_push_rules_enabled_for_user.invalidate, (user_id,) - ) - @defer.inlineCallbacks def delete_push_rule(self, user_id, rule_id): """ @@ -284,22 +272,10 @@ class PushRuleStore(SQLBaseStore): "push_rules", {'user_name': user_id, 'rule_id': rule_id}, ) - self._simple_insert_txn( - txn, - table="push_rules_stream", - values={ - "stream_id": stream_id, - "stream_ordering": stream_ordering, - "user_id": user_id, - "rule_id": rule_id, - "op": "DELETE", - } - ) - txn.call_after( - self.get_push_rules_for_user.invalidate, (user_id,) - ) - txn.call_after( - self.get_push_rules_enabled_for_user.invalidate, (user_id,) + + self._insert_push_rules_update_txn( + txn, stream_id, stream_ordering, user_id, rule_id, + op="DELETE" ) with self._push_rules_stream_id_gen.get_next() as (stream_id, stream_ordering): @@ -328,23 +304,9 @@ class PushRuleStore(SQLBaseStore): {'id': new_id}, ) - self._simple_insert_txn( - txn, - "push_rules_stream", - values={ - "stream_id": stream_id, - "stream_ordering": stream_ordering, - "user_id": user_id, - "rule_id": rule_id, - "op": "ENABLE" if enabled else "DISABLE", - } - ) - - txn.call_after( - self.get_push_rules_for_user.invalidate, (user_id,) - ) - txn.call_after( - self.get_push_rules_enabled_for_user.invalidate, (user_id,) + self._insert_push_rules_update_txn( + txn, stream_id, stream_ordering, user_id, rule_id, + op="ENABLE" if enabled else "DISABLE" ) @defer.inlineCallbacks @@ -370,24 +332,9 @@ class PushRuleStore(SQLBaseStore): {'actions': actions_json}, ) - self._simple_insert_txn( - txn, - "push_rules_stream", - values={ - "stream_id": stream_id, - 
"stream_ordering": stream_ordering, - "user_id": user_id, - "rule_id": rule_id, - "op": "ACTIONS", - "actions": actions_json, - } - ) - - txn.call_after( - self.get_push_rules_for_user.invalidate, (user_id,) - ) - txn.call_after( - self.get_push_rules_enabled_for_user.invalidate, (user_id,) + self._insert_push_rules_update_txn( + txn, stream_id, stream_ordering, user_id, rule_id, + op="ACTIONS", data={"actions": actions_json} ) with self._push_rules_stream_id_gen.get_next() as (stream_id, stream_ordering): @@ -396,6 +343,31 @@ class PushRuleStore(SQLBaseStore): stream_id, stream_ordering ) + def _insert_push_rules_update_txn( + self, txn, stream_id, stream_ordering, user_id, rule_id, op, data=None + ): + values = { + "stream_id": stream_id, + "stream_ordering": stream_ordering, + "user_id": user_id, + "rule_id": rule_id, + "op": op, + } + if data is not None: + values.update(data) + + self._simple_insert_txn(txn, "push_rules_stream", values=values) + + txn.call_after( + self.get_push_rules_for_user.invalidate, (user_id,) + ) + txn.call_after( + self.get_push_rules_enabled_for_user.invalidate, (user_id,) + ) + txn.call_after( + self.push_rules_stream_cache.entity_has_changed, user_id, stream_id + ) + def get_all_push_rule_updates(self, last_id, current_id, limit): """Get all the push rules changes that have happend on the server""" def get_all_push_rule_updates_txn(txn): @@ -403,7 +375,7 @@ class PushRuleStore(SQLBaseStore): "SELECT stream_id, stream_ordering, user_id, rule_id," " op, priority_class, priority, conditions, actions" " FROM push_rules_stream" - " WHERE ? < stream_id and stream_id <= ?" + " WHERE ? < stream_id AND stream_id <= ?" " ORDER BY stream_id ASC LIMIT ?" ) txn.execute(sql, (last_id, current_id, limit)) @@ -418,6 +390,23 @@ class PushRuleStore(SQLBaseStore): room stream ordering it corresponds to.""" return self._push_rules_stream_id_gen.get_max_token() + def have_push_rules_changed_for_user(self, user_id, last_id): + if not self.push_rules_stream_cache.has_entity_changed(user_id, last_id): + logger.error("FNARG") + return defer.succeed(False) + else: + def have_push_rules_changed_txn(txn): + sql = ( + "SELECT COUNT(stream_id) FROM push_rules_stream" + " WHERE user_id = ? AND ? 
< stream_id" + ) + txn.execute(sql, (user_id, last_id)) + count, = txn.fetchone() + return bool(count) + return self.runInteraction( + "have_push_rules_changed", have_push_rules_changed_txn + ) + class RuleNotFoundException(Exception): pass From 3110c37d024980b194ca6356ab6c05d9f240cb4f Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Fri, 4 Mar 2016 14:48:17 +0000 Subject: [PATCH 276/349] Fix unit tests --- tests/rest/client/v1/test_rooms.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py index afca5303ba..fe4fd71272 100644 --- a/tests/rest/client/v1/test_rooms.py +++ b/tests/rest/client/v1/test_rooms.py @@ -1032,7 +1032,7 @@ class RoomMessageListTestCase(RestTestCase): @defer.inlineCallbacks def test_topo_token_is_accepted(self): - token = "t1-0_0_0_0_0" + token = "t1-0_0_0_0_0_0" (code, response) = yield self.mock_resource.trigger_get( "/rooms/%s/messages?access_token=x&from=%s" % (self.room_id, token)) @@ -1044,7 +1044,7 @@ class RoomMessageListTestCase(RestTestCase): @defer.inlineCallbacks def test_stream_token_is_accepted_for_fwd_pagianation(self): - token = "s0_0_0_0_0" + token = "s0_0_0_0_0_0" (code, response) = yield self.mock_resource.trigger_get( "/rooms/%s/messages?access_token=x&from=%s" % (self.room_id, token)) From 13cbd31040787110bdc6d5d4ccc38234139e4b27 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 4 Mar 2016 15:22:39 +0000 Subject: [PATCH 278/349] Spelling --- synapse/handlers/message.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 2b1cf1c3af..e92c74d07b 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -212,7 +212,7 @@ class MessageHandler(BaseHandler): content["avatar_url"] = yield profile.get_avatar_url(target) except Exception as e: logger.info( - "Failed to get profile infomration for %r: %s", + "Failed to get profile information for %r: %s", target, e ) From 125f674eae8c98b2aaab61847dcecd1392258e30 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 4 Mar 2016 15:27:55 +0000 Subject: [PATCH 279/349] Always include kicks and bans in full /sync --- synapse/handlers/sync.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index fded6e4009..26a66814eb 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -209,9 +209,9 @@ class SyncHandler(BaseHandler): key=None ) - membership_list = (Membership.INVITE, Membership.JOIN) - if sync_config.filter_collection.include_leave: - membership_list += (Membership.LEAVE, Membership.BAN) + membership_list = ( + Membership.INVITE, Membership.JOIN, Membership.LEAVE, Membership.BAN + ) room_list = yield self.store.get_rooms_for_user_where_membership_is( user_id=sync_config.user.to_string(), @@ -257,6 +257,12 @@ class SyncHandler(BaseHandler): invite=invite, )) elif event.membership in (Membership.LEAVE, Membership.BAN): + # Always send down rooms we were banned or kicked from. 
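The nested membership checks added just below collapse into a single predicate. A self-contained sketch of that predicate (plain strings instead of the Membership constants; illustration only, not part of the patch):

    def skip_archived_room(membership, event_sender, syncing_user, include_leave):
        """True if a left room should be omitted from a full /sync."""
        if include_leave:
            return False  # the filter asked for left rooms: keep everything
        if membership != "leave":
            return False  # bans are always sent down
        # Kicks have a different sender; only self-initiated leaves are dropped.
        return event_sender == syncing_user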
+ if not sync_config.filter_collection.include_leave: + if event.membership == Membership.LEAVE: + if sync_config.user.to_string() == event.sender: + continue + leave_token = now_token.copy_and_replace( "room_key", "s%d" % (event.stream_ordering,) ) From 7e9fc9b6af2052441a54613627aebeb4999d1efe Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Fri, 4 Mar 2016 15:54:09 +0000 Subject: [PATCH 280/349] /FNARG/d --- synapse/storage/push_rule.py | 1 - 1 file changed, 1 deletion(-) diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py index 792fcbdf5b..57e1ca5509 100644 --- a/synapse/storage/push_rule.py +++ b/synapse/storage/push_rule.py @@ -392,7 +392,6 @@ class PushRuleStore(SQLBaseStore): def have_push_rules_changed_for_user(self, user_id, last_id): if not self.push_rules_stream_cache.has_entity_changed(user_id, last_id): - logger.error("FNARG") return defer.succeed(False) else: def have_push_rules_changed_txn(txn): From ebcbb23226904f080e6a9c1e2f2901886c286445 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Fri, 4 Mar 2016 16:15:23 +0000 Subject: [PATCH 281/349] s/stream_ordering/event_stream_ordering/ in push --- synapse/replication/resource.py | 2 +- synapse/storage/push_rule.py | 54 ++++++++++--------- .../schema/delta/30/push_rule_stream.sql | 2 +- 3 files changed, 31 insertions(+), 27 deletions(-) diff --git a/synapse/replication/resource.py b/synapse/replication/resource.py index 15b7898a45..adc1eb1d0b 100644 --- a/synapse/replication/resource.py +++ b/synapse/replication/resource.py @@ -293,7 +293,7 @@ class ReplicationResource(Resource): push_rules, current_position, limit ) writer.write_header_and_rows("push_rules", rows, ( - "position", "stream_ordering", "user_id", "rule_id", "op", + "position", "event_stream_ordering", "user_id", "rule_id", "op", "priority_class", "priority", "conditions", "actions" )) diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py index 57e1ca5509..9dbad2fd5f 100644 --- a/synapse/storage/push_rule.py +++ b/synapse/storage/push_rule.py @@ -106,24 +106,25 @@ class PushRuleStore(SQLBaseStore): ): conditions_json = json.dumps(conditions) actions_json = json.dumps(actions) - with self._push_rules_stream_id_gen.get_next() as (stream_id, stream_ordering): + with self._push_rules_stream_id_gen.get_next() as ids: + stream_id, event_stream_ordering = ids if before or after: yield self.runInteraction( "_add_push_rule_relative_txn", self._add_push_rule_relative_txn, - stream_id, stream_ordering, user_id, rule_id, priority_class, + stream_id, event_stream_ordering, user_id, rule_id, priority_class, conditions_json, actions_json, before, after, ) else: yield self.runInteraction( "_add_push_rule_highest_priority_txn", self._add_push_rule_highest_priority_txn, - stream_id, stream_ordering, user_id, rule_id, priority_class, + stream_id, event_stream_ordering, user_id, rule_id, priority_class, conditions_json, actions_json, ) def _add_push_rule_relative_txn( - self, txn, stream_id, stream_ordering, user_id, rule_id, priority_class, + self, txn, stream_id, event_stream_ordering, user_id, rule_id, priority_class, conditions_json, actions_json, before, after ): # Lock the table since otherwise we'll have annoying races between the @@ -175,12 +176,12 @@ class PushRuleStore(SQLBaseStore): txn.execute(sql, (user_id, priority_class, new_rule_priority)) self._upsert_push_rule_txn( - txn, stream_id, stream_ordering, user_id, rule_id, priority_class, + txn, stream_id, event_stream_ordering, user_id, rule_id, priority_class, new_rule_priority, 
conditions_json, actions_json, ) def _add_push_rule_highest_priority_txn( - self, txn, stream_id, stream_ordering, user_id, rule_id, priority_class, + self, txn, stream_id, event_stream_ordering, user_id, rule_id, priority_class, conditions_json, actions_json ): # Lock the table since otherwise we'll have annoying races between the @@ -202,12 +203,12 @@ class PushRuleStore(SQLBaseStore): self._upsert_push_rule_txn( txn, - stream_id, stream_ordering, user_id, rule_id, priority_class, new_prio, + stream_id, event_stream_ordering, user_id, rule_id, priority_class, new_prio, conditions_json, actions_json, ) def _upsert_push_rule_txn( - self, txn, stream_id, stream_ordering, user_id, rule_id, priority_class, + self, txn, stream_id, event_stream_ordering, user_id, rule_id, priority_class, priority, conditions_json, actions_json, update_stream=True ): """Specialised version of _simple_upsert_txn that picks a push_rule_id @@ -245,7 +246,7 @@ class PushRuleStore(SQLBaseStore): if update_stream: self._insert_push_rules_update_txn( - txn, stream_id, stream_ordering, user_id, rule_id, + txn, stream_id, event_stream_ordering, user_id, rule_id, op="ADD", data={ "priority_class": priority_class, @@ -266,7 +267,7 @@ class PushRuleStore(SQLBaseStore): user_id (str): The matrix ID of the push rule owner rule_id (str): The rule_id of the rule to be deleted """ - def delete_push_rule_txn(txn, stream_id, stream_ordering): + def delete_push_rule_txn(txn, stream_id, event_stream_ordering): self._simple_delete_one_txn( txn, "push_rules", @@ -274,26 +275,28 @@ class PushRuleStore(SQLBaseStore): ) self._insert_push_rules_update_txn( - txn, stream_id, stream_ordering, user_id, rule_id, + txn, stream_id, event_stream_ordering, user_id, rule_id, op="DELETE" ) - with self._push_rules_stream_id_gen.get_next() as (stream_id, stream_ordering): + with self._push_rules_stream_id_gen.get_next() as ids: + stream_id, event_stream_ordering = ids yield self.runInteraction( - "delete_push_rule", delete_push_rule_txn, stream_id, stream_ordering + "delete_push_rule", delete_push_rule_txn, stream_id, event_stream_ordering ) @defer.inlineCallbacks def set_push_rule_enabled(self, user_id, rule_id, enabled): - with self._push_rules_stream_id_gen.get_next() as (stream_id, stream_ordering): + with self._push_rules_stream_id_gen.get_next() as ids: + stream_id, event_stream_ordering = ids yield self.runInteraction( "_set_push_rule_enabled_txn", self._set_push_rule_enabled_txn, - stream_id, stream_ordering, user_id, rule_id, enabled + stream_id, event_stream_ordering, user_id, rule_id, enabled ) def _set_push_rule_enabled_txn( - self, txn, stream_id, stream_ordering, user_id, rule_id, enabled + self, txn, stream_id, event_stream_ordering, user_id, rule_id, enabled ): new_id = self._push_rules_enable_id_gen.get_next() self._simple_upsert_txn( @@ -305,7 +308,7 @@ class PushRuleStore(SQLBaseStore): ) self._insert_push_rules_update_txn( - txn, stream_id, stream_ordering, user_id, rule_id, + txn, stream_id, event_stream_ordering, user_id, rule_id, op="ENABLE" if enabled else "DISABLE" ) @@ -313,14 +316,14 @@ class PushRuleStore(SQLBaseStore): def set_push_rule_actions(self, user_id, rule_id, actions, is_default_rule): actions_json = json.dumps(actions) - def set_push_rule_actions_txn(txn, stream_id, stream_ordering): + def set_push_rule_actions_txn(txn, stream_id, event_stream_ordering): if is_default_rule: # Add a dummy rule to the rules table with the user specified # actions. 
priority_class = -1 priority = 1 self._upsert_push_rule_txn( - txn, stream_id, stream_ordering, user_id, rule_id, + txn, stream_id, event_stream_ordering, user_id, rule_id, priority_class, priority, "[]", actions_json, update_stream=False ) @@ -333,22 +336,23 @@ class PushRuleStore(SQLBaseStore): ) self._insert_push_rules_update_txn( - txn, stream_id, stream_ordering, user_id, rule_id, + txn, stream_id, event_stream_ordering, user_id, rule_id, op="ACTIONS", data={"actions": actions_json} ) - with self._push_rules_stream_id_gen.get_next() as (stream_id, stream_ordering): + with self._push_rules_stream_id_gen.get_next() as ids: + stream_id, event_stream_ordering = ids yield self.runInteraction( "set_push_rule_actions", set_push_rule_actions_txn, - stream_id, stream_ordering + stream_id, event_stream_ordering ) def _insert_push_rules_update_txn( - self, txn, stream_id, stream_ordering, user_id, rule_id, op, data=None + self, txn, stream_id, event_stream_ordering, user_id, rule_id, op, data=None ): values = { "stream_id": stream_id, - "stream_ordering": stream_ordering, + "event_stream_ordering": event_stream_ordering, "user_id": user_id, "rule_id": rule_id, "op": op, @@ -372,7 +376,7 @@ class PushRuleStore(SQLBaseStore): """Get all the push rules changes that have happend on the server""" def get_all_push_rule_updates_txn(txn): sql = ( - "SELECT stream_id, stream_ordering, user_id, rule_id," + "SELECT stream_id, event_stream_ordering, user_id, rule_id," " op, priority_class, priority, conditions, actions" " FROM push_rules_stream" " WHERE ? < stream_id AND stream_id <= ?" diff --git a/synapse/storage/schema/delta/30/push_rule_stream.sql b/synapse/storage/schema/delta/30/push_rule_stream.sql index e8418bb35f..735aa8d5f6 100644 --- a/synapse/storage/schema/delta/30/push_rule_stream.sql +++ b/synapse/storage/schema/delta/30/push_rule_stream.sql @@ -17,7 +17,7 @@ CREATE TABLE push_rules_stream( stream_id BIGINT NOT NULL, - stream_ordering BIGINT NOT NULL, + event_stream_ordering BIGINT NOT NULL, user_id TEXT NOT NULL, rule_id TEXT NOT NULL, op TEXT NOT NULL, -- One of "ENABLE", "DISABLE", "ACTIONS", "ADD", "DELETE" From deda48068c24083750a9bfc21d114c12e8347969 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Fri, 4 Mar 2016 16:19:42 +0000 Subject: [PATCH 282/349] prefill the push rules stream change cache --- synapse/storage/__init__.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 7b7b03d052..ab2f115adf 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -160,9 +160,16 @@ class DataStore(RoomMemberStore, RoomStore, prefilled_cache=presence_cache_prefill ) + push_rules_prefill, push_rules_id = self._get_cache_dict( + db_conn, "presence_stream", + entity_column="user_id", + stream_column="stream_id", + max_value=self._push_rules_stream_id_gen.get_max_token()[0], + ) + self.push_rules_stream_cache = StreamChangeCache( - "PushRulesStreamChangeCache", - self._push_rules_stream_id_gen.get_max_token()[0], + "PushRulesStreamChangeCache", push_rules_id, + prefilled_cache=push_rules_prefill, ) super(DataStore, self).__init__(hs) From 9848b54cac2c7e077317eec85ee0de2cb567c561 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Fri, 4 Mar 2016 16:20:22 +0000 Subject: [PATCH 283/349] Prefill from the correct stream --- synapse/storage/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 
ab2f115adf..6f37a85d09 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -161,7 +161,7 @@ class DataStore(RoomMemberStore, RoomStore, ) push_rules_prefill, push_rules_id = self._get_cache_dict( - db_conn, "presence_stream", + db_conn, "push_rules_stream", entity_column="user_id", stream_column="stream_id", max_value=self._push_rules_stream_id_gen.get_max_token()[0], From 2ab0bf4b97c451f7709dcc9b747b1d6071c4f87f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 4 Mar 2016 16:54:32 +0000 Subject: [PATCH 284/349] Send history visibility on boundary changes --- synapse/handlers/_base.py | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py index 2d56af5027..90eabb6eb7 100644 --- a/synapse/handlers/_base.py +++ b/synapse/handlers/_base.py @@ -29,6 +29,14 @@ import logging logger = logging.getLogger(__name__) +VISIBILITY_PRIORITY = ( + "world_readable", + "shared", + "invited", + "joined", +) + + class BaseHandler(object): """ Common base class for the event handlers. @@ -85,10 +93,28 @@ class BaseHandler(object): else: visibility = "shared" + if visibility not in VISIBILITY_PRIORITY: + visibility = "shared" + # if it was world_readable, it's easy: everyone can read it if visibility == "world_readable": return True + # Always allow history visibility events on boundaries. This is done + # by setting the effective visibility to the least restrictive + # of the old vs new. + if event.type == EventTypes.RoomHistoryVisibility: + prev_content = event.unsigned.get("prev_content", {}) + prev_visibility = prev_content.get("history_visibility", None) + + if prev_visibility not in VISIBILITY_PRIORITY: + prev_visibility = "shared" + + new_priority = VISIBILITY_PRIORITY.index(visibility) + old_priority = VISIBILITY_PRIORITY.index(prev_visibility) + if old_priority < new_priority: + visibility = prev_visibility + # get the user's membership at the time of the event. (or rather, # just *after* the event. Which means that people can see their # own join events, but not (currently) their own leave events.) From 874fd432575044c6cd93ee4f311412f677e6b7fe Mon Sep 17 00:00:00 2001 From: David Baker Date: Mon, 7 Mar 2016 17:13:56 +0000 Subject: [PATCH 285/349] Send the user ID matching the guest access token, since there is no Matrix API to discover what user ID an access token is for. --- synapse/api/auth.py | 4 ++-- synapse/handlers/room.py | 5 +++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 183245443c..3038df4ab8 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -534,7 +534,7 @@ class Auth(object): ) access_token = request.args["access_token"][0] - user_info = yield self._get_user_by_access_token(access_token) + user_info = yield self.get_user_by_access_token(access_token) user = user_info["user"] token_id = user_info["token_id"] is_guest = user_info["is_guest"] @@ -595,7 +595,7 @@ class Auth(object): defer.returnValue(user_id) @defer.inlineCallbacks - def _get_user_by_access_token(self, token): + def get_user_by_access_token(self, token): """ Get a registered user's ID. 
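For the "Send history visibility on boundary changes" patch above: the boundary rule treats an m.room.history_visibility event as having the less restrictive of the old and new settings, so users who could read the room before the change can also see the event that changed it. A standalone sketch of that comparison (not the handler code itself):

    VISIBILITY_PRIORITY = ("world_readable", "shared", "invited", "joined")

    def effective_visibility(new_setting, prev_setting):
        new_setting = new_setting if new_setting in VISIBILITY_PRIORITY else "shared"
        prev_setting = prev_setting if prev_setting in VISIBILITY_PRIORITY else "shared"
        # Lower index == less restrictive; the boundary event uses the laxer of the two.
        return min(new_setting, prev_setting, key=VISIBILITY_PRIORITY.index)

    # e.g. effective_visibility("joined", "shared") == "shared"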
Args: diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 0cb6c521c4..57113ae4a5 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -884,6 +884,10 @@ class RoomMemberHandler(BaseHandler): inviter_user_id=inviter_user_id, ) + guest_user_info = yield self.hs.get_auth().get_user_by_access_token( + guest_access_token + ) + is_url = "%s%s/_matrix/identity/api/v1/store-invite" % ( id_server_scheme, id_server, ) @@ -900,6 +904,7 @@ class RoomMemberHandler(BaseHandler): "sender": inviter_user_id, "sender_display_name": inviter_display_name, "sender_avatar_url": inviter_avatar_url, + "guest_user_id": guest_user_info["user"].to_string(), "guest_access_token": guest_access_token, } ) From 316c00936f1420927fe46aff3e0ab7c476f2f4ed Mon Sep 17 00:00:00 2001 From: David Baker Date: Mon, 7 Mar 2016 17:32:36 +0000 Subject: [PATCH 286/349] Fix tests --- tests/rest/client/v1/test_rooms.py | 32 +++++++++++++------------- tests/rest/client/v1/test_typing.py | 4 ++-- tests/rest/client/v2_alpha/__init__.py | 4 ++-- 3 files changed, 20 insertions(+), 20 deletions(-) diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py index fe4fd71272..4ab8b35e6b 100644 --- a/tests/rest/client/v1/test_rooms.py +++ b/tests/rest/client/v1/test_rooms.py @@ -54,13 +54,13 @@ class RoomPermissionsTestCase(RestTestCase): hs.get_handlers().federation_handler = Mock() - def _get_user_by_access_token(token=None, allow_guest=False): + def get_user_by_access_token(token=None, allow_guest=False): return { "user": UserID.from_string(self.auth_user_id), "token_id": 1, "is_guest": False, } - hs.get_v1auth()._get_user_by_access_token = _get_user_by_access_token + hs.get_v1auth().get_user_by_access_token = get_user_by_access_token def _insert_client_ip(*args, **kwargs): return defer.succeed(None) @@ -419,13 +419,13 @@ class RoomsMemberListTestCase(RestTestCase): self.auth_user_id = self.user_id - def _get_user_by_access_token(token=None, allow_guest=False): + def get_user_by_access_token(token=None, allow_guest=False): return { "user": UserID.from_string(self.auth_user_id), "token_id": 1, "is_guest": False, } - hs.get_v1auth()._get_user_by_access_token = _get_user_by_access_token + hs.get_v1auth().get_user_by_access_token = get_user_by_access_token def _insert_client_ip(*args, **kwargs): return defer.succeed(None) @@ -501,13 +501,13 @@ class RoomsCreateTestCase(RestTestCase): hs.get_handlers().federation_handler = Mock() - def _get_user_by_access_token(token=None, allow_guest=False): + def get_user_by_access_token(token=None, allow_guest=False): return { "user": UserID.from_string(self.auth_user_id), "token_id": 1, "is_guest": False, } - hs.get_v1auth()._get_user_by_access_token = _get_user_by_access_token + hs.get_v1auth().get_user_by_access_token = get_user_by_access_token def _insert_client_ip(*args, **kwargs): return defer.succeed(None) @@ -593,14 +593,14 @@ class RoomTopicTestCase(RestTestCase): hs.get_handlers().federation_handler = Mock() - def _get_user_by_access_token(token=None, allow_guest=False): + def get_user_by_access_token(token=None, allow_guest=False): return { "user": UserID.from_string(self.auth_user_id), "token_id": 1, "is_guest": False, } - hs.get_v1auth()._get_user_by_access_token = _get_user_by_access_token + hs.get_v1auth().get_user_by_access_token = get_user_by_access_token def _insert_client_ip(*args, **kwargs): return defer.succeed(None) @@ -708,13 +708,13 @@ class RoomMemberStateTestCase(RestTestCase): hs.get_handlers().federation_handler = Mock() - 
def _get_user_by_access_token(token=None, allow_guest=False): + def get_user_by_access_token(token=None, allow_guest=False): return { "user": UserID.from_string(self.auth_user_id), "token_id": 1, "is_guest": False, } - hs.get_v1auth()._get_user_by_access_token = _get_user_by_access_token + hs.get_v1auth().get_user_by_access_token = get_user_by_access_token def _insert_client_ip(*args, **kwargs): return defer.succeed(None) @@ -840,13 +840,13 @@ class RoomMessagesTestCase(RestTestCase): hs.get_handlers().federation_handler = Mock() - def _get_user_by_access_token(token=None, allow_guest=False): + def get_user_by_access_token(token=None, allow_guest=False): return { "user": UserID.from_string(self.auth_user_id), "token_id": 1, "is_guest": False, } - hs.get_v1auth()._get_user_by_access_token = _get_user_by_access_token + hs.get_v1auth().get_user_by_access_token = get_user_by_access_token def _insert_client_ip(*args, **kwargs): return defer.succeed(None) @@ -942,13 +942,13 @@ class RoomInitialSyncTestCase(RestTestCase): hs.get_handlers().federation_handler = Mock() - def _get_user_by_access_token(token=None, allow_guest=False): + def get_user_by_access_token(token=None, allow_guest=False): return { "user": UserID.from_string(self.auth_user_id), "token_id": 1, "is_guest": False, } - hs.get_v1auth()._get_user_by_access_token = _get_user_by_access_token + hs.get_v1auth().get_user_by_access_token = get_user_by_access_token def _insert_client_ip(*args, **kwargs): return defer.succeed(None) @@ -1014,13 +1014,13 @@ class RoomMessageListTestCase(RestTestCase): hs.get_handlers().federation_handler = Mock() - def _get_user_by_access_token(token=None, allow_guest=False): + def get_user_by_access_token(token=None, allow_guest=False): return { "user": UserID.from_string(self.auth_user_id), "token_id": 1, "is_guest": False, } - hs.get_v1auth()._get_user_by_access_token = _get_user_by_access_token + hs.get_v1auth().get_user_by_access_token = get_user_by_access_token def _insert_client_ip(*args, **kwargs): return defer.succeed(None) diff --git a/tests/rest/client/v1/test_typing.py b/tests/rest/client/v1/test_typing.py index 16d788ff61..d0037a53ef 100644 --- a/tests/rest/client/v1/test_typing.py +++ b/tests/rest/client/v1/test_typing.py @@ -61,14 +61,14 @@ class RoomTypingTestCase(RestTestCase): hs.get_handlers().federation_handler = Mock() - def _get_user_by_access_token(token=None, allow_guest=False): + def get_user_by_access_token(token=None, allow_guest=False): return { "user": UserID.from_string(self.auth_user_id), "token_id": 1, "is_guest": False, } - hs.get_v1auth()._get_user_by_access_token = _get_user_by_access_token + hs.get_v1auth().get_user_by_access_token = get_user_by_access_token def _insert_client_ip(*args, **kwargs): return defer.succeed(None) diff --git a/tests/rest/client/v2_alpha/__init__.py b/tests/rest/client/v2_alpha/__init__.py index 84334dce34..5170217d9e 100644 --- a/tests/rest/client/v2_alpha/__init__.py +++ b/tests/rest/client/v2_alpha/__init__.py @@ -43,13 +43,13 @@ class V2AlphaRestTestCase(unittest.TestCase): resource_for_federation=self.mock_resource, ) - def _get_user_by_access_token(token=None, allow_guest=False): + def get_user_by_access_token(token=None, allow_guest=False): return { "user": UserID.from_string(self.USER_ID), "token_id": 1, "is_guest": False, } - hs.get_auth()._get_user_by_access_token = _get_user_by_access_token + hs.get_auth().get_user_by_access_token = get_user_by_access_token for r in self.TO_REGISTER: r.register_servlets(hs, self.mock_resource) From 
239badea9be1dd7857833408209ef22dd99773de Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Mon, 7 Mar 2016 20:13:10 +0000 Subject: [PATCH 287/349] Use syntax that works on both py2.7 and py3 --- synapse/app/homeserver.py | 2 +- synapse/app/synctl.py | 6 +++--- synapse/config/__main__.py | 2 +- synapse/config/_base.py | 2 +- synapse/handlers/federation.py | 2 +- synapse/handlers/register.py | 2 +- synapse/rest/client/v1/login.py | 2 +- synapse/util/caches/expiringcache.py | 2 +- 8 files changed, 10 insertions(+), 10 deletions(-) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 021dc1d610..fcdc8e6e10 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -722,7 +722,7 @@ def run(hs): if hs.config.daemonize: if hs.config.print_pidfile: - print hs.config.pid_file + print (hs.config.pid_file) daemon = Daemonize( app="synapse-homeserver", diff --git a/synapse/app/synctl.py b/synapse/app/synctl.py index 9249e36d82..ab3a31d7b7 100755 --- a/synapse/app/synctl.py +++ b/synapse/app/synctl.py @@ -29,13 +29,13 @@ NORMAL = "\x1b[m" def start(configfile): - print "Starting ...", + print ("Starting ...") args = SYNAPSE args.extend(["--daemonize", "-c", configfile]) try: subprocess.check_call(args) - print GREEN + "started" + NORMAL + print (GREEN + "started" + NORMAL) except subprocess.CalledProcessError as e: print ( RED + @@ -48,7 +48,7 @@ def stop(pidfile): if os.path.exists(pidfile): pid = int(open(pidfile).read()) os.kill(pid, signal.SIGTERM) - print GREEN + "stopped" + NORMAL + print (GREEN + "stopped" + NORMAL) def main(): diff --git a/synapse/config/__main__.py b/synapse/config/__main__.py index 0a3b70e11f..58c97a70af 100644 --- a/synapse/config/__main__.py +++ b/synapse/config/__main__.py @@ -28,7 +28,7 @@ if __name__ == "__main__": sys.stderr.write("\n" + e.message + "\n") sys.exit(1) - print getattr(config, key) + print (getattr(config, key)) sys.exit(0) else: sys.stderr.write("Unknown command %r\n" % (action,)) diff --git a/synapse/config/_base.py b/synapse/config/_base.py index 15d78ff33a..7449f36491 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py @@ -104,7 +104,7 @@ class Config(object): dir_path = cls.abspath(dir_path) try: os.makedirs(dir_path) - except OSError, e: + except OSError as e: if e.errno != errno.EEXIST: raise if not os.path.isdir(dir_path): diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 6e50b0963e..27f2b40bfe 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -472,7 +472,7 @@ class FederationHandler(BaseHandler): limit=100, extremities=[e for e in extremities.keys()] ) - except SynapseError: + except SynapseError as e: logger.info( "Failed to backfill from %s because %s", dom, e, diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index c5e5b28811..e2ace6a4e5 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -241,7 +241,7 @@ class RegistrationHandler(BaseHandler): password_hash=None ) yield registered_user(self.distributor, user) - except Exception, e: + except Exception as e: yield self.store.add_access_token_to_user(user_id, token) # Ignore Registration errors logger.exception(e) diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index f13272da8e..c14e8af00e 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -252,7 +252,7 @@ class SAML2RestServlet(ClientV1RestServlet): SP = Saml2Client(conf) saml2_auth = SP.parse_authn_request_response( 
request.args['SAMLResponse'][0], BINDING_HTTP_POST) - except Exception, e: # Not authenticated + except Exception as e: # Not authenticated logger.exception(e) if saml2_auth and saml2_auth.status_ok() and not saml2_auth.not_signed: username = saml2_auth.name_id.text diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py index e863a8f8a9..2b68c1ac93 100644 --- a/synapse/util/caches/expiringcache.py +++ b/synapse/util/caches/expiringcache.py @@ -69,7 +69,7 @@ class ExpiringCache(object): if self._max_len and len(self._cache.keys()) > self._max_len: sorted_entries = sorted( self._cache.items(), - key=lambda (k, v): v.time, + key=lambda item: item[1].time, ) for k, _ in sorted_entries[self._max_len:]: From 7bcee4733a05b239161d73461abd4d0e32caf4ac Mon Sep 17 00:00:00 2001 From: David Baker Date: Tue, 8 Mar 2016 10:04:38 +0000 Subject: [PATCH 288/349] Encode unicode objects given to post_urlencode* otherwise urllib.urlencode chokes. --- synapse/http/client.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/synapse/http/client.py b/synapse/http/client.py index fdd90b1c3c..b3cd5ff26a 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -103,7 +103,7 @@ class SimpleHttpClient(object): # TODO: Do we ever want to log message contents? logger.debug("post_urlencoded_get_json args: %s", args) - query_bytes = urllib.urlencode(args, True) + query_bytes = urllib.urlencode(encode_urlencode_args(args), True) response = yield self.request( "POST", @@ -249,7 +249,7 @@ class CaptchaServerHttpClient(SimpleHttpClient): @defer.inlineCallbacks def post_urlencoded_get_raw(self, url, args={}): - query_bytes = urllib.urlencode(args, True) + query_bytes = urllib.urlencode(encode_urlencode_args(args), True) response = yield self.request( "POST", @@ -268,6 +268,16 @@ class CaptchaServerHttpClient(SimpleHttpClient): # twisted dislikes google's response, no content length. defer.returnValue(e.response) +def encode_urlencode_args(args): + return { k: encode_urlencode_arg(v) for k, v in args.items() } + +def encode_urlencode_arg(arg): + if isinstance(arg, unicode): + return arg.encode('utf-8') + elif isinstance(arg, list): + return [ encode_urlencode_arg(i) for i in arg ] + else: + return arg def _print_ex(e): if hasattr(e, "reasons") and e.reasons: From 9a3c80a348b850a04e257b30ce41f0a7c453594b Mon Sep 17 00:00:00 2001 From: David Baker Date: Tue, 8 Mar 2016 10:09:07 +0000 Subject: [PATCH 289/349] pep8 --- synapse/http/client.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/synapse/http/client.py b/synapse/http/client.py index b3cd5ff26a..cbd45b2bbe 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -268,17 +268,20 @@ class CaptchaServerHttpClient(SimpleHttpClient): # twisted dislikes google's response, no content length. 
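A quick illustration of the encode_urlencode_args helper introduced above (argument names and values are made up): unicode leaves are UTF-8 encoded into byte strings, which is what urllib.urlencode expects on Python 2.

    # Python 2 semantics (str == bytes); keys are left untouched.
    args = {"user": u"@alice:\u00e9xample.org", "medium": ["email", u"msisdn"]}
    encoded = encode_urlencode_args(args)
    # encoded == {"user": "@alice:\xc3\xa9xample.org", "medium": ["email", "msisdn"]}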
defer.returnValue(e.response) + def encode_urlencode_args(args): - return { k: encode_urlencode_arg(v) for k, v in args.items() } + return {k: encode_urlencode_arg(v) for k, v in args.items()} + def encode_urlencode_arg(arg): if isinstance(arg, unicode): return arg.encode('utf-8') elif isinstance(arg, list): - return [ encode_urlencode_arg(i) for i in arg ] + return [encode_urlencode_arg(i) for i in arg] else: return arg + def _print_ex(e): if hasattr(e, "reasons") and e.reasons: for ex in e.reasons: From 7076082ae677b280c5b68df37b1fee2fc72752ff Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Tue, 8 Mar 2016 11:45:50 +0000 Subject: [PATCH 290/349] Fix relative imports so they work in both py3 and py27 --- synapse/push/__init__.py | 4 ++-- synapse/push/action_generator.py | 4 ++-- synapse/push/bulk_push_rule_evaluator.py | 6 +++--- synapse/push/push_rule_evaluator.py | 4 ++-- synapse/push/pusherpool.py | 2 +- synapse/rest/client/v1/admin.py | 2 +- synapse/rest/client/v1/initial_sync.py | 2 +- synapse/rest/client/v1/login.py | 2 +- synapse/rest/client/v1/register.py | 2 +- synapse/rest/client/v1/room.py | 2 +- synapse/rest/client/v1/voip.py | 2 +- synapse/storage/__init__.py | 2 +- synapse/storage/end_to_end_keys.py | 2 +- synapse/storage/events.py | 2 +- synapse/storage/keys.py | 2 +- synapse/storage/media_repository.py | 2 +- synapse/storage/signatures.py | 2 +- 17 files changed, 22 insertions(+), 22 deletions(-) diff --git a/synapse/push/__init__.py b/synapse/push/__init__.py index 4c6c3b83a2..65ef1b68a3 100644 --- a/synapse/push/__init__.py +++ b/synapse/push/__init__.py @@ -21,7 +21,7 @@ from synapse.util.logcontext import LoggingContext from synapse.util.metrics import Measure import synapse.util.async -import push_rule_evaluator as push_rule_evaluator +from .push_rule_evaluator import evaluator_for_user_id import logging import random @@ -185,7 +185,7 @@ class Pusher(object): processed = False rule_evaluator = yield \ - push_rule_evaluator.evaluator_for_user_id( + evaluator_for_user_id( self.user_id, single_event['room_id'], self.store ) diff --git a/synapse/push/action_generator.py b/synapse/push/action_generator.py index c6c1dc769e..84efcdd184 100644 --- a/synapse/push/action_generator.py +++ b/synapse/push/action_generator.py @@ -15,7 +15,7 @@ from twisted.internet import defer -import bulk_push_rule_evaluator +from .bulk_push_rule_evaluator import evaluator_for_room_id import logging @@ -35,7 +35,7 @@ class ActionGenerator: @defer.inlineCallbacks def handle_push_actions_for_event(self, event, context, handler): - bulk_evaluator = yield bulk_push_rule_evaluator.evaluator_for_room_id( + bulk_evaluator = yield evaluator_for_room_id( event.room_id, self.hs, self.store ) diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index 5d8be483e5..87d5061fb0 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -18,8 +18,8 @@ import ujson as json from twisted.internet import defer -import baserules -from push_rule_evaluator import PushRuleEvaluatorForEvent +from .baserules import list_with_base_rules +from .push_rule_evaluator import PushRuleEvaluatorForEvent from synapse.api.constants import EventTypes @@ -39,7 +39,7 @@ def _get_rules(room_id, user_ids, store): rules_enabled_by_user = yield store.bulk_get_push_rules_enabled(user_ids) rules_by_user = { - uid: baserules.list_with_base_rules([ + uid: list_with_base_rules([ decode_rule_json(rule_list) for rule_list in rules_by_user.get(uid, []) ]) diff 
--git a/synapse/push/push_rule_evaluator.py b/synapse/push/push_rule_evaluator.py index 98e2a2015e..51f73a5b78 100644 --- a/synapse/push/push_rule_evaluator.py +++ b/synapse/push/push_rule_evaluator.py @@ -15,7 +15,7 @@ from twisted.internet import defer -import baserules +from .baserules import list_with_base_rules import logging import simplejson as json @@ -91,7 +91,7 @@ class PushRuleEvaluator: rule['actions'] = json.loads(raw_rule['actions']) rules.append(rule) - self.rules = baserules.list_with_base_rules(rules) + self.rules = list_with_base_rules(rules) self.enabled_map = enabled_map diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index a05aa5f661..772a095f8b 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -16,7 +16,7 @@ from twisted.internet import defer -from httppusher import HttpPusher +from .httppusher import HttpPusher from synapse.push import PusherConfigException from synapse.util.logcontext import preserve_fn diff --git a/synapse/rest/client/v1/admin.py b/synapse/rest/client/v1/admin.py index e2f5eb7b29..aa05b3f023 100644 --- a/synapse/rest/client/v1/admin.py +++ b/synapse/rest/client/v1/admin.py @@ -18,7 +18,7 @@ from twisted.internet import defer from synapse.api.errors import AuthError, SynapseError from synapse.types import UserID -from base import ClientV1RestServlet, client_path_patterns +from .base import ClientV1RestServlet, client_path_patterns import logging diff --git a/synapse/rest/client/v1/initial_sync.py b/synapse/rest/client/v1/initial_sync.py index ad161bdbab..36c3520567 100644 --- a/synapse/rest/client/v1/initial_sync.py +++ b/synapse/rest/client/v1/initial_sync.py @@ -16,7 +16,7 @@ from twisted.internet import defer from synapse.streams.config import PaginationConfig -from base import ClientV1RestServlet, client_path_patterns +from .base import ClientV1RestServlet, client_path_patterns # TODO: Needs unit testing diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index c14e8af00e..f6902a60a8 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -19,7 +19,7 @@ from synapse.api.errors import SynapseError, LoginError, Codes from synapse.types import UserID from synapse.http.server import finish_request -from base import ClientV1RestServlet, client_path_patterns +from .base import ClientV1RestServlet, client_path_patterns import simplejson as json import urllib diff --git a/synapse/rest/client/v1/register.py b/synapse/rest/client/v1/register.py index 6d6d03c34c..040a7a7ffa 100644 --- a/synapse/rest/client/v1/register.py +++ b/synapse/rest/client/v1/register.py @@ -18,7 +18,7 @@ from twisted.internet import defer from synapse.api.errors import SynapseError, Codes from synapse.api.constants import LoginType -from base import ClientV1RestServlet, client_path_patterns +from .base import ClientV1RestServlet, client_path_patterns import synapse.util.stringutils as stringutils from synapse.util.async import run_on_reactor diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index cbf3673eff..4b7d198c52 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -16,7 +16,7 @@ """ This module contains REST servlets to do with rooms: /rooms/ """ from twisted.internet import defer -from base import ClientV1RestServlet, client_path_patterns +from .base import ClientV1RestServlet, client_path_patterns from synapse.api.errors import SynapseError, Codes, AuthError from synapse.streams.config import PaginationConfig from 
synapse.api.constants import EventTypes, Membership diff --git a/synapse/rest/client/v1/voip.py b/synapse/rest/client/v1/voip.py index ec4cf8db79..c40442f958 100644 --- a/synapse/rest/client/v1/voip.py +++ b/synapse/rest/client/v1/voip.py @@ -15,7 +15,7 @@ from twisted.internet import defer -from base import ClientV1RestServlet, client_path_patterns +from .base import ClientV1RestServlet, client_path_patterns import hmac diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 6f37a85d09..168eb27b03 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -45,7 +45,7 @@ from .search import SearchStore from .tags import TagsStore from .account_data import AccountDataStore -from util.id_generators import IdGenerator, StreamIdGenerator, ChainedIdGenerator +from .util.id_generators import IdGenerator, StreamIdGenerator, ChainedIdGenerator from synapse.api.constants import PresenceState from synapse.util.caches.stream_change_cache import StreamChangeCache diff --git a/synapse/storage/end_to_end_keys.py b/synapse/storage/end_to_end_keys.py index 5dd32b1413..2e89066515 100644 --- a/synapse/storage/end_to_end_keys.py +++ b/synapse/storage/end_to_end_keys.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from _base import SQLBaseStore +from ._base import SQLBaseStore class EndToEndKeyStore(SQLBaseStore): diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 60936500d8..552e7ca35b 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -12,7 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from _base import SQLBaseStore, _RollbackButIsFineException +from ._base import SQLBaseStore, _RollbackButIsFineException from twisted.internet import defer, reactor diff --git a/synapse/storage/keys.py b/synapse/storage/keys.py index fd05bfe54e..a495a8a7d9 100644 --- a/synapse/storage/keys.py +++ b/synapse/storage/keys.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from _base import SQLBaseStore +from ._base import SQLBaseStore from synapse.util.caches.descriptors import cachedInlineCallbacks from twisted.internet import defer diff --git a/synapse/storage/media_repository.py b/synapse/storage/media_repository.py index 0894384780..9d3ba32478 100644 --- a/synapse/storage/media_repository.py +++ b/synapse/storage/media_repository.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
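Background for the mechanical changes in this patch: Python 3 dropped implicit relative imports (PEP 328), so a bare "import baserules" from inside a package no longer resolves, while the explicit dotted form works on both 2.7 and 3:

    # Inside a package, only the explicit forms resolve on Python 3:
    #   import baserules                              # py2 only
    #   from . import baserules                       # py2 and py3
    #   from .baserules import list_with_base_rules   # py2 and py3, as used here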
-from _base import SQLBaseStore +from ._base import SQLBaseStore class MediaRepositoryStore(SQLBaseStore): diff --git a/synapse/storage/signatures.py b/synapse/storage/signatures.py index 70c6a06cd1..b10f2a5787 100644 --- a/synapse/storage/signatures.py +++ b/synapse/storage/signatures.py @@ -15,7 +15,7 @@ from twisted.internet import defer -from _base import SQLBaseStore +from ._base import SQLBaseStore from unpaddedbase64 import encode_base64 from synapse.crypto.event_signing import compute_event_reference_hash From edca2d989137680093c4acdcaf6ca4029f11a335 Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Tue, 8 Mar 2016 17:32:29 +0000 Subject: [PATCH 291/349] Idempotent-ise schema update script If any ASes don't have an ID, the schema will fail, and then it will error when trying to add the column again. --- synapse/storage/schema/delta/30/as_users.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/synapse/storage/schema/delta/30/as_users.py b/synapse/storage/schema/delta/30/as_users.py index 4cf4dd0917..4da3c59de2 100644 --- a/synapse/storage/schema/delta/30/as_users.py +++ b/synapse/storage/schema/delta/30/as_users.py @@ -20,7 +20,11 @@ logger = logging.getLogger(__name__) def run_upgrade(cur, database_engine, config, *args, **kwargs): # NULL indicates user was not registered by an appservice. - cur.execute("ALTER TABLE users ADD COLUMN appservice_id TEXT") + try: + cur.execute("ALTER TABLE users ADD COLUMN appservice_id TEXT") + except: + # Maybe we already added the column? Hope so... + pass cur.execute("SELECT name FROM users") rows = cur.fetchall() From 1748d4b739824dfee92a8ea65a0cd95839af8f25 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 8 Mar 2016 23:37:12 +0000 Subject: [PATCH 292/349] Use sytest build scripts, rather than reinventing the wheel --- jenkins-postgres.sh | 44 +++++--------------------------------------- jenkins-sqlite.sh | 15 ++------------- 2 files changed, 7 insertions(+), 52 deletions(-) diff --git a/jenkins-postgres.sh b/jenkins-postgres.sh index d1fed590a2..e9f22cc04b 100755 --- a/jenkins-postgres.sh +++ b/jenkins-postgres.sh @@ -24,9 +24,10 @@ rm .coverage* || echo "No coverage files to remove" tox --notest -: ${GIT_BRANCH:="origin/$(git rev-parse --abbrev-ref HEAD)"} - TOX_BIN=$WORKSPACE/.tox/py27/bin +$TOX_BIN/pip install psycopg2 + +: ${GIT_BRANCH:="origin/$(git rev-parse --abbrev-ref HEAD)"} if [[ ! 
-e .sytest-base ]]; then git clone https://github.com/matrix-org/sytest.git .sytest-base --mirror @@ -40,47 +41,12 @@ cd sytest git checkout "${GIT_BRANCH}" || (echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop" ; git checkout develop) -: ${PERL5LIB:=$WORKSPACE/perl5/lib/perl5} -: ${PERL_MB_OPT:=--install_base=$WORKSPACE/perl5} -: ${PERL_MM_OPT:=INSTALL_BASE=$WORKSPACE/perl5} -export PERL5LIB PERL_MB_OPT PERL_MM_OPT - -./install-deps.pl - : ${PORT_BASE:=8000} +./jenkins/prep_sytest_for_postgres.sh -if [[ -z "$POSTGRES_DB_1" ]]; then - echo >&2 "Variable POSTGRES_DB_1 not set" - exit 1 -fi - -if [[ -z "$POSTGRES_DB_2" ]]; then - echo >&2 "Variable POSTGRES_DB_2 not set" - exit 1 -fi - -mkdir -p "localhost-$(($PORT_BASE + 1))" -mkdir -p "localhost-$(($PORT_BASE + 2))" - -cat > localhost-$(($PORT_BASE + 1))/database.yaml << EOF -name: psycopg2 -args: - database: $POSTGRES_DB_1 -EOF - -cat > localhost-$(($PORT_BASE + 2))/database.yaml << EOF -name: psycopg2 -args: - database: $POSTGRES_DB_2 -EOF - - -# Run if both postgresql databases exist echo >&2 "Running sytest with PostgreSQL"; -$TOX_BIN/pip install psycopg2 -./run-tests.pl --coverage -O tap --synapse-directory $WORKSPACE \ - --python $TOX_BIN/python --all --port-base $PORT_BASE > results-postgresql.tap +./jenkins/install_and_run.sh --python $TOX_BIN/python --port-base $PORT_BASE cd .. cp sytest/.coverage.* . diff --git a/jenkins-sqlite.sh b/jenkins-sqlite.sh index 57fd8de54d..a68f491cab 100755 --- a/jenkins-sqlite.sh +++ b/jenkins-sqlite.sh @@ -23,11 +23,10 @@ export PEP8SUFFIX="--output-file=violations.flake8.log || echo flake8 finished w rm .coverage* || echo "No coverage files to remove" tox --notest +TOX_BIN=$WORKSPACE/.tox/py27/bin : ${GIT_BRANCH:="origin/$(git rev-parse --abbrev-ref HEAD)"} -TOX_BIN=$WORKSPACE/.tox/py27/bin - if [[ ! -e .sytest-base ]]; then git clone https://github.com/matrix-org/sytest.git .sytest-base --mirror else @@ -40,18 +39,8 @@ cd sytest git checkout "${GIT_BRANCH}" || (echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop" ; git checkout develop) -: ${PERL5LIB:=$WORKSPACE/perl5/lib/perl5} -: ${PERL_MB_OPT:=--install_base=$WORKSPACE/perl5} -: ${PERL_MM_OPT:=INSTALL_BASE=$WORKSPACE/perl5} -export PERL5LIB PERL_MB_OPT PERL_MM_OPT - -./install-deps.pl - : ${PORT_BASE:=8500} - -echo >&2 "Running sytest with SQLite3"; -./run-tests.pl --coverage -O tap --synapse-directory $WORKSPACE \ - --python $TOX_BIN/python --all --port-base $PORT_BASE > results-sqlite3.tap +./jenkins/install_and_run.sh --python $TOX_BIN/python --port-base $PORT_BASE cd .. cp sytest/.coverage.* . 
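Stepping back to the "Idempotent-ise schema update script" patch above: the try/except around the ALTER TABLE works because re-adding an existing column raises an error that can simply be swallowed on a second run. A self-contained sqlite3 sketch of that behaviour (toy table, not Synapse's migration runner):

    import sqlite3

    db = sqlite3.connect(":memory:")
    db.execute("CREATE TABLE users (name TEXT)")

    def add_appservice_id_column(db):
        try:
            db.execute("ALTER TABLE users ADD COLUMN appservice_id TEXT")
        except sqlite3.OperationalError:
            # Column already present from an earlier (possibly failed) run.
            pass

    add_appservice_id_column(db)  # first run: adds the column
    add_appservice_id_column(db)  # second run: no-op instead of crashing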
From 866d0e7cb8483789b61040d7cc5ef7155bcfcf62 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 8 Mar 2016 23:47:50 +0000 Subject: [PATCH 293/349] Only build py27 tox env for integ tests --- jenkins-postgres.sh | 2 +- jenkins-sqlite.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/jenkins-postgres.sh b/jenkins-postgres.sh index e9f22cc04b..14e60482cd 100755 --- a/jenkins-postgres.sh +++ b/jenkins-postgres.sh @@ -22,7 +22,7 @@ export PEP8SUFFIX="--output-file=violations.flake8.log || echo flake8 finished w rm .coverage* || echo "No coverage files to remove" -tox --notest +tox --notest -e py27 TOX_BIN=$WORKSPACE/.tox/py27/bin $TOX_BIN/pip install psycopg2 diff --git a/jenkins-sqlite.sh b/jenkins-sqlite.sh index a68f491cab..9e474aa9f6 100755 --- a/jenkins-sqlite.sh +++ b/jenkins-sqlite.sh @@ -22,7 +22,7 @@ export PEP8SUFFIX="--output-file=violations.flake8.log || echo flake8 finished w rm .coverage* || echo "No coverage files to remove" -tox --notest +tox --notest -e py27 TOX_BIN=$WORKSPACE/.tox/py27/bin : ${GIT_BRANCH:="origin/$(git rev-parse --abbrev-ref HEAD)"} From 4814e7c9b2b9cc14d2890483747bcb1655579ec5 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 9 Mar 2016 00:06:57 +0000 Subject: [PATCH 294/349] Specify synapse-directory for integ tests --- jenkins-postgres.sh | 4 +++- jenkins-sqlite.sh | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/jenkins-postgres.sh b/jenkins-postgres.sh index 14e60482cd..876fabdc71 100755 --- a/jenkins-postgres.sh +++ b/jenkins-postgres.sh @@ -46,7 +46,9 @@ git checkout "${GIT_BRANCH}" || (echo >&2 "No ref ${GIT_BRANCH} found, falling b ./jenkins/prep_sytest_for_postgres.sh echo >&2 "Running sytest with PostgreSQL"; -./jenkins/install_and_run.sh --python $TOX_BIN/python --port-base $PORT_BASE +./jenkins/install_and_run.sh --python $TOX_BIN/python \ + --synapse-directory $WORKSPACE \ + --port-base $PORT_BASE cd .. cp sytest/.coverage.* . diff --git a/jenkins-sqlite.sh b/jenkins-sqlite.sh index 9e474aa9f6..ff6664276d 100755 --- a/jenkins-sqlite.sh +++ b/jenkins-sqlite.sh @@ -40,7 +40,9 @@ cd sytest git checkout "${GIT_BRANCH}" || (echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop" ; git checkout develop) : ${PORT_BASE:=8500} -./jenkins/install_and_run.sh --python $TOX_BIN/python --port-base $PORT_BASE +./jenkins/install_and_run.sh --python $TOX_BIN/python \ + --synapse-directory $WORKSPACE \ + --port-base $PORT_BASE cd .. cp sytest/.coverage.* . 
From ce829c2aefc5e40eacbf6b4bce8a57fe54ae4a1c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 9 Mar 2016 00:14:19 +0000 Subject: [PATCH 295/349] Reinstate coverage checks for integ tests --- jenkins-postgres.sh | 3 ++- jenkins-sqlite.sh | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/jenkins-postgres.sh b/jenkins-postgres.sh index 876fabdc71..9ac86d2593 100755 --- a/jenkins-postgres.sh +++ b/jenkins-postgres.sh @@ -46,7 +46,8 @@ git checkout "${GIT_BRANCH}" || (echo >&2 "No ref ${GIT_BRANCH} found, falling b ./jenkins/prep_sytest_for_postgres.sh echo >&2 "Running sytest with PostgreSQL"; -./jenkins/install_and_run.sh --python $TOX_BIN/python \ +./jenkins/install_and_run.sh --coverage \ + --python $TOX_BIN/python \ --synapse-directory $WORKSPACE \ --port-base $PORT_BASE diff --git a/jenkins-sqlite.sh b/jenkins-sqlite.sh index ff6664276d..345d01936c 100755 --- a/jenkins-sqlite.sh +++ b/jenkins-sqlite.sh @@ -40,7 +40,8 @@ cd sytest git checkout "${GIT_BRANCH}" || (echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop" ; git checkout develop) : ${PORT_BASE:=8500} -./jenkins/install_and_run.sh --python $TOX_BIN/python \ +./jenkins/install_and_run.sh --coverage \ + --python $TOX_BIN/python \ --synapse-directory $WORKSPACE \ --port-base $PORT_BASE From 158a322e8274b8ed031a20a00a3700d0798ae1c2 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 9 Mar 2016 10:20:48 +0000 Subject: [PATCH 296/349] Ensure integer is an integer --- synapse/storage/util/id_generators.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index af425ba9a4..610ddad423 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -34,7 +34,7 @@ def _load_max_id(db_conn, table, column): cur.execute("SELECT MAX(%s) FROM %s" % (column, table,)) val, = cur.fetchone() cur.close() - return val if val else 1 + return int(val) if val else 1 class StreamIdGenerator(object): From b7dbe5147a85bea8f14b78a27ff499fe5a0d444a Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Wed, 9 Mar 2016 11:26:26 +0000 Subject: [PATCH 297/349] Add a parse_json_object function to deduplicate all the copy+pasted _parse_json functions. Also document the parse_.* functions. --- synapse/http/servlet.py | 70 ++++++++++++++++++-- synapse/rest/client/v1/directory.py | 16 +---- synapse/rest/client/v1/login.py | 15 +---- synapse/rest/client/v1/push_rule.py | 16 +---- synapse/rest/client/v1/pusher.py | 17 +---- synapse/rest/client/v1/register.py | 14 +--- synapse/rest/client/v1/room.py | 26 +++----- synapse/rest/client/v2_alpha/_base.py | 22 ------ synapse/rest/client/v2_alpha/account.py | 8 +-- synapse/rest/client/v2_alpha/register.py | 8 +-- synapse/rest/client/v2_alpha/tokenrefresh.py | 6 +- 11 files changed, 97 insertions(+), 121 deletions(-) diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py index 7bd87940b4..41c519c000 100644 --- a/synapse/http/servlet.py +++ b/synapse/http/servlet.py @@ -15,14 +15,27 @@ """ This module contains base REST classes for constructing REST servlets. """ -from synapse.api.errors import SynapseError +from synapse.api.errors import SynapseError, Codes import logging +import simplejson logger = logging.getLogger(__name__) def parse_integer(request, name, default=None, required=False): + """Parse an integer parameter from the request string + + :param request: the twisted HTTP request. + :param name (str): the name of the query parameter. 
+ :param default: value to use if the parameter is absent, defaults to None. + :param required (bool): whether to raise a 400 SynapseError if the + parameter is absent, defaults to False. + :return: An int value or the default. + :raises + SynapseError if the parameter is absent and required, or if the + parameter is present and not an integer. + """ if name in request.args: try: return int(request.args[name][0]) @@ -32,12 +45,25 @@ def parse_integer(request, name, default=None, required=False): else: if required: message = "Missing integer query parameter %r" % (name,) - raise SynapseError(400, message) + raise SynapseError(400, message, errcode=Codes.MISSING_PARAM) else: return default def parse_boolean(request, name, default=None, required=False): + """Parse a boolean parameter from the request query string + + :param request: the twisted HTTP request. + :param name (str): the name of the query parameter. + :param default: value to use if the parameter is absent, defaults to None. + :param required (bool): whether to raise a 400 SynapseError if the + parameter is absent, defaults to False. + :return: A bool value or the default. + :raises + SynapseError if the parameter is absent and required, or if the + parameter is present and not one of "true" or "false". + """ + if name in request.args: try: return { @@ -53,30 +79,64 @@ def parse_boolean(request, name, default=None, required=False): else: if required: message = "Missing boolean query parameter %r" % (name,) - raise SynapseError(400, message) + raise SynapseError(400, message, errcode=Codes.MISSING_PARAM) else: return default def parse_string(request, name, default=None, required=False, allowed_values=None, param_type="string"): + """Parse a string parameter from the request query string. + + :param request: the twisted HTTP request. + :param name (str): the name of the query parameter. + :param default: value to use if the parameter is absent, defaults to None. + :param required (bool): whether to raise a 400 SynapseError if the + parameter is absent, defaults to False. + :param allowed_values (list): List of allowed values for the string, + or None if any value is allowed, defaults to None + :return: A string value or the default. + :raises + SynapseError if the parameter is absent and required, or if the + parameter is present, must be one of a list of allowed values and + is not one of those allowed values. + """ + if name in request.args: value = request.args[name][0] if allowed_values is not None and value not in allowed_values: message = "Query parameter %r must be one of [%s]" % ( name, ", ".join(repr(v) for v in allowed_values) ) - raise SynapseError(message) + raise SynapseError(400, message) else: return value else: if required: message = "Missing %s query parameter %r" % (param_type, name) - raise SynapseError(400, message) + raise SynapseError(400, message, errcode=Codes.MISSING_PARAM) else: return default +def parse_json_object_from_request(request): + """Parse a JSON object from the body of a twisted HTTP request. + + :param request: the twisted HTTP request. + :raises + SynapseError if the request body couldn't be decoded as JSON or + if it wasn't a JSON object. + """ + try: + content = simplejson.loads(request.content.read()) + if type(content) != dict: + message = "Content must be a JSON object." 
+ raise SynapseError(400, message, errcode=Codes.BAD_JSON) + return content + except simplejson.JSONDecodeError: + raise SynapseError(400, "Content not JSON.", errcode=Codes.NOT_JSON) + + class RestServlet(object): """ A Synapse REST Servlet. diff --git a/synapse/rest/client/v1/directory.py b/synapse/rest/client/v1/directory.py index 8bfe9fdea8..60c5ec77aa 100644 --- a/synapse/rest/client/v1/directory.py +++ b/synapse/rest/client/v1/directory.py @@ -18,9 +18,10 @@ from twisted.internet import defer from synapse.api.errors import AuthError, SynapseError, Codes from synapse.types import RoomAlias +from synapse.http.servlet import parse_json_object_from_request + from .base import ClientV1RestServlet, client_path_patterns -import simplejson as json import logging @@ -45,7 +46,7 @@ class ClientDirectoryServer(ClientV1RestServlet): @defer.inlineCallbacks def on_PUT(self, request, room_alias): - content = _parse_json(request) + content = parse_json_object_from_request(request) if "room_id" not in content: raise SynapseError(400, "Missing room_id key", errcode=Codes.BAD_JSON) @@ -135,14 +136,3 @@ class ClientDirectoryServer(ClientV1RestServlet): ) defer.returnValue((200, {})) - - -def _parse_json(request): - try: - content = json.loads(request.content.read()) - if type(content) != dict: - raise SynapseError(400, "Content must be a JSON object.", - errcode=Codes.NOT_JSON) - return content - except ValueError: - raise SynapseError(400, "Content not JSON.", errcode=Codes.NOT_JSON) diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index f6902a60a8..fe593d07ce 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -18,6 +18,7 @@ from twisted.internet import defer from synapse.api.errors import SynapseError, LoginError, Codes from synapse.types import UserID from synapse.http.server import finish_request +from synapse.http.servlet import parse_json_object_from_request from .base import ClientV1RestServlet, client_path_patterns @@ -79,7 +80,7 @@ class LoginRestServlet(ClientV1RestServlet): @defer.inlineCallbacks def on_POST(self, request): - login_submission = _parse_json(request) + login_submission = parse_json_object_from_request(request) try: if login_submission["type"] == LoginRestServlet.PASS_TYPE: if not self.password_enabled: @@ -400,18 +401,6 @@ class CasTicketServlet(ClientV1RestServlet): return (user, attributes) -def _parse_json(request): - try: - content = json.loads(request.content.read()) - if type(content) != dict: - raise SynapseError( - 400, "Content must be a JSON object.", errcode=Codes.BAD_JSON - ) - return content - except ValueError: - raise SynapseError(400, "Content not JSON.", errcode=Codes.NOT_JSON) - - def register_servlets(hs, http_server): LoginRestServlet(hs).register(http_server) if hs.config.saml2_enabled: diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py index 981d7708db..b5695d427a 100644 --- a/synapse/rest/client/v1/push_rule.py +++ b/synapse/rest/client/v1/push_rule.py @@ -16,7 +16,7 @@ from twisted.internet import defer from synapse.api.errors import ( - SynapseError, Codes, UnrecognizedRequestError, NotFoundError, StoreError + SynapseError, UnrecognizedRequestError, NotFoundError, StoreError ) from .base import ClientV1RestServlet, client_path_patterns from synapse.storage.push_rule import ( @@ -25,8 +25,7 @@ from synapse.storage.push_rule import ( from synapse.push.clientformat import format_push_rules_for_user from synapse.push.baserules import BASE_RULE_IDS from 
synapse.push.rulekinds import PRIORITY_CLASS_MAP - -import simplejson as json +from synapse.http.servlet import parse_json_object_from_request class PushRuleRestServlet(ClientV1RestServlet): @@ -52,7 +51,7 @@ class PushRuleRestServlet(ClientV1RestServlet): if '/' in spec['rule_id'] or '\\' in spec['rule_id']: raise SynapseError(400, "rule_id may not contain slashes") - content = _parse_json(request) + content = parse_json_object_from_request(request) user_id = requester.user.to_string() @@ -341,14 +340,5 @@ class InvalidRuleException(Exception): pass -# XXX: C+ped from rest/room.py - surely this should be common? -def _parse_json(request): - try: - content = json.loads(request.content.read()) - return content - except ValueError: - raise SynapseError(400, "Content not JSON.", errcode=Codes.NOT_JSON) - - def register_servlets(hs, http_server): PushRuleRestServlet(hs).register(http_server) diff --git a/synapse/rest/client/v1/pusher.py b/synapse/rest/client/v1/pusher.py index 4c662e6e3c..ee029b4f77 100644 --- a/synapse/rest/client/v1/pusher.py +++ b/synapse/rest/client/v1/pusher.py @@ -17,9 +17,10 @@ from twisted.internet import defer from synapse.api.errors import SynapseError, Codes from synapse.push import PusherConfigException +from synapse.http.servlet import parse_json_object_from_request + from .base import ClientV1RestServlet, client_path_patterns -import simplejson as json import logging logger = logging.getLogger(__name__) @@ -33,7 +34,7 @@ class PusherRestServlet(ClientV1RestServlet): requester = yield self.auth.get_user_by_req(request) user = requester.user - content = _parse_json(request) + content = parse_json_object_from_request(request) pusher_pool = self.hs.get_pusherpool() @@ -92,17 +93,5 @@ class PusherRestServlet(ClientV1RestServlet): return 200, {} -# XXX: C+ped from rest/room.py - surely this should be common? 
-def _parse_json(request): - try: - content = json.loads(request.content.read()) - if type(content) != dict: - raise SynapseError(400, "Content must be a JSON object.", - errcode=Codes.NOT_JSON) - return content - except ValueError: - raise SynapseError(400, "Content not JSON.", errcode=Codes.NOT_JSON) - - def register_servlets(hs, http_server): PusherRestServlet(hs).register(http_server) diff --git a/synapse/rest/client/v1/register.py b/synapse/rest/client/v1/register.py index 040a7a7ffa..c6a2ef2ccc 100644 --- a/synapse/rest/client/v1/register.py +++ b/synapse/rest/client/v1/register.py @@ -20,12 +20,12 @@ from synapse.api.errors import SynapseError, Codes from synapse.api.constants import LoginType from .base import ClientV1RestServlet, client_path_patterns import synapse.util.stringutils as stringutils +from synapse.http.servlet import parse_json_object_from_request from synapse.util.async import run_on_reactor from hashlib import sha1 import hmac -import simplejson as json import logging logger = logging.getLogger(__name__) @@ -98,7 +98,7 @@ class RegisterRestServlet(ClientV1RestServlet): @defer.inlineCallbacks def on_POST(self, request): - register_json = _parse_json(request) + register_json = parse_json_object_from_request(request) session = (register_json["session"] if "session" in register_json else None) @@ -355,15 +355,5 @@ class RegisterRestServlet(ClientV1RestServlet): ) -def _parse_json(request): - try: - content = json.loads(request.content.read()) - if type(content) != dict: - raise SynapseError(400, "Content must be a JSON object.") - return content - except ValueError: - raise SynapseError(400, "Content not JSON.") - - def register_servlets(hs, http_server): RegisterRestServlet(hs).register(http_server) diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 4b7d198c52..7a9f3d11b9 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -22,6 +22,7 @@ from synapse.streams.config import PaginationConfig from synapse.api.constants import EventTypes, Membership from synapse.types import UserID, RoomID, RoomAlias from synapse.events.utils import serialize_event +from synapse.http.servlet import parse_json_object_from_request import simplejson as json import logging @@ -137,7 +138,7 @@ class RoomStateEventRestServlet(ClientV1RestServlet): def on_PUT(self, request, room_id, event_type, state_key, txn_id=None): requester = yield self.auth.get_user_by_req(request) - content = _parse_json(request) + content = parse_json_object_from_request(request) event_dict = { "type": event_type, @@ -179,7 +180,7 @@ class RoomSendEventRestServlet(ClientV1RestServlet): @defer.inlineCallbacks def on_POST(self, request, room_id, event_type, txn_id=None): requester = yield self.auth.get_user_by_req(request, allow_guest=True) - content = _parse_json(request) + content = parse_json_object_from_request(request) msg_handler = self.handlers.message_handler event = yield msg_handler.create_and_send_nonmember_event( @@ -229,7 +230,7 @@ class JoinRoomAliasServlet(ClientV1RestServlet): ) try: - content = _parse_json(request) + content = parse_json_object_from_request(request) except: # Turns out we used to ignore the body entirely, and some clients # cheekily send invalid bodies. 
@@ -433,7 +434,7 @@ class RoomMembershipRestServlet(ClientV1RestServlet): raise AuthError(403, "Guest access not allowed") try: - content = _parse_json(request) + content = parse_json_object_from_request(request) except: # Turns out we used to ignore the body entirely, and some clients # cheekily send invalid bodies. @@ -500,7 +501,7 @@ class RoomRedactEventRestServlet(ClientV1RestServlet): @defer.inlineCallbacks def on_POST(self, request, room_id, event_id, txn_id=None): requester = yield self.auth.get_user_by_req(request) - content = _parse_json(request) + content = parse_json_object_from_request(request) msg_handler = self.handlers.message_handler event = yield msg_handler.create_and_send_nonmember_event( @@ -548,7 +549,7 @@ class RoomTypingRestServlet(ClientV1RestServlet): room_id = urllib.unquote(room_id) target_user = UserID.from_string(urllib.unquote(user_id)) - content = _parse_json(request) + content = parse_json_object_from_request(request) typing_handler = self.handlers.typing_notification_handler @@ -580,7 +581,7 @@ class SearchRestServlet(ClientV1RestServlet): def on_POST(self, request): requester = yield self.auth.get_user_by_req(request) - content = _parse_json(request) + content = parse_json_object_from_request(request) batch = request.args.get("next_batch", [None])[0] results = yield self.handlers.search_handler.search( @@ -592,17 +593,6 @@ class SearchRestServlet(ClientV1RestServlet): defer.returnValue((200, results)) -def _parse_json(request): - try: - content = json.loads(request.content.read()) - if type(content) != dict: - raise SynapseError(400, "Content must be a JSON object.", - errcode=Codes.NOT_JSON) - return content - except ValueError: - raise SynapseError(400, "Content not JSON.", errcode=Codes.NOT_JSON) - - def register_txn_path(servlet, regex_string, http_server, with_get=False): """Registers a transaction-based path. 
diff --git a/synapse/rest/client/v2_alpha/_base.py b/synapse/rest/client/v2_alpha/_base.py index 24af322126..b6faa2b0e6 100644 --- a/synapse/rest/client/v2_alpha/_base.py +++ b/synapse/rest/client/v2_alpha/_base.py @@ -17,11 +17,9 @@ """ from synapse.api.urls import CLIENT_V2_ALPHA_PREFIX -from synapse.api.errors import SynapseError import re import logging -import simplejson logger = logging.getLogger(__name__) @@ -44,23 +42,3 @@ def client_v2_patterns(path_regex, releases=(0,)): new_prefix = CLIENT_V2_ALPHA_PREFIX.replace("/v2_alpha", "/r%d" % release) patterns.append(re.compile("^" + new_prefix + path_regex)) return patterns - - -def parse_request_allow_empty(request): - content = request.content.read() - if content is None or content == '': - return None - try: - return simplejson.loads(content) - except simplejson.JSONDecodeError: - raise SynapseError(400, "Content not JSON.") - - -def parse_json_dict_from_request(request): - try: - content = simplejson.loads(request.content.read()) - if type(content) != dict: - raise SynapseError(400, "Content must be a JSON object.") - return content - except simplejson.JSONDecodeError: - raise SynapseError(400, "Content not JSON.") diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index a614b79d45..688b051580 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -17,10 +17,10 @@ from twisted.internet import defer from synapse.api.constants import LoginType from synapse.api.errors import LoginError, SynapseError, Codes -from synapse.http.servlet import RestServlet +from synapse.http.servlet import RestServlet, parse_json_object_from_request from synapse.util.async import run_on_reactor -from ._base import client_v2_patterns, parse_json_dict_from_request +from ._base import client_v2_patterns import logging @@ -41,7 +41,7 @@ class PasswordRestServlet(RestServlet): def on_POST(self, request): yield run_on_reactor() - body = parse_json_dict_from_request(request) + body = parse_json_object_from_request(request) authed, result, params = yield self.auth_handler.check_auth([ [LoginType.PASSWORD], @@ -114,7 +114,7 @@ class ThreepidRestServlet(RestServlet): def on_POST(self, request): yield run_on_reactor() - body = parse_json_dict_from_request(request) + body = parse_json_object_from_request(request) threePidCreds = body.get('threePidCreds') threePidCreds = body.get('three_pid_creds', threePidCreds) diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index ec5c21fa1f..b090e66bcf 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -17,9 +17,9 @@ from twisted.internet import defer from synapse.api.constants import LoginType from synapse.api.errors import SynapseError, Codes, UnrecognizedRequestError -from synapse.http.servlet import RestServlet +from synapse.http.servlet import RestServlet, parse_json_object_from_request -from ._base import client_v2_patterns, parse_json_dict_from_request +from ._base import client_v2_patterns import logging import hmac @@ -73,7 +73,7 @@ class RegisterRestServlet(RestServlet): ret = yield self.onEmailTokenRequest(request) defer.returnValue(ret) - body = parse_json_dict_from_request(request) + body = parse_json_object_from_request(request) # we do basic sanity checks here because the auth layer will store these # in sessions. Pull out the username/password provided to us. 
@@ -236,7 +236,7 @@ class RegisterRestServlet(RestServlet): @defer.inlineCallbacks def onEmailTokenRequest(self, request): - body = parse_json_dict_from_request(request) + body = parse_json_object_from_request(request) required = ['id_server', 'client_secret', 'email', 'send_attempt'] absent = [] diff --git a/synapse/rest/client/v2_alpha/tokenrefresh.py b/synapse/rest/client/v2_alpha/tokenrefresh.py index 3553f6b040..a158c2209a 100644 --- a/synapse/rest/client/v2_alpha/tokenrefresh.py +++ b/synapse/rest/client/v2_alpha/tokenrefresh.py @@ -16,9 +16,9 @@ from twisted.internet import defer from synapse.api.errors import AuthError, StoreError, SynapseError -from synapse.http.servlet import RestServlet +from synapse.http.servlet import RestServlet, parse_json_object_from_request -from ._base import client_v2_patterns, parse_json_dict_from_request +from ._base import client_v2_patterns class TokenRefreshRestServlet(RestServlet): @@ -35,7 +35,7 @@ class TokenRefreshRestServlet(RestServlet): @defer.inlineCallbacks def on_POST(self, request): - body = parse_json_dict_from_request(request) + body = parse_json_object_from_request(request) try: old_refresh_token = body["refresh_token"] auth_handler = self.hs.get_handlers().auth_handler From 07cf96ebf7c806ba570d167e200e54e07034f5ce Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 9 Mar 2016 11:54:56 +0000 Subject: [PATCH 298/349] Pin pysaml2 version to 3.x This is due to the fact that `from saml2 import config` fails in version 4.x --- synapse/python_dependencies.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 35933324a4..0a6043ae8d 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -34,7 +34,7 @@ REQUIREMENTS = { "pydenticon": ["pydenticon"], "ujson": ["ujson"], "blist": ["blist"], - "pysaml2": ["saml2"], + "pysaml2>=3.0.0,<4.0.0": ["saml2>=3.0.0,<4.0.0"], "pymacaroons-pynacl": ["pymacaroons"], } CONDITIONAL_REQUIREMENTS = { From 3ecaabc7fd8a1a461c319248a94617cf24dd4070 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 9 Mar 2016 15:45:34 +0000 Subject: [PATCH 299/349] Use topological orders for initial sync timeline --- synapse/storage/stream.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 8908d5b5da..613b54cd1d 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -217,8 +217,8 @@ class StreamStore(SQLBaseStore): " room_id = ?" " AND not outlier" " AND stream_ordering <= ?" - " ORDER BY stream_ordering %s LIMIT ?" - ) % (order,) + " ORDER BY topological_ordering %s, stream_ordering %s LIMIT ?" 
+ ) % (order, order,) txn.execute(sql, (room_id, to_id, limit)) rows = self.cursor_to_dict(txn) From af2fe6110c6e249ce0c679ca39bb67d2c16c59c3 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 9 Mar 2016 16:11:53 +0000 Subject: [PATCH 300/349] Return the correct token form --- synapse/storage/stream.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 613b54cd1d..c9f70a08a7 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -232,7 +232,7 @@ class StreamStore(SQLBaseStore): get_prev_content=True ) - self._set_before_and_after(ret, rows, topo_order=False) + self._set_before_and_after(ret, rows, topo_order=from_id is None) if order.lower() == "desc": ret.reverse() From 8a88684736efcb6792d17249e2976caefe00ee3d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 9 Mar 2016 16:51:22 +0000 Subject: [PATCH 301/349] Add comment --- synapse/storage/stream.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index c9f70a08a7..7f4a827528 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -184,6 +184,9 @@ class StreamStore(SQLBaseStore): @defer.inlineCallbacks def get_room_events_stream_for_room(self, room_id, from_key, to_key, limit=0, order='DESC'): + # Note: If from_key is None then we return in topological order. This + # is because in that case we're using this as a "get the last few messages + # in a room" function, rather than "get new messages since last sync" if from_key is not None: from_id = RoomStreamToken.parse_stream_token(from_key).stream else: From 40160e24ab93ca4261df82335ab5521f134e2eda Mon Sep 17 00:00:00 2001 From: blide Date: Thu, 10 Mar 2016 02:08:37 +0300 Subject: [PATCH 302/349] Register endpoint returns refresh_token Guest registration still doesn't return refresh_token --- synapse/rest/client/v2_alpha/register.py | 13 +++++---- tests/rest/client/v2_alpha/test_register.py | 30 ++++++++++++--------- 2 files changed, 26 insertions(+), 17 deletions(-) diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index b090e66bcf..533ff136eb 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -187,7 +187,7 @@ class RegisterRestServlet(RestServlet): else: logger.info("bind_email not specified: not binding email") - result = self._create_registration_details(user_id, token) + result = yield self._create_registration_details(user_id, token) defer.returnValue((200, result)) def on_OPTIONS(self, _): @@ -198,7 +198,7 @@ class RegisterRestServlet(RestServlet): (user_id, token) = yield self.registration_handler.appservice_register( username, as_token ) - defer.returnValue(self._create_registration_details(user_id, token)) + defer.returnValue((yield self._create_registration_details(user_id, token))) @defer.inlineCallbacks def _do_shared_secret_registration(self, username, password, mac): @@ -225,14 +225,17 @@ class RegisterRestServlet(RestServlet): (user_id, token) = yield self.registration_handler.register( localpart=username, password=password ) - defer.returnValue(self._create_registration_details(user_id, token)) + defer.returnValue((yield self._create_registration_details(user_id, token))) + @defer.inlineCallbacks def _create_registration_details(self, user_id, token): - return { + refresh_token = yield self.auth_handler.issue_refresh_token(user_id) + defer.returnValue({ "user_id": user_id, "access_token": token, 
"home_server": self.hs.hostname, - } + "refresh_token": refresh_token, + }) @defer.inlineCallbacks def onEmailTokenRequest(self, request): diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index b867599079..e19952b8b6 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -62,12 +62,15 @@ class RegisterRestServletTestCase(unittest.TestCase): self.registration_handler.appservice_register = Mock( return_value=(user_id, token) ) - result = yield self.servlet.on_POST(self.request) - self.assertEquals(result, (200, { - "user_id": user_id, - "access_token": token, - "home_server": self.hs.hostname - })) + (code, result) = yield self.servlet.on_POST(self.request) + self.assertEquals(code, 200) + det_data = { + "user_id": user_id, + "access_token": token, + "home_server": self.hs.hostname + } + self.assertDictContainsSubset(det_data, result) + self.assertIn("refresh_token", result) @defer.inlineCallbacks def test_POST_appservice_registration_invalid(self): @@ -112,12 +115,15 @@ class RegisterRestServletTestCase(unittest.TestCase): }) self.registration_handler.register = Mock(return_value=(user_id, token)) - result = yield self.servlet.on_POST(self.request) - self.assertEquals(result, (200, { - "user_id": user_id, - "access_token": token, - "home_server": self.hs.hostname - })) + (code, result) = yield self.servlet.on_POST(self.request) + self.assertEquals(code, 200) + det_data = { + "user_id": user_id, + "access_token": token, + "home_server": self.hs.hostname + } + self.assertDictContainsSubset(det_data, result) + self.assertIn("refresh_token", result) def test_POST_disabled_registration(self): self.hs.config.enable_registration = False From 1be438f2a653d4c572324095fd7594c006ca98e0 Mon Sep 17 00:00:00 2001 From: blide Date: Thu, 10 Mar 2016 12:13:35 +0300 Subject: [PATCH 303/349] Flake8 fix --- tests/rest/client/v2_alpha/test_register.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index e19952b8b6..9a202e9dd7 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -65,9 +65,9 @@ class RegisterRestServletTestCase(unittest.TestCase): (code, result) = yield self.servlet.on_POST(self.request) self.assertEquals(code, 200) det_data = { - "user_id": user_id, - "access_token": token, - "home_server": self.hs.hostname + "user_id": user_id, + "access_token": token, + "home_server": self.hs.hostname } self.assertDictContainsSubset(det_data, result) self.assertIn("refresh_token", result) @@ -118,9 +118,9 @@ class RegisterRestServletTestCase(unittest.TestCase): (code, result) = yield self.servlet.on_POST(self.request) self.assertEquals(code, 200) det_data = { - "user_id": user_id, - "access_token": token, - "home_server": self.hs.hostname + "user_id": user_id, + "access_token": token, + "home_server": self.hs.hostname } self.assertDictContainsSubset(det_data, result) self.assertIn("refresh_token", result) From 9669a99d1a76f346b2cfb9b4197636ac3142f9d2 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 10 Mar 2016 15:12:19 +0000 Subject: [PATCH 304/349] Update users table in a batched manner --- synapse/storage/schema/delta/30/as_users.py | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/synapse/storage/schema/delta/30/as_users.py b/synapse/storage/schema/delta/30/as_users.py index 
4da3c59de2..4f6e9dd540 100644 --- a/synapse/storage/schema/delta/30/as_users.py +++ b/synapse/storage/schema/delta/30/as_users.py @@ -52,12 +52,17 @@ def run_upgrade(cur, database_engine, config, *args, **kwargs): " service (IDs %s and %s); assigning arbitrarily to %s" % (user_id, owned[user_id], appservice.id, owned[user_id]) ) - owned[user_id] = appservice.id + owned.setdefault(appservice.id, []).append(user_id) - for user_id, as_id in owned.items(): - cur.execute( - database_engine.convert_param_style( - "UPDATE users SET appservice_id = ? WHERE name = ?" - ), - (as_id, user_id) - ) + for as_id, user_ids in owned.items(): + n = 100 + user_chunks = (user_ids[i:i + 100] for i in xrange(0, len(user_ids), n)) + for chunk in user_chunks: + cur.execute( + database_engine.convert_param_style( + "UPDATE users SET appservice_id = ? WHERE name IN (%s)" % ( + ",".join("?" for _ in chunk), + ) + ), + [as_id] + chunk + ) From 465605d616c991760ce021932f0453fc6bc477ef Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Thu, 10 Mar 2016 15:58:22 +0000 Subject: [PATCH 305/349] Store appservice ID on register --- synapse/handlers/register.py | 5 ++++- synapse/storage/registration.py | 40 ++++++++++++++++++++++++++++----- 2 files changed, 38 insertions(+), 7 deletions(-) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index e2ace6a4e5..6ffb8c0da6 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -182,6 +182,8 @@ class RegistrationHandler(BaseHandler): errcode=Codes.EXCLUSIVE ) + service_id = service.id if service.is_exclusive_user(user_id) else None + yield self.check_user_id_not_appservice_exclusive( user_id, allowed_appservice=service ) @@ -190,7 +192,8 @@ class RegistrationHandler(BaseHandler): yield self.store.register( user_id=user_id, token=token, - password_hash="" + password_hash="", + appservice_id=service_id, ) yield registered_user(self.distributor, user) defer.returnValue((user_id, token)) diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index ad1157f979..aa49f53458 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -76,7 +76,7 @@ class RegistrationStore(SQLBaseStore): @defer.inlineCallbacks def register(self, user_id, token, password_hash, - was_guest=False, make_guest=False): + was_guest=False, make_guest=False, appservice_id=None): """Attempts to register an account. Args: @@ -87,16 +87,32 @@ class RegistrationStore(SQLBaseStore): upgraded to a non-guest account. make_guest (boolean): True if the the new user should be guest, false to add a regular user account. + appservice_id (str): The ID of the appservice registering the user. Raises: StoreError if the user_id could not be registered. 
""" yield self.runInteraction( "register", - self._register, user_id, token, password_hash, was_guest, make_guest + self._register, + user_id, + token, + password_hash, + was_guest, + make_guest, + appservice_id ) self.is_guest.invalidate((user_id,)) - def _register(self, txn, user_id, token, password_hash, was_guest, make_guest): + def _register( + self, + txn, + user_id, + token, + password_hash, + was_guest, + make_guest, + appservice_id + ): now = int(self.clock.time()) next_id = self._access_tokens_id_gen.get_next() @@ -111,9 +127,21 @@ class RegistrationStore(SQLBaseStore): [password_hash, now, 1 if make_guest else 0, user_id]) else: txn.execute("INSERT INTO users " - "(name, password_hash, creation_ts, is_guest) " - "VALUES (?,?,?,?)", - [user_id, password_hash, now, 1 if make_guest else 0]) + "(" + " name," + " password_hash," + " creation_ts," + " is_guest," + " appservice_id" + ") " + "VALUES (?,?,?,?,?)", + [ + user_id, + password_hash, + now, + 1 if make_guest else 0, + appservice_id, + ]) except self.database_engine.module.IntegrityError: raise StoreError( 400, "User ID already taken.", errcode=Codes.USER_IN_USE From 2e2be463f87871cf36f5b72e7cf32e8c67caa67f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 11 Mar 2016 10:29:05 +0000 Subject: [PATCH 306/349] Make key client send a Host header --- synapse/crypto/keyclient.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/synapse/crypto/keyclient.py b/synapse/crypto/keyclient.py index 784d02f122..54b83da9d8 100644 --- a/synapse/crypto/keyclient.py +++ b/synapse/crypto/keyclient.py @@ -36,6 +36,7 @@ def fetch_server_key(server_name, ssl_context_factory, path=KEY_API_V1): factory = SynapseKeyClientFactory() factory.path = path + factory.host = server_name endpoint = matrix_federation_endpoint( reactor, server_name, ssl_context_factory, timeout=30 ) @@ -81,6 +82,8 @@ class SynapseKeyClientProtocol(HTTPClient): self.host = self.transport.getHost() logger.debug("Connected to %s", self.host) self.sendCommand(b"GET", self.path) + if self.host: + self.sendHeader(b"Host", self.host) self.endHeaders() self.timer = reactor.callLater( self.timeout, From aa11db5f119b9fa88242b0df95cfddd00e196ca1 Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 11 Mar 2016 13:14:18 +0000 Subject: [PATCH 307/349] Fix cache invalidation so deleting access tokens (which we did when changing password) actually takes effect without HS restart. 
Reinstate the code to avoid logging out the session that changed the password, removed in 415c2f05491ce65a4fc34326519754cd1edd9c54 --- synapse/handlers/auth.py | 13 ++++++++---- synapse/push/pusherpool.py | 8 +++---- synapse/rest/client/v2_alpha/account.py | 2 +- synapse/storage/registration.py | 28 ++++++++++++++++++------- 4 files changed, 34 insertions(+), 17 deletions(-) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 7a4afe446d..a740cc3da3 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -432,13 +432,18 @@ class AuthHandler(BaseHandler): ) @defer.inlineCallbacks - def set_password(self, user_id, newpassword): + def set_password(self, user_id, newpassword, requester=None): password_hash = self.hash(newpassword) + except_access_token_ids = [requester.access_token_id] if requester else [] + yield self.store.user_set_password_hash(user_id, password_hash) - yield self.store.user_delete_access_tokens(user_id) - yield self.hs.get_pusherpool().remove_pushers_by_user(user_id) - yield self.store.flush_user(user_id) + yield self.store.user_delete_access_tokens_except( + user_id, except_access_token_ids + ) + yield self.hs.get_pusherpool().remove_pushers_by_user_except_access_tokens( + user_id, except_access_token_ids + ) @defer.inlineCallbacks def add_threepid(self, user_id, medium, address, validated_at): diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index 772a095f8b..28ec94d866 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -92,14 +92,14 @@ class PusherPool: yield self.remove_pusher(p['app_id'], p['pushkey'], p['user_name']) @defer.inlineCallbacks - def remove_pushers_by_user(self, user_id): + def remove_pushers_by_user_except_access_tokens(self, user_id, except_token_ids): all = yield self.store.get_all_pushers() logger.info( - "Removing all pushers for user %s", - user_id, + "Removing all pushers for user %s except access tokens ids %r", + user_id, except_token_ids ) for p in all: - if p['user_name'] == user_id: + if p['user_name'] == user_id and p['access_token'] not in except_token_ids: logger.info( "Removing pusher for app id %s, pushkey %s, user %s", p['app_id'], p['pushkey'], p['user_name'] diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index 688b051580..dd4ea45588 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -79,7 +79,7 @@ class PasswordRestServlet(RestServlet): new_password = params['new_password'] yield self.auth_handler.set_password( - user_id, new_password + user_id, new_password, requester ) defer.returnValue((200, {})) diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index aa49f53458..5eef7ebcc7 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -208,14 +208,26 @@ class RegistrationStore(SQLBaseStore): ) @defer.inlineCallbacks - def flush_user(self, user_id): - rows = yield self._execute( - 'flush_user', None, - "SELECT token FROM access_tokens WHERE user_id = ?", - user_id - ) - for r in rows: - self.get_user_by_access_token.invalidate((r,)) + def user_delete_access_tokens_except(self, user_id, except_token_ids): + def f(txn): + txn.execute( + "SELECT id, token FROM access_tokens WHERE user_id = ? 
LIMIT 50", + (user_id,) + ) + rows = txn.fetchall() + for r in rows: + if r[0] in except_token_ids: + continue + + txn.call_after(self.get_user_by_access_token.invalidate, (r[1],)) + txn.execute( + "DELETE FROM access_tokens WHERE id in (%s)" % ",".join( + ["?" for _ in rows] + ), [r[0] for r in rows] + ) + return len(rows) == 50 + while (yield self.runInteraction("user_delete_access_tokens_except", f)): + pass @cached() def get_user_by_access_token(self, token): From 57c444b3ad9e69ae99fe694c0ef9a1961ec9366a Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 11 Mar 2016 14:25:05 +0000 Subject: [PATCH 308/349] Dear PyCharm, please indent sensibly for me. Thx. --- synapse/handlers/auth.py | 4 ++-- synapse/storage/registration.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index a740cc3da3..0f02493fa2 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -439,10 +439,10 @@ class AuthHandler(BaseHandler): yield self.store.user_set_password_hash(user_id, password_hash) yield self.store.user_delete_access_tokens_except( - user_id, except_access_token_ids + user_id, except_access_token_ids ) yield self.hs.get_pusherpool().remove_pushers_by_user_except_access_tokens( - user_id, except_access_token_ids + user_id, except_access_token_ids ) @defer.inlineCallbacks diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 5eef7ebcc7..d3d84c9b94 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -212,7 +212,7 @@ class RegistrationStore(SQLBaseStore): def f(txn): txn.execute( "SELECT id, token FROM access_tokens WHERE user_id = ? LIMIT 50", - (user_id,) + (user_id,) ) rows = txn.fetchall() for r in rows: From f523177850df7fbe480086b281f09815f3d5c656 Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 11 Mar 2016 14:29:01 +0000 Subject: [PATCH 309/349] Delete old, unused methods and rename new one to just be `user_delete_access_tokens` with an `except_token_ids` argument doing what it says on the tin. 
--- synapse/handlers/auth.py | 2 +- synapse/storage/registration.py | 17 ++--------------- 2 files changed, 3 insertions(+), 16 deletions(-) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 0f02493fa2..43f2cdc2c4 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -438,7 +438,7 @@ class AuthHandler(BaseHandler): except_access_token_ids = [requester.access_token_id] if requester else [] yield self.store.user_set_password_hash(user_id, password_hash) - yield self.store.user_delete_access_tokens_except( + yield self.store.user_delete_access_tokens( user_id, except_access_token_ids ) yield self.hs.get_pusherpool().remove_pushers_by_user_except_access_tokens( diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index d3d84c9b94..266e29f5bc 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -195,20 +195,7 @@ class RegistrationStore(SQLBaseStore): }) @defer.inlineCallbacks - def user_delete_access_tokens(self, user_id): - yield self.runInteraction( - "user_delete_access_tokens", - self._user_delete_access_tokens, user_id - ) - - def _user_delete_access_tokens(self, txn, user_id): - txn.execute( - "DELETE FROM access_tokens WHERE user_id = ?", - (user_id, ) - ) - - @defer.inlineCallbacks - def user_delete_access_tokens_except(self, user_id, except_token_ids): + def user_delete_access_tokens(self, user_id, except_token_ids): def f(txn): txn.execute( "SELECT id, token FROM access_tokens WHERE user_id = ? LIMIT 50", @@ -226,7 +213,7 @@ class RegistrationStore(SQLBaseStore): ), [r[0] for r in rows] ) return len(rows) == 50 - while (yield self.runInteraction("user_delete_access_tokens_except", f)): + while (yield self.runInteraction("user_delete_access_tokens", f)): pass @cached() From af59826a2fda31a6382f60659796a1c8db6f21ce Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 11 Mar 2016 14:34:09 +0000 Subject: [PATCH 310/349] Make select more sensible when dseleting access tokens, rename pusher deletion to match access token deletion and make exception arg optional. 
--- synapse/handlers/auth.py | 2 +- synapse/push/pusherpool.py | 2 +- synapse/storage/registration.py | 8 +++----- 3 files changed, 5 insertions(+), 7 deletions(-) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 43f2cdc2c4..5c0ea636bc 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -441,7 +441,7 @@ class AuthHandler(BaseHandler): yield self.store.user_delete_access_tokens( user_id, except_access_token_ids ) - yield self.hs.get_pusherpool().remove_pushers_by_user_except_access_tokens( + yield self.hs.get_pusherpool().remove_pushers_by_user( user_id, except_access_token_ids ) diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index 28ec94d866..0b463c6fdb 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -92,7 +92,7 @@ class PusherPool: yield self.remove_pusher(p['app_id'], p['pushkey'], p['user_name']) @defer.inlineCallbacks - def remove_pushers_by_user_except_access_tokens(self, user_id, except_token_ids): + def remove_pushers_by_user(self, user_id, except_token_ids=[]): all = yield self.store.get_all_pushers() logger.info( "Removing all pushers for user %s except access tokens ids %r", diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 266e29f5bc..3a050621d9 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -198,14 +198,12 @@ class RegistrationStore(SQLBaseStore): def user_delete_access_tokens(self, user_id, except_token_ids): def f(txn): txn.execute( - "SELECT id, token FROM access_tokens WHERE user_id = ? LIMIT 50", - (user_id,) + "SELECT id, token FROM access_tokens " + "WHERE user_id = ? AND id not in LIMIT 50", + (user_id,except_token_ids) ) rows = txn.fetchall() for r in rows: - if r[0] in except_token_ids: - continue - txn.call_after(self.get_user_by_access_token.invalidate, (r[1],)) txn.execute( "DELETE FROM access_tokens WHERE id in (%s)" % ",".join( From 2dee03aee513eddaf50a4249747beac67445b3cd Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 11 Mar 2016 14:38:23 +0000 Subject: [PATCH 311/349] more pep8 --- synapse/storage/registration.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 3a050621d9..5d45f0c651 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -200,7 +200,7 @@ class RegistrationStore(SQLBaseStore): txn.execute( "SELECT id, token FROM access_tokens " "WHERE user_id = ? AND id not in LIMIT 50", - (user_id,except_token_ids) + (user_id, except_token_ids) ) rows = txn.fetchall() for r in rows: From c08122843982307dad8d099784bc1a4bc0202eae Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 11 Mar 2016 15:09:17 +0000 Subject: [PATCH 312/349] Fix SQL statement --- synapse/storage/registration.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 5d45f0c651..5e7a4e371d 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -199,7 +199,7 @@ class RegistrationStore(SQLBaseStore): def f(txn): txn.execute( "SELECT id, token FROM access_tokens " - "WHERE user_id = ? AND id not in LIMIT 50", + "WHERE user_id = ? AND id NOT IN ? 
LIMIT 50", (user_id, except_token_ids) ) rows = txn.fetchall() From b13035cc91410634421820e5175d0596f5a67549 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 11 Mar 2016 16:27:50 +0000 Subject: [PATCH 313/349] Implement logout --- synapse/rest/__init__.py | 2 + synapse/rest/client/v1/logout.py | 72 ++++++++++++++++++++++++++++++++ synapse/storage/registration.py | 49 +++++++++++++++------- 3 files changed, 109 insertions(+), 14 deletions(-) create mode 100644 synapse/rest/client/v1/logout.py diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py index 433237c204..6688fa8fa0 100644 --- a/synapse/rest/__init__.py +++ b/synapse/rest/__init__.py @@ -30,6 +30,7 @@ from synapse.rest.client.v1 import ( push_rule, register as v1_register, login as v1_login, + logout, ) from synapse.rest.client.v2_alpha import ( @@ -72,6 +73,7 @@ class ClientRestResource(JsonResource): admin.register_servlets(hs, client_resource) pusher.register_servlets(hs, client_resource) push_rule.register_servlets(hs, client_resource) + logout.register_servlets(hs, client_resource) # "v2" sync.register_servlets(hs, client_resource) diff --git a/synapse/rest/client/v1/logout.py b/synapse/rest/client/v1/logout.py new file mode 100644 index 0000000000..9bff02ee4e --- /dev/null +++ b/synapse/rest/client/v1/logout.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- +# Copyright 2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from twisted.internet import defer + +from synapse.api.errors import AuthError, Codes + +from .base import ClientV1RestServlet, client_path_patterns + +import logging + + +logger = logging.getLogger(__name__) + + +class LogoutRestServlet(ClientV1RestServlet): + PATTERNS = client_path_patterns("/logout$") + + def __init__(self, hs): + super(LogoutRestServlet, self).__init__(hs) + self.store = hs.get_datastore() + + def on_OPTIONS(self, request): + return (200, {}) + + @defer.inlineCallbacks + def on_POST(self, request): + try: + access_token = request.args["access_token"][0] + except KeyError: + raise AuthError( + self.TOKEN_NOT_FOUND_HTTP_STATUS, "Missing access token.", + errcode=Codes.MISSING_TOKEN + ) + yield self.store.delete_access_token(access_token) + defer.returnValue((200, {})) + + +class LogoutAllRestServlet(ClientV1RestServlet): + PATTERNS = client_path_patterns("/logout/all$") + + def __init__(self, hs): + super(LogoutAllRestServlet, self).__init__(hs) + self.store = hs.get_datastore() + self.auth = hs.get_auth() + + def on_OPTIONS(self, request): + return (200, {}) + + @defer.inlineCallbacks + def on_POST(self, request): + requester = yield self.auth.get_user_by_req(request) + user_id = requester.user.to_string() + yield self.store.user_delete_access_tokens(user_id) + defer.returnValue((200, {})) + + +def register_servlets(hs, http_server): + LogoutRestServlet(hs).register(http_server) + LogoutAllRestServlet(hs).register(http_server) diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 5e7a4e371d..18898c44eb 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -195,24 +195,45 @@ class RegistrationStore(SQLBaseStore): }) @defer.inlineCallbacks - def user_delete_access_tokens(self, user_id, except_token_ids): + def user_delete_access_tokens(self, user_id, except_token_ids=[]): def f(txn): txn.execute( - "SELECT id, token FROM access_tokens " - "WHERE user_id = ? AND id NOT IN ? LIMIT 50", - (user_id, except_token_ids) + "SELECT token FROM access_tokens" + " WHERE user_id = ? AND id NOT IN (%s)" % ( + ",".join(["?" for _ in except_token_ids]), + ), + [user_id] + except_token_ids ) - rows = txn.fetchall() - for r in rows: - txn.call_after(self.get_user_by_access_token.invalidate, (r[1],)) - txn.execute( - "DELETE FROM access_tokens WHERE id in (%s)" % ",".join( - ["?" for _ in rows] - ), [r[0] for r in rows] + + while True: + rows = txn.fetchmany(100) + if not rows: + break + + for row in rows: + txn.call_after(self.get_user_by_access_token.invalidate, (row[0],)) + + txn.execute( + "DELETE FROM access_tokens WHERE token in (%s)" % ( + ",".join(["?" 
for _ in rows]), + ), [r[0] for r in rows] + ) + + yield self.runInteraction("user_delete_access_tokens", f) + + def delete_access_token(self, access_token): + def f(txn): + self._simple_delete_one_txn( + txn, + table="access_tokens", + keyvalues={ + "token": access_token + }, ) - return len(rows) == 50 - while (yield self.runInteraction("user_delete_access_tokens", f)): - pass + + txn.call_after(self.get_user_by_access_token.invalidate, (access_token,)) + + return self.runInteraction("delete_access_token", f) @cached() def get_user_by_access_token(self, token): From e9c1cabac26cf8e28152ebdb3caf29d4457eea0e Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Fri, 11 Mar 2016 16:41:03 +0000 Subject: [PATCH 315/349] Use parse_json_object_from_request to parse JSON out of request bodies --- synapse/federation/transport/server.py | 4 ++-- synapse/http/servlet.py | 17 ++++++++++----- synapse/rest/client/v1/presence.py | 13 +++++------- synapse/rest/client/v1/profile.py | 8 +++---- synapse/rest/client/v1/room.py | 14 ++++--------- synapse/rest/client/v2_alpha/account_data.py | 21 ++++--------------- synapse/rest/client/v2_alpha/filter.py | 10 ++------- synapse/rest/client/v2_alpha/keys.py | 22 +++++++------------- synapse/rest/client/v2_alpha/tags.py | 12 +++-------- synapse/rest/key/v2/remote_key_resource.py | 12 ++--------- tests/rest/client/v1/test_profile.py | 6 ++++-- 11 files changed, 49 insertions(+), 90 deletions(-) diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 6e92e2f8f4..208bff8d4f 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -18,6 +18,7 @@ from twisted.internet import defer from synapse.api.urls import FEDERATION_PREFIX as PREFIX from synapse.api.errors import Codes, SynapseError from synapse.http.server import JsonResource +from synapse.http.servlet import parse_json_object_from_request from synapse.util.ratelimitutils import FederationRateLimiter import functools @@ -419,8 +420,7 @@ class On3pidBindServlet(BaseFederationServlet): @defer.inlineCallbacks def on_POST(self, request): - content_bytes = request.content.read() - content = json.loads(content_bytes) + content = parse_json_object_from_request(request) if "invites" in content: last_exception = None for invite in content["invites"]: diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py index 41c519c000..1996f8b136 100644 --- a/synapse/http/servlet.py +++ b/synapse/http/servlet.py @@ -128,14 +128,21 @@ def parse_json_object_from_request(request): if it wasn't a JSON object. """ try: - content = simplejson.loads(request.content.read()) - if type(content) != dict: - message = "Content must be a JSON object." - raise SynapseError(400, message, errcode=Codes.BAD_JSON) - return content + content_bytes = request.content.read() + except: + raise SynapseError(400, "Error reading JSON content.") + + try: + content = simplejson.loads(content_bytes) except simplejson.JSONDecodeError: raise SynapseError(400, "Content not JSON.", errcode=Codes.NOT_JSON) + if type(content) != dict: + message = "Content must be a JSON object." 
+ raise SynapseError(400, message, errcode=Codes.BAD_JSON) + + return content + class RestServlet(object): diff --git a/synapse/rest/client/v1/presence.py b/synapse/rest/client/v1/presence.py index bbfa1d6ac4..27d9ed586b 100644 --- a/synapse/rest/client/v1/presence.py +++ b/synapse/rest/client/v1/presence.py @@ -19,9 +19,9 @@ from twisted.internet import defer from synapse.api.errors import SynapseError, AuthError from synapse.types import UserID +from synapse.http.servlet import parse_json_object_from_request from .base import ClientV1RestServlet, client_path_patterns -import simplejson as json import logging logger = logging.getLogger(__name__) @@ -56,9 +56,10 @@ class PresenceStatusRestServlet(ClientV1RestServlet): raise AuthError(403, "Can only set your own presence state") state = {} - try: - content = json.loads(request.content.read()) + content = parse_json_object_from_request(request) + + try: state["presence"] = content.pop("presence") if "status_msg" in content: @@ -113,11 +114,7 @@ class PresenceListRestServlet(ClientV1RestServlet): raise SynapseError( 400, "Cannot modify another user's presence list") - try: - content = json.loads(request.content.read()) - except: - logger.exception("JSON parse error") - raise SynapseError(400, "Unable to parse content") + content = parse_json_object_from_request(request) if "invite" in content: for u in content["invite"]: diff --git a/synapse/rest/client/v1/profile.py b/synapse/rest/client/v1/profile.py index 953764bd8e..65c4e2ebef 100644 --- a/synapse/rest/client/v1/profile.py +++ b/synapse/rest/client/v1/profile.py @@ -18,8 +18,7 @@ from twisted.internet import defer from .base import ClientV1RestServlet, client_path_patterns from synapse.types import UserID - -import simplejson as json +from synapse.http.servlet import parse_json_object_from_request class ProfileDisplaynameRestServlet(ClientV1RestServlet): @@ -44,8 +43,9 @@ class ProfileDisplaynameRestServlet(ClientV1RestServlet): requester = yield self.auth.get_user_by_req(request, allow_guest=True) user = UserID.from_string(user_id) + content = parse_json_object_from_request(request) + try: - content = json.loads(request.content.read()) new_name = content["displayname"] except: defer.returnValue((400, "Unable to parse name")) @@ -81,8 +81,8 @@ class ProfileAvatarURLRestServlet(ClientV1RestServlet): requester = yield self.auth.get_user_by_req(request) user = UserID.from_string(user_id) + content = parse_json_object_from_request(request) try: - content = json.loads(request.content.read()) new_name = content["avatar_url"] except: defer.returnValue((400, "Unable to parse name")) diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 7a9f3d11b9..a1fa7daf79 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -24,7 +24,6 @@ from synapse.types import UserID, RoomID, RoomAlias from synapse.events.utils import serialize_event from synapse.http.servlet import parse_json_object_from_request -import simplejson as json import logging import urllib @@ -72,15 +71,10 @@ class RoomCreateRestServlet(ClientV1RestServlet): defer.returnValue((200, info)) def get_room_config(self, request): - try: - user_supplied_config = json.loads(request.content.read()) - if "visibility" not in user_supplied_config: - # default visibility - user_supplied_config["visibility"] = "public" - return user_supplied_config - except (ValueError, TypeError): - raise SynapseError(400, "Body must be JSON.", - errcode=Codes.BAD_JSON) + user_supplied_config = 
parse_json_object_from_request(request) + # default visibility + user_supplied_config.setdefault("visibility", "public") + return user_supplied_config def on_OPTIONS(self, request): return (200, {}) diff --git a/synapse/rest/client/v2_alpha/account_data.py b/synapse/rest/client/v2_alpha/account_data.py index 1456881c1a..b16079cece 100644 --- a/synapse/rest/client/v2_alpha/account_data.py +++ b/synapse/rest/client/v2_alpha/account_data.py @@ -15,15 +15,13 @@ from ._base import client_v2_patterns -from synapse.http.servlet import RestServlet -from synapse.api.errors import AuthError, SynapseError +from synapse.http.servlet import RestServlet, parse_json_object_from_request +from synapse.api.errors import AuthError from twisted.internet import defer import logging -import simplejson as json - logger = logging.getLogger(__name__) @@ -47,11 +45,7 @@ class AccountDataServlet(RestServlet): if user_id != requester.user.to_string(): raise AuthError(403, "Cannot add account data for other users.") - try: - content_bytes = request.content.read() - body = json.loads(content_bytes) - except: - raise SynapseError(400, "Invalid JSON") + body = parse_json_object_from_request(request) max_id = yield self.store.add_account_data_for_user( user_id, account_data_type, body @@ -86,14 +80,7 @@ class RoomAccountDataServlet(RestServlet): if user_id != requester.user.to_string(): raise AuthError(403, "Cannot add account data for other users.") - try: - content_bytes = request.content.read() - body = json.loads(content_bytes) - except: - raise SynapseError(400, "Invalid JSON") - - if not isinstance(body, dict): - raise ValueError("Expected a JSON object") + body = parse_json_object_from_request(request) max_id = yield self.store.add_account_data_to_room( user_id, room_id, account_data_type, body diff --git a/synapse/rest/client/v2_alpha/filter.py b/synapse/rest/client/v2_alpha/filter.py index 7c94f6ec41..510f8b2c74 100644 --- a/synapse/rest/client/v2_alpha/filter.py +++ b/synapse/rest/client/v2_alpha/filter.py @@ -16,12 +16,11 @@ from twisted.internet import defer from synapse.api.errors import AuthError, SynapseError -from synapse.http.servlet import RestServlet +from synapse.http.servlet import RestServlet, parse_json_object_from_request from synapse.types import UserID from ._base import client_v2_patterns -import simplejson as json import logging @@ -84,12 +83,7 @@ class CreateFilterRestServlet(RestServlet): if not self.hs.is_mine(target_user): raise SynapseError(400, "Can only create filters for local users") - try: - content = json.loads(request.content.read()) - - # TODO(paul): check for required keys and invalid keys - except: - raise SynapseError(400, "Invalid filter definition") + content = parse_json_object_from_request(request) filter_id = yield self.filtering.add_user_filter( user_localpart=target_user.localpart, diff --git a/synapse/rest/client/v2_alpha/keys.py b/synapse/rest/client/v2_alpha/keys.py index f989b08614..89ab39491c 100644 --- a/synapse/rest/client/v2_alpha/keys.py +++ b/synapse/rest/client/v2_alpha/keys.py @@ -15,16 +15,15 @@ from twisted.internet import defer -from synapse.api.errors import SynapseError -from synapse.http.servlet import RestServlet +from synapse.http.servlet import RestServlet, parse_json_object_from_request from synapse.types import UserID from canonicaljson import encode_canonical_json from ._base import client_v2_patterns -import simplejson as json import logging +import simplejson as json logger = logging.getLogger(__name__) @@ -68,10 +67,9 @@ class 
KeyUploadServlet(RestServlet): user_id = requester.user.to_string() # TODO: Check that the device_id matches that in the authentication # or derive the device_id from the authentication instead. - try: - body = json.loads(request.content.read()) - except: - raise SynapseError(400, "Invalid key JSON") + + body = parse_json_object_from_request(request) + time_now = self.clock.time_msec() # TODO: Validate the JSON to make sure it has the right keys. @@ -173,10 +171,7 @@ class KeyQueryServlet(RestServlet): @defer.inlineCallbacks def on_POST(self, request, user_id, device_id): yield self.auth.get_user_by_req(request) - try: - body = json.loads(request.content.read()) - except: - raise SynapseError(400, "Invalid key JSON") + body = parse_json_object_from_request(request) result = yield self.handle_request(body) defer.returnValue(result) @@ -272,10 +267,7 @@ class OneTimeKeyServlet(RestServlet): @defer.inlineCallbacks def on_POST(self, request, user_id, device_id, algorithm): yield self.auth.get_user_by_req(request) - try: - body = json.loads(request.content.read()) - except: - raise SynapseError(400, "Invalid key JSON") + body = parse_json_object_from_request(request) result = yield self.handle_request(body) defer.returnValue(result) diff --git a/synapse/rest/client/v2_alpha/tags.py b/synapse/rest/client/v2_alpha/tags.py index 79c436a8cf..dac8603b07 100644 --- a/synapse/rest/client/v2_alpha/tags.py +++ b/synapse/rest/client/v2_alpha/tags.py @@ -15,15 +15,13 @@ from ._base import client_v2_patterns -from synapse.http.servlet import RestServlet -from synapse.api.errors import AuthError, SynapseError +from synapse.http.servlet import RestServlet, parse_json_object_from_request +from synapse.api.errors import AuthError from twisted.internet import defer import logging -import simplejson as json - logger = logging.getLogger(__name__) @@ -72,11 +70,7 @@ class TagServlet(RestServlet): if user_id != requester.user.to_string(): raise AuthError(403, "Cannot add tags for other users.") - try: - content_bytes = request.content.read() - body = json.loads(content_bytes) - except: - raise SynapseError(400, "Invalid tag JSON") + body = parse_json_object_from_request(request) max_id = yield self.store.add_tag_to_room(user_id, room_id, tag, body) diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py index 81ef1f4702..9552016fec 100644 --- a/synapse/rest/key/v2/remote_key_resource.py +++ b/synapse/rest/key/v2/remote_key_resource.py @@ -13,7 +13,7 @@ # limitations under the License. 
from synapse.http.server import request_handler, respond_with_json_bytes -from synapse.http.servlet import parse_integer +from synapse.http.servlet import parse_integer, parse_json_object_from_request from synapse.api.errors import SynapseError, Codes from twisted.web.resource import Resource @@ -22,7 +22,6 @@ from twisted.internet import defer from io import BytesIO -import json import logging logger = logging.getLogger(__name__) @@ -126,14 +125,7 @@ class RemoteKey(Resource): @request_handler @defer.inlineCallbacks def async_render_POST(self, request): - try: - content = json.loads(request.content.read()) - if type(content) != dict: - raise ValueError() - except ValueError: - raise SynapseError( - 400, "Content must be JSON object.", errcode=Codes.NOT_JSON - ) + content = parse_json_object_from_request(request) query = content["server_keys"] diff --git a/tests/rest/client/v1/test_profile.py b/tests/rest/client/v1/test_profile.py index 1d210f9bf8..af02fce8fb 100644 --- a/tests/rest/client/v1/test_profile.py +++ b/tests/rest/client/v1/test_profile.py @@ -95,7 +95,8 @@ class ProfileTestCase(unittest.TestCase): mocked_set.side_effect = AuthError(400, "message") (code, response) = yield self.mock_resource.trigger( - "PUT", "/profile/%s/displayname" % ("@4567:test"), '"Frank Jr."' + "PUT", "/profile/%s/displayname" % ("@4567:test"), + '{"displayname": "Frank Jr."}' ) self.assertTrue( @@ -121,7 +122,8 @@ class ProfileTestCase(unittest.TestCase): mocked_set.side_effect = SynapseError(400, "message") (code, response) = yield self.mock_resource.trigger( - "PUT", "/profile/%s/displayname" % ("@opaque:elsewhere"), None + "PUT", "/profile/%s/displayname" % ("@opaque:elsewhere"), + '{"displayname":"bob"}' ) self.assertTrue( From 15122da0e275ba18ec4633129715067a637f38af Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 11 Mar 2016 16:45:27 +0000 Subject: [PATCH 316/349] Thats not how transactions work. --- synapse/storage/registration.py | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 18898c44eb..bd4eb88a92 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -197,26 +197,29 @@ class RegistrationStore(SQLBaseStore): @defer.inlineCallbacks def user_delete_access_tokens(self, user_id, except_token_ids=[]): def f(txn): - txn.execute( - "SELECT token FROM access_tokens" - " WHERE user_id = ? AND id NOT IN (%s)" % ( + sql = "SELECT token FROM access_tokens WHERE user_id = ?" + clauses = [user_id] + + if except_token_ids: + sql += " AND id NOT IN (%s)" % ( ",".join(["?" for _ in except_token_ids]), - ), - [user_id] + except_token_ids - ) + ) + clauses += except_token_ids - while True: - rows = txn.fetchmany(100) - if not rows: - break + txn.execute(sql, clauses) - for row in rows: + rows = txn.fetchall() + + n = 100 + chunks = [rows[i:i + n] for i in xrange(0, len(rows), n)] + for chunk in chunks: + for row in chunk: txn.call_after(self.get_user_by_access_token.invalidate, (row[0],)) txn.execute( "DELETE FROM access_tokens WHERE token in (%s)" % ( - ",".join(["?" for _ in rows]), - ), [r[0] for r in rows] + ",".join(["?" 
for _ in chunk]), + ), [r[0] for r in chunk] ) yield self.runInteraction("user_delete_access_tokens", f) From 398cd1edfbefa207e44047bd63adcdcc6e859f2e Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Mon, 14 Mar 2016 14:16:41 +0000 Subject: [PATCH 317/349] Fix regression where synapse checked whether push rules were valid JSON before the compatibility hack that handled clients sending invalid JSON --- synapse/http/servlet.py | 21 +++++++++++++++++---- synapse/rest/client/v1/push_rule.py | 4 ++-- 2 files changed, 19 insertions(+), 6 deletions(-) diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py index 1996f8b136..1c8bd8666f 100644 --- a/synapse/http/servlet.py +++ b/synapse/http/servlet.py @@ -119,13 +119,13 @@ def parse_string(request, name, default=None, required=False, return default -def parse_json_object_from_request(request): - """Parse a JSON object from the body of a twisted HTTP request. +def parse_json_value_from_request(request): + """Parse a JSON value from the body of a twisted HTTP request. :param request: the twisted HTTP request. + :returns: The JSON value. :raises - SynapseError if the request body couldn't be decoded as JSON or - if it wasn't a JSON object. + SynapseError if the request body couldn't be decoded as JSON. """ try: content_bytes = request.content.read() @@ -137,6 +137,19 @@ def parse_json_object_from_request(request): except simplejson.JSONDecodeError: raise SynapseError(400, "Content not JSON.", errcode=Codes.NOT_JSON) + return content + + +def parse_json_object_from_request(request): + """Parse a JSON object from the body of a twisted HTTP request. + + :param request: the twisted HTTP request. + :raises + SynapseError if the request body couldn't be decoded as JSON or + if it wasn't a JSON object. + """ + content = parse_json_value_from_request(request) + if type(content) != dict: message = "Content must be a JSON object." raise SynapseError(400, message, errcode=Codes.BAD_JSON) diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py index b5695d427a..02d837ee6a 100644 --- a/synapse/rest/client/v1/push_rule.py +++ b/synapse/rest/client/v1/push_rule.py @@ -25,7 +25,7 @@ from synapse.storage.push_rule import ( from synapse.push.clientformat import format_push_rules_for_user from synapse.push.baserules import BASE_RULE_IDS from synapse.push.rulekinds import PRIORITY_CLASS_MAP -from synapse.http.servlet import parse_json_object_from_request +from synapse.http.servlet import parse_json_value_from_request class PushRuleRestServlet(ClientV1RestServlet): @@ -51,7 +51,7 @@ class PushRuleRestServlet(ClientV1RestServlet): if '/' in spec['rule_id'] or '\\' in spec['rule_id']: raise SynapseError(400, "rule_id may not contain slashes") - content = parse_json_object_from_request(request) + content = parse_json_value_from_request(request) user_id = requester.user.to_string() From a547e2df8517d22010f7978da88dfe81bbd2b207 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 14 Mar 2016 15:30:19 +0000 Subject: [PATCH 318/349] Return list, not generator. 
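The patch below replaces a generator expression with a list comprehension in StreamChangeCache (synapse/util/caches/stream_change_cache.py). A plausible reason, shown by this standalone sketch (plain bisect over a sorted list stands in for the real sortedcontainers-backed cache; all names here are illustrative, not Synapse code), is that a generator is evaluated lazily against the live cache and can only be consumed once, while a list is a stable snapshot the caller can reuse:

    import bisect

    # Toy stand-in for StreamChangeCache: sorted stream positions -> entities.
    positions = [1, 2]
    entities = {1: "@alice:example.com", 2: "@bob:example.com"}

    def changed_since_generator(stream_pos):
        # Lazy: the entity lookups happen only when iterated, and the
        # generator is usable exactly once.
        i = bisect.bisect_right(positions, stream_pos)
        return (entities[p] for p in positions[i:])

    def changed_since_list(stream_pos):
        # Eager: a snapshot the caller can iterate any number of times.
        i = bisect.bisect_right(positions, stream_pos)
        return [entities[p] for p in positions[i:]]

    gen = changed_since_generator(0)
    print(list(gen))   # ['@alice:example.com', '@bob:example.com']
    print(list(gen))   # [] -- the generator is already exhausted

    snapshot = changed_since_list(0)
    print(snapshot)    # safe to reuse
    print(snapshot)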
--- synapse/util/caches/stream_change_cache.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py index a1aec7aa55..ea8a74ca69 100644 --- a/synapse/util/caches/stream_change_cache.py +++ b/synapse/util/caches/stream_change_cache.py @@ -99,9 +99,7 @@ class StreamChangeCache(object): keys = self._cache.keys() i = keys.bisect_right(stream_pos) - return ( - self._cache[k] for k in keys[i:] - ) + return [self._cache[k] for k in keys[i:]] else: return None From 590fbbef03c21308c8ff038541b7ef6d33f9c5fa Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 14 Mar 2016 15:50:40 +0000 Subject: [PATCH 319/349] Add config to create guest account on 3pid invite Currently, when a 3pid invite request is sent to an identity server, it includes a provisioned guest access token. This allows the link in the, say, invite email to include the guest access token ensuring that the same account is used each time the link is clicked. This flow has a number of flaws, including when using different servers or servers that have guest access disabled. For now, we keep this implementation but hide it behind a config option until a better flow is implemented. --- synapse/config/registration.py | 4 +++ synapse/handlers/room.py | 57 +++++++++++++++++++--------------- 2 files changed, 36 insertions(+), 25 deletions(-) diff --git a/synapse/config/registration.py b/synapse/config/registration.py index ab062d528c..87e500c97a 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -37,6 +37,10 @@ class RegistrationConfig(Config): self.trusted_third_party_id_servers = config["trusted_third_party_id_servers"] self.allow_guest_access = config.get("allow_guest_access", False) + self.invite_3pid_guest = ( + self.allow_guest_access and config.get("invite_3pid_guest", False) + ) + def default_config(self, **kwargs): registration_shared_secret = random_string_with_symbols(50) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 57113ae4a5..051468989f 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -877,36 +877,43 @@ class RoomMemberHandler(BaseHandler): user. 
""" - registration_handler = self.hs.get_handlers().registration_handler - guest_access_token = yield registration_handler.guest_access_token_for( - medium=medium, - address=address, - inviter_user_id=inviter_user_id, - ) - - guest_user_info = yield self.hs.get_auth().get_user_by_access_token( - guest_access_token - ) - is_url = "%s%s/_matrix/identity/api/v1/store-invite" % ( id_server_scheme, id_server, ) + + invite_config = { + "medium": medium, + "address": address, + "room_id": room_id, + "room_alias": room_alias, + "room_avatar_url": room_avatar_url, + "room_join_rules": room_join_rules, + "room_name": room_name, + "sender": inviter_user_id, + "sender_display_name": inviter_display_name, + "sender_avatar_url": inviter_avatar_url, + } + + if self.hs.config.invite_3pid_guest: + registration_handler = self.hs.get_handlers().registration_handler + guest_access_token = yield registration_handler.guest_access_token_for( + medium=medium, + address=address, + inviter_user_id=inviter_user_id, + ) + + guest_user_info = yield self.hs.get_auth().get_user_by_access_token( + guest_access_token + ) + + invite_config.update({ + "guest_access_token": guest_access_token, + "guest_user_id": guest_user_info["user"].to_string(), + }) + data = yield self.hs.get_simple_http_client().post_urlencoded_get_json( is_url, - { - "medium": medium, - "address": address, - "room_id": room_id, - "room_alias": room_alias, - "room_avatar_url": room_avatar_url, - "room_join_rules": room_join_rules, - "room_name": room_name, - "sender": inviter_user_id, - "sender_display_name": inviter_display_name, - "sender_avatar_url": inviter_avatar_url, - "guest_user_id": guest_user_info["user"].to_string(), - "guest_access_token": guest_access_token, - } + invite_config ) # TODO: Check for success token = data["token"] From 9e982750ee5d0872c2157a444070878f2e3a6e4f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 15 Mar 2016 13:24:31 +0000 Subject: [PATCH 320/349] Persist rejection of invites over federation --- synapse/handlers/federation.py | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 27f2b40bfe..86ed37e9f3 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -813,7 +813,23 @@ class FederationHandler(BaseHandler): target_hosts, signed_event ) - defer.returnValue(None) + + context = yield self.state_handler.compute_event_context(event) + + event_stream_id, max_stream_id = yield self.store.persist_event( + event, + context=context, + backfilled=False, + ) + + target_user = UserID.from_string(event.state_key) + with PreserveLoggingContext(): + self.notifier.on_new_room_event( + event, event_stream_id, max_stream_id, + extra_users=[target_user], + ) + + defer.returnValue(event) @defer.inlineCallbacks def _make_and_verify_event(self, target_hosts, room_id, user_id, membership, From e5f0e5893127b9474ed8ea38827a9d143cbff1e8 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 15 Mar 2016 13:48:40 +0000 Subject: [PATCH 321/349] Remove needless PreserveLoggingContext --- synapse/handlers/federation.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 86ed37e9f3..f599e817aa 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -823,11 +823,10 @@ class FederationHandler(BaseHandler): ) target_user = UserID.from_string(event.state_key) - with PreserveLoggingContext(): - 
self.notifier.on_new_room_event( - event, event_stream_id, max_stream_id, - extra_users=[target_user], - ) + self.notifier.on_new_room_event( + event, event_stream_id, max_stream_id, + extra_users=[target_user], + ) defer.returnValue(event) From b6e8420aeed9921ba7d0fd4c8ebaf1b64d5f677c Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Tue, 15 Mar 2016 17:01:43 +0000 Subject: [PATCH 322/349] Add replication stream for pushers --- synapse/replication/resource.py | 25 +++++++- synapse/storage/__init__.py | 5 +- synapse/storage/pusher.py | 63 ++++++++++++++----- .../schema/delta/30/deleted_pushers.sql | 24 +++++++ synapse/storage/util/id_generators.py | 7 ++- tests/replication/test_resource.py | 1 + 6 files changed, 107 insertions(+), 18 deletions(-) create mode 100644 synapse/storage/schema/delta/30/deleted_pushers.sql diff --git a/synapse/replication/resource.py b/synapse/replication/resource.py index adc1eb1d0b..8c1ae0fbc7 100644 --- a/synapse/replication/resource.py +++ b/synapse/replication/resource.py @@ -37,6 +37,7 @@ STREAM_NAMES = ( ("user_account_data", "room_account_data", "tag_account_data",), ("backfill",), ("push_rules",), + ("pushers",), ) @@ -65,6 +66,7 @@ class ReplicationResource(Resource): * "tag_account_data": Per room per user tags. * "backfill": Old events that have been backfilled from other servers. * "push_rules": Per user changes to push rules. + * "pushers": Per user changes to their pushers. The API takes two additional query parameters: @@ -120,6 +122,7 @@ class ReplicationResource(Resource): stream_token = yield self.sources.get_current_token() backfill_token = yield self.store.get_current_backfill_token() push_rules_token, room_stream_token = self.store.get_push_rules_stream_token() + pushers_token = self.store.get_pushers_stream_token() defer.returnValue(_ReplicationToken( room_stream_token, @@ -129,6 +132,7 @@ class ReplicationResource(Resource): int(stream_token.account_data_key), backfill_token, push_rules_token, + pushers_token, )) @request_handler @@ -151,6 +155,7 @@ class ReplicationResource(Resource): yield self.typing(writer, current_token) # TODO: implement limit yield self.receipts(writer, current_token, limit) yield self.push_rules(writer, current_token, limit) + yield self.pushers(writer, current_token, limit) self.streams(writer, current_token) logger.info("Replicated %d rows", writer.total) @@ -297,6 +302,24 @@ class ReplicationResource(Resource): "priority_class", "priority", "conditions", "actions" )) + @defer.inlineCallbacks + def pushers(self, writer, current_token, limit): + current_position = current_token.pushers + + pushers = parse_integer(writer.request, "pushers") + if pushers is not None: + updated, deleted = yield self.store.get_all_updated_pushers( + pushers, current_position, limit + ) + writer.write_header_and_rows("pushers", updated, ( + "position", "user_id", "access_token", "profile_tag", "kind", + "app_id", "app_display_name", "device_display_name", "pushkey", + "ts", "lang", "data" + )) + writer.write_header_and_rows("deleted", deleted, ( + "position", "user_id", "app_id", "pushkey" + )) + class _Writer(object): """Writes the streams as a JSON object as the response to the request""" @@ -327,7 +350,7 @@ class _Writer(object): class _ReplicationToken(collections.namedtuple("_ReplicationToken", ( "events", "presence", "typing", "receipts", "account_data", "backfill", - "push_rules" + "push_rules", "pushers" ))): __slots__ = [] diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 168eb27b03..250ba536ea 100644 
--- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -119,12 +119,15 @@ class DataStore(RoomMemberStore, RoomStore, self._state_groups_id_gen = IdGenerator(db_conn, "state_groups", "id") self._access_tokens_id_gen = IdGenerator(db_conn, "access_tokens", "id") self._refresh_tokens_id_gen = IdGenerator(db_conn, "refresh_tokens", "id") - self._pushers_id_gen = IdGenerator(db_conn, "pushers", "id") self._push_rule_id_gen = IdGenerator(db_conn, "push_rules", "id") self._push_rules_enable_id_gen = IdGenerator(db_conn, "push_rules_enable", "id") self._push_rules_stream_id_gen = ChainedIdGenerator( self._stream_id_gen, db_conn, "push_rules_stream", "stream_id" ) + self._pushers_id_gen = StreamIdGenerator( + db_conn, "pushers", "id", + extra_tables=[("deleted_pushers", "stream_id")], + ) events_max = self._stream_id_gen.get_max_token() event_cache_prefill, min_event_val = self._get_cache_dict( diff --git a/synapse/storage/pusher.py b/synapse/storage/pusher.py index 7693ab9082..29da3bbd13 100644 --- a/synapse/storage/pusher.py +++ b/synapse/storage/pusher.py @@ -16,8 +16,6 @@ from ._base import SQLBaseStore from twisted.internet import defer -from synapse.api.errors import StoreError - from canonicaljson import encode_canonical_json import logging @@ -79,12 +77,41 @@ class PusherStore(SQLBaseStore): rows = yield self.runInteraction("get_all_pushers", get_pushers) defer.returnValue(rows) + def get_pushers_stream_token(self): + return self._pushers_id_gen.get_max_token() + + def get_all_updated_pushers(self, last_id, current_id, limit): + def get_all_updated_pushers_txn(txn): + sql = ( + "SELECT id, user_name, access_token, profile_tag, kind," + " app_id, app_display_name, device_display_name, pushkey, ts," + " lang, data" + " FROM pushers" + " WHERE ? < id AND id <= ?" + " ORDER BY id ASC LIMIT ?" + ) + txn.execute(sql, (last_id, current_id, limit)) + updated = txn.fetchall() + + sql = ( + "SELECT stream_id, user_id, app_id, pushkey" + " FROM deleted_pushers" + " WHERE ? < stream_id AND stream_id <= ?" + " ORDER BY stream_id ASC LIMIT ?" 
+ ) + txn.execute(sql, (last_id, current_id, limit)) + deleted = txn.fetchall() + + return (updated, deleted) + return self.runInteraction( + "get_all_updated_pushers", get_all_updated_pushers_txn + ) + @defer.inlineCallbacks def add_pusher(self, user_id, access_token, kind, app_id, app_display_name, device_display_name, pushkey, pushkey_ts, lang, data, profile_tag=""): - try: - next_id = self._pushers_id_gen.get_next() + with self._pushers_id_gen.get_next() as stream_id: yield self._simple_upsert( "pushers", dict( @@ -101,23 +128,29 @@ class PusherStore(SQLBaseStore): lang=lang, data=encode_canonical_json(data), profile_tag=profile_tag, - ), - insertion_values=dict( - id=next_id, + id=stream_id, ), desc="add_pusher", ) - except Exception as e: - logger.error("create_pusher with failed: %s", e) - raise StoreError(500, "Problem creating pusher.") @defer.inlineCallbacks def delete_pusher_by_app_id_pushkey_user_id(self, app_id, pushkey, user_id): - yield self._simple_delete_one( - "pushers", - {"app_id": app_id, "pushkey": pushkey, 'user_name': user_id}, - desc="delete_pusher_by_app_id_pushkey_user_id", - ) + def delete_pusher_txn(txn, stream_id): + self._simple_delete_one( + txn, + "pushers", + {"app_id": app_id, "pushkey": pushkey, "user_name": user_id} + ) + self._simple_upsert_txn( + txn, + "deleted_pushers", + {"app_id": app_id, "pushkey": pushkey, "user_id": user_id}, + {"stream_id", stream_id}, + ) + with self._pushers_id_gen.get_next() as stream_id: + yield self.runInteraction( + "delete_pusher", delete_pusher_txn, stream_id + ) @defer.inlineCallbacks def update_pusher_last_token(self, app_id, pushkey, user_id, last_token): diff --git a/synapse/storage/schema/delta/30/deleted_pushers.sql b/synapse/storage/schema/delta/30/deleted_pushers.sql new file mode 100644 index 0000000000..cdcf79ac81 --- /dev/null +++ b/synapse/storage/schema/delta/30/deleted_pushers.sql @@ -0,0 +1,24 @@ +/* Copyright 2016 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +CREATE TABLE IF NOT EXISTS deleted_pushers( + stream_id BIGINT NOT NULL, + app_id TEXT NOT NULL, + pushkey TEXT NOT NULL, + user_id TEXT NOT NULL, + UNIQUE (app_id, pushkey, user_id) +); + +CREATE INDEX deleted_pushers_stream_id ON deleted_pushers (stream_id); diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index 610ddad423..a02dfc7d58 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -49,9 +49,14 @@ class StreamIdGenerator(object): with stream_id_gen.get_next() as stream_id: # ... persist event ... 
""" - def __init__(self, db_conn, table, column): + def __init__(self, db_conn, table, column, extra_tables=[]): self._lock = threading.Lock() self._current_max = _load_max_id(db_conn, table, column) + for table, column in extra_tables: + self._current_max = max( + self._current_max, + _load_max_id(db_conn, table, column) + ) self._unfinished_ids = deque() def get_next(self): diff --git a/tests/replication/test_resource.py b/tests/replication/test_resource.py index 4a42eb3365..f4b5fb3328 100644 --- a/tests/replication/test_resource.py +++ b/tests/replication/test_resource.py @@ -131,6 +131,7 @@ class ReplicationResourceCase(unittest.TestCase): test_timeout_tag_account_data = _test_timeout("tag_account_data") test_timeout_backfill = _test_timeout("backfill") test_timeout_push_rules = _test_timeout("push_rules") + test_timeout_pushers = _test_timeout("pushers") @defer.inlineCallbacks def send_text_message(self, room_id, message): From 12904932c41c73714543b817157f09073fcc2625 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Tue, 15 Mar 2016 17:41:06 +0000 Subject: [PATCH 323/349] Hook up adding a pusher to the notifier for replication. --- synapse/notifier.py | 6 ++++++ synapse/rest/client/v1/pusher.py | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/synapse/notifier.py b/synapse/notifier.py index 9b69b0333a..f00cd8c588 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -282,6 +282,12 @@ class Notifier(object): self.notify_replication() + def on_new_replication_data(self): + """Used to inform replication listeners that something has happend + without waking up any of the normal user event streams""" + with PreserveLoggingContext(): + self.notify_replication() + @defer.inlineCallbacks def wait_for_events(self, user_id, timeout, callback, room_ids=None, from_token=StreamToken.START): diff --git a/synapse/rest/client/v1/pusher.py b/synapse/rest/client/v1/pusher.py index ee029b4f77..9881f068c3 100644 --- a/synapse/rest/client/v1/pusher.py +++ b/synapse/rest/client/v1/pusher.py @@ -29,6 +29,10 @@ logger = logging.getLogger(__name__) class PusherRestServlet(ClientV1RestServlet): PATTERNS = client_path_patterns("/pushers/set$") + def __init__(self, hs): + super(PusherRestServlet, self).__init__(hs) + self.notifier = hs.get_notifier() + @defer.inlineCallbacks def on_POST(self, request): requester = yield self.auth.get_user_by_req(request) @@ -87,6 +91,8 @@ class PusherRestServlet(ClientV1RestServlet): raise SynapseError(400, "Config Error: " + pce.message, errcode=Codes.MISSING_PARAM) + self.notifier.on_new_replication_data() + defer.returnValue((200, {})) def on_OPTIONS(self, _): From ee32d622cec56f2ab7b11577d15e4b805477d13f Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Tue, 15 Mar 2016 17:47:36 +0000 Subject: [PATCH 324/349] Fix a couple of errors when deleting pushers --- synapse/storage/pusher.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/storage/pusher.py b/synapse/storage/pusher.py index 29da3bbd13..87b2ac5773 100644 --- a/synapse/storage/pusher.py +++ b/synapse/storage/pusher.py @@ -136,7 +136,7 @@ class PusherStore(SQLBaseStore): @defer.inlineCallbacks def delete_pusher_by_app_id_pushkey_user_id(self, app_id, pushkey, user_id): def delete_pusher_txn(txn, stream_id): - self._simple_delete_one( + self._simple_delete_one_txn( txn, "pushers", {"app_id": app_id, "pushkey": pushkey, "user_name": user_id} @@ -145,7 +145,7 @@ class PusherStore(SQLBaseStore): txn, "deleted_pushers", {"app_id": app_id, "pushkey": pushkey, "user_id": user_id}, - 
{"stream_id", stream_id}, + {"stream_id": stream_id}, ) with self._pushers_id_gen.get_next() as stream_id: yield self.runInteraction( From a877209c8b0c7c476ee6676c6d00c4cacdc83207 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 16 Mar 2016 09:45:37 +0000 Subject: [PATCH 325/349] Password reset docs and script Replace the bash/perl gen_password script with a python one, and write a note on how to use it. --- README.rst | 20 ++++++++++++++++++++ scripts/gen_password | 1 - scripts/hash_password | 39 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 59 insertions(+), 1 deletion(-) delete mode 100644 scripts/gen_password create mode 100755 scripts/hash_password diff --git a/README.rst b/README.rst index 8a745259bf..a48a0802b2 100644 --- a/README.rst +++ b/README.rst @@ -525,6 +525,26 @@ Logging In To An Existing Account Just enter the ``@localpart:my.domain.here`` Matrix user ID and password into the form and click the Login button. +Password reset +============== + +Synapse does not yet support a password-reset function (see +https://matrix.org/jira/browse/SYN-11). In the meantime, it is possible to +manually reset a user's password via direct database access. + +First calculate the hash of the new password: + + $ source ~/.synapse/bin/activate + $ ./scripts/hash_password + Password: + Confirm password: + $2a$12$xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + +Then update the `users` table in the database: + + UPDATE users SET password_hash='$2a$12$xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx' + WHERE name='@test:test.com'; + Identity Servers ================ diff --git a/scripts/gen_password b/scripts/gen_password deleted file mode 100644 index 7afd3a5dfd..0000000000 --- a/scripts/gen_password +++ /dev/null @@ -1 +0,0 @@ -perl -MCrypt::Random -MCrypt::Eksblowfish::Bcrypt -e 'print Crypt::Eksblowfish::Bcrypt::bcrypt("secret", "\$2\$12\$" . Crypt::Eksblowfish::Bcrypt::en_base64(Crypt::Random::makerandom_octet(Length=>16)))."\n"' diff --git a/scripts/hash_password b/scripts/hash_password new file mode 100755 index 0000000000..e784600989 --- /dev/null +++ b/scripts/hash_password @@ -0,0 +1,39 @@ +#!/usr/bin/env python + +import argparse +import bcrypt +import getpass + +bcrypt_rounds=12 + +def prompt_for_pass(): + password = getpass.getpass("Password: ") + + if not password: + raise Exception("Password cannot be blank.") + + confirm_password = getpass.getpass("Confirm password: ") + + if password != confirm_password: + raise Exception("Passwords do not match.") + + return password + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Calculate the hash of a new password, so that passwords" + " can be reset") + parser.add_argument( + "-p", "--password", + default=None, + help="New password for user. 
Will prompt if omitted.", + ) + + args = parser.parse_args() + password = args.password + + if not password: + password = prompt_for_pass() + + print bcrypt.hashpw(password, bcrypt.gensalt(bcrypt_rounds)) + From ba660ecde20544ac1cfc163a5586f4f202627afa Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Wed, 16 Mar 2016 10:35:00 +0000 Subject: [PATCH 326/349] Add a comment to offer a hint to an explanation for why we have a unique constraint on (app_id, pushkey, user_id) --- synapse/storage/schema/delta/30/deleted_pushers.sql | 1 + 1 file changed, 1 insertion(+) diff --git a/synapse/storage/schema/delta/30/deleted_pushers.sql b/synapse/storage/schema/delta/30/deleted_pushers.sql index cdcf79ac81..712c454aa1 100644 --- a/synapse/storage/schema/delta/30/deleted_pushers.sql +++ b/synapse/storage/schema/delta/30/deleted_pushers.sql @@ -18,6 +18,7 @@ CREATE TABLE IF NOT EXISTS deleted_pushers( app_id TEXT NOT NULL, pushkey TEXT NOT NULL, user_id TEXT NOT NULL, + /* We only track the most recent delete for each app_id, pushkey and user_id. */ UNIQUE (app_id, pushkey, user_id) ); From 660ae8e0f3c7f667b3a24b02f095d60c2b09531f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 16 Mar 2016 10:40:38 +0000 Subject: [PATCH 327/349] Clarify that we do have reset functionality via the IS --- README.rst | 41 ++++++++++++++++++++--------------------- 1 file changed, 20 insertions(+), 21 deletions(-) diff --git a/README.rst b/README.rst index a48a0802b2..285fc5aa8a 100644 --- a/README.rst +++ b/README.rst @@ -525,27 +525,6 @@ Logging In To An Existing Account Just enter the ``@localpart:my.domain.here`` Matrix user ID and password into the form and click the Login button. -Password reset -============== - -Synapse does not yet support a password-reset function (see -https://matrix.org/jira/browse/SYN-11). In the meantime, it is possible to -manually reset a user's password via direct database access. - -First calculate the hash of the new password: - - $ source ~/.synapse/bin/activate - $ ./scripts/hash_password - Password: - Confirm password: - $2a$12$xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx - -Then update the `users` table in the database: - - UPDATE users SET password_hash='$2a$12$xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx' - WHERE name='@test:test.com'; - - Identity Servers ================ @@ -565,6 +544,26 @@ as the primary means of identity and E2E encryption is not complete. As such, we are running a single identity server (https://matrix.org) at the current time. +Password reset +============== + +If a user has registered an email address to their account using an identity +server, they can request a password-reset token via clients such as Vector. + +A manual password reset can be done via direct database access as follows. + +First calculate the hash of the new password: + + $ source ~/.synapse/bin/activate + $ ./scripts/hash_password + Password: + Confirm password: + $2a$12$xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + +Then update the `users` table in the database: + + UPDATE users SET password_hash='$2a$12$xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx' + WHERE name='@test:test.com'; Where's the spec?! ================== From c12b9d719a3cf1eeb9c4c8d354dbaecab5e76233 Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 16 Mar 2016 11:56:24 +0000 Subject: [PATCH 328/349] Make registration idempotent: if you specify the same session, make it give you an access token for the user that was registered on previous uses of that session. 
Tweak the UI auth layer to not delete sessions when their auth has completed and hence expire themn so they don't hang around until server restart. Allow server-side data to be associated with UI auth sessions. --- synapse/handlers/auth.py | 58 +++++++++++++++++++----- synapse/rest/client/v2_alpha/register.py | 27 ++++++++++- 2 files changed, 73 insertions(+), 12 deletions(-) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 5c0ea636bc..5dc9d91757 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -27,6 +27,7 @@ import logging import bcrypt import pymacaroons import simplejson +import time import synapse.util.stringutils as stringutils @@ -35,6 +36,7 @@ logger = logging.getLogger(__name__) class AuthHandler(BaseHandler): + SESSION_EXPIRE_SECS = 48 * 60 * 60 def __init__(self, hs): super(AuthHandler, self).__init__(hs) @@ -66,15 +68,18 @@ class AuthHandler(BaseHandler): 'auth' key: this method prompts for auth if none is sent. clientip (str): The IP address of the client. Returns: - A tuple of (authed, dict, dict) where authed is true if the client - has successfully completed an auth flow. If it is true, the first - dict contains the authenticated credentials of each stage. + A tuple of (authed, dict, dict, session_id) where authed is true if + the client has successfully completed an auth flow. If it is true + the first dict contains the authenticated credentials of each stage. If authed is false, the first dictionary is the server response to the login request and should be passed back to the client. In either case, the second dict contains the parameters for this request (which may have been given only in a previous call). + + session_id is the ID of this session, either passed in by the client + or assigned by the call to check_auth """ authdict = None @@ -103,7 +108,10 @@ class AuthHandler(BaseHandler): if not authdict: defer.returnValue( - (False, self._auth_dict_for_flows(flows, session), clientdict) + ( + False, self._auth_dict_for_flows(flows, session), + clientdict, session['id'] + ) ) if 'creds' not in session: @@ -122,12 +130,11 @@ class AuthHandler(BaseHandler): for f in flows: if len(set(f) - set(creds.keys())) == 0: logger.info("Auth completed with creds: %r", creds) - self._remove_session(session) - defer.returnValue((True, creds, clientdict)) + defer.returnValue((True, creds, clientdict, session['id'])) ret = self._auth_dict_for_flows(flows, session) ret['completed'] = creds.keys() - defer.returnValue((False, ret, clientdict)) + defer.returnValue((False, ret, clientdict, session['id'])) @defer.inlineCallbacks def add_oob_auth(self, stagetype, authdict, clientip): @@ -154,6 +161,29 @@ class AuthHandler(BaseHandler): defer.returnValue(True) defer.returnValue(False) + def set_session_data(self, session_id, key, value): + """ + Store a key-value pair into the sessions data associated with this + request. This data is stored server-side and cannot be modified by + the client. 
+ :param session_id: (string) The ID of this session as returned from check_auth + :param key: (string) The key to store the data under + :param value: (any) The data to store + """ + sess = self._get_session_info(session_id) + sess.setdefault('serverdict', {})[key] = value + self._save_session(sess) + + def get_session_data(self, session_id, key, default=None): + """ + Retrieve data stored with set_session_data + :param session_id: (string) The ID of this session as returned from check_auth + :param key: (string) The key to store the data under + :param default: (any) Value to return if the key has not been set + """ + sess = self._get_session_info(session_id) + return sess.setdefault('serverdict', {}).get(key, default) + @defer.inlineCallbacks def _check_password_auth(self, authdict, _): if "user" not in authdict or "password" not in authdict: @@ -263,7 +293,7 @@ class AuthHandler(BaseHandler): if not session_id: # create a new session while session_id is None or session_id in self.sessions: - session_id = stringutils.random_string(24) + session_id = stringutils.random_string_with_symbols(24) self.sessions[session_id] = { "id": session_id, } @@ -455,11 +485,17 @@ class AuthHandler(BaseHandler): def _save_session(self, session): # TODO: Persistent storage logger.debug("Saving session %s", session) + session["last_used"] = time.time() self.sessions[session["id"]] = session + self._prune_sessions() - def _remove_session(self, session): - logger.debug("Removing session %s", session) - del self.sessions[session["id"]] + def _prune_sessions(self): + for sid,sess in self.sessions.items(): + last_used = 0 + if 'last_used' in sess: + last_used = sess['last_used'] + if last_used < time.time() - AuthHandler.SESSION_EXPIRE_SECS: + del self.sessions[sid] def hash(self, password): """Computes a secure hash of password. 
diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 533ff136eb..649491bdf6 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -139,7 +139,7 @@ class RegisterRestServlet(RestServlet): [LoginType.EMAIL_IDENTITY] ] - authed, result, params = yield self.auth_handler.check_auth( + authed, result, params, session_id = yield self.auth_handler.check_auth( flows, body, self.hs.get_ip_from_request(request) ) @@ -147,6 +147,24 @@ class RegisterRestServlet(RestServlet): defer.returnValue((401, result)) return + # have we already registered a user for this session + registered_user_id = self.auth_handler.get_session_data( + session_id, "registered_user_id", None + ) + if registered_user_id is not None: + logger.info( + "Already registered user ID %r for this session", + registered_user_id + ) + access_token = yield self.auth_handler.issue_access_token(registered_user_id) + refresh_token = yield self.auth_handler.issue_refresh_token(registered_user_id) + defer.returnValue((200, { + "user_id": registered_user_id, + "access_token": access_token, + "home_server": self.hs.hostname, + "refresh_token": refresh_token, + })) + # NB: This may be from the auth handler and NOT from the POST if 'password' not in params: raise SynapseError(400, "Missing password.", Codes.MISSING_PARAM) @@ -161,6 +179,13 @@ class RegisterRestServlet(RestServlet): guest_access_token=guest_access_token, ) + # remember that we've now registered that user account, and with what + # user ID (since the user may not have specified) + logger.info("%r", body) + self.auth_handler.set_session_data( + session_id, "registered_user_id", user_id + ) + if result and LoginType.EMAIL_IDENTITY in result: threepid = result[LoginType.EMAIL_IDENTITY] From 99797947aa5a7cdf8fe12043b4f25a155bcf4555 Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 16 Mar 2016 12:51:34 +0000 Subject: [PATCH 329/349] pep8 & remove debug logging --- synapse/handlers/auth.py | 2 +- synapse/rest/client/v2_alpha/register.py | 9 +++++---- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 5dc9d91757..a9f5e3710b 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -490,7 +490,7 @@ class AuthHandler(BaseHandler): self._prune_sessions() def _prune_sessions(self): - for sid,sess in self.sessions.items(): + for sid, sess in self.sessions.items(): last_used = 0 if 'last_used' in sess: last_used = sess['last_used'] diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 649491bdf6..c440430e25 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -149,7 +149,7 @@ class RegisterRestServlet(RestServlet): # have we already registered a user for this session registered_user_id = self.auth_handler.get_session_data( - session_id, "registered_user_id", None + session_id, "registered_user_id", None ) if registered_user_id is not None: logger.info( @@ -157,7 +157,9 @@ class RegisterRestServlet(RestServlet): registered_user_id ) access_token = yield self.auth_handler.issue_access_token(registered_user_id) - refresh_token = yield self.auth_handler.issue_refresh_token(registered_user_id) + refresh_token = yield self.auth_handler.issue_refresh_token( + registered_user_id + ) defer.returnValue((200, { "user_id": registered_user_id, "access_token": access_token, @@ -181,9 +183,8 @@ class 
RegisterRestServlet(RestServlet): # remember that we've now registered that user account, and with what # user ID (since the user may not have specified) - logger.info("%r", body) self.auth_handler.set_session_data( - session_id, "registered_user_id", user_id + session_id, "registered_user_id", user_id ) if result and LoginType.EMAIL_IDENTITY in result: From ff7d3dc3a03538b08f36342b15492a348b2b0391 Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 16 Mar 2016 14:25:14 +0000 Subject: [PATCH 330/349] Fix tests --- tests/rest/client/v2_alpha/test_register.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index 9a202e9dd7..affd42c015 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -22,9 +22,10 @@ class RegisterRestServletTestCase(unittest.TestCase): side_effect=lambda x: defer.succeed(self.appservice)) ) - self.auth_result = (False, None, None) + self.auth_result = (False, None, None, None) self.auth_handler = Mock( - check_auth=Mock(side_effect=lambda x, y, z: self.auth_result) + check_auth=Mock(side_effect=lambda x, y, z: self.auth_result), + get_session_data=Mock(return_value=None) ) self.registration_handler = Mock() self.identity_handler = Mock() @@ -112,7 +113,7 @@ class RegisterRestServletTestCase(unittest.TestCase): self.auth_result = (True, None, { "username": "kermit", "password": "monkey" - }) + }, None) self.registration_handler.register = Mock(return_value=(user_id, token)) (code, result) = yield self.servlet.on_POST(self.request) @@ -135,7 +136,7 @@ class RegisterRestServletTestCase(unittest.TestCase): self.auth_result = (True, None, { "username": "kermit", "password": "monkey" - }) + }, None) self.registration_handler.register = Mock(return_value=("@user:id", "t")) d = self.servlet.on_POST(self.request) return self.assertFailure(d, SynapseError) From f5e90422f5d70afaf9bdf97cc620b563cf31a8eb Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 16 Mar 2016 14:33:19 +0000 Subject: [PATCH 331/349] take extra return val from check_auth in account too --- synapse/rest/client/v2_alpha/account.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index dd4ea45588..7f8a6a4cf7 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -43,7 +43,7 @@ class PasswordRestServlet(RestServlet): body = parse_json_object_from_request(request) - authed, result, params = yield self.auth_handler.check_auth([ + authed, result, params, _ = yield self.auth_handler.check_auth([ [LoginType.PASSWORD], [LoginType.EMAIL_IDENTITY] ], body, self.hs.get_ip_from_request(request)) From 742b6c6d158f46a71724821ce13b8ad535df08bc Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 16 Mar 2016 15:42:35 +0000 Subject: [PATCH 332/349] Use hs get_clock instead of time.time() --- synapse/handlers/auth.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index a9f5e3710b..dba6c76dfc 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -36,7 +36,7 @@ logger = logging.getLogger(__name__) class AuthHandler(BaseHandler): - SESSION_EXPIRE_SECS = 48 * 60 * 60 + SESSION_EXPIRE_MS = 48 * 60 * 60 * 1000 def __init__(self, hs): super(AuthHandler, self).__init__(hs) @@ -494,7 +494,7 @@ class AuthHandler(BaseHandler): 
last_used = 0 if 'last_used' in sess: last_used = sess['last_used'] - if last_used < time.time() - AuthHandler.SESSION_EXPIRE_SECS: + if last_used < self.hs.get_clock().time() - AuthHandler.SESSION_EXPIRE_MS: del self.sessions[sid] def hash(self, password): From 9671e6750c1756c7f76888b87eedfe7516ef748b Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 16 Mar 2016 15:51:28 +0000 Subject: [PATCH 333/349] Replace other time.time(). --- synapse/handlers/auth.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index dba6c76dfc..9fa8347098 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -27,7 +27,6 @@ import logging import bcrypt import pymacaroons import simplejson -import time import synapse.util.stringutils as stringutils @@ -485,7 +484,7 @@ class AuthHandler(BaseHandler): def _save_session(self, session): # TODO: Persistent storage logger.debug("Saving session %s", session) - session["last_used"] = time.time() + session["last_used"] = self.hs.get_clock().time_msec() self.sessions[session["id"]] = session self._prune_sessions() From 3176aebf9d827eeb939438deea49e12ceddc5b3e Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 16 Mar 2016 15:55:49 +0000 Subject: [PATCH 334/349] string with symbols is a bit too symboly. --- synapse/handlers/auth.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 9fa8347098..85f9b82712 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -292,7 +292,7 @@ class AuthHandler(BaseHandler): if not session_id: # create a new session while session_id is None or session_id in self.sessions: - session_id = stringutils.random_string_with_symbols(24) + session_id = stringutils.random_string(24) self.sessions[session_id] = { "id": session_id, } From 3ee7d7dc7f1793beefee433f780af81a64dfa590 Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 16 Mar 2016 16:18:52 +0000 Subject: [PATCH 335/349] time_msec() --- synapse/handlers/auth.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 85f9b82712..cdf9e9000c 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -493,7 +493,7 @@ class AuthHandler(BaseHandler): last_used = 0 if 'last_used' in sess: last_used = sess['last_used'] - if last_used < self.hs.get_clock().time() - AuthHandler.SESSION_EXPIRE_MS: + if last_used < self.hs.get_clock().time_msec() - AuthHandler.SESSION_EXPIRE_MS: del self.sessions[sid] def hash(self, password): From b58d10a87595bd64305f46c2ac86252a67d7b0e4 Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 16 Mar 2016 16:22:20 +0000 Subject: [PATCH 336/349] pep8 --- synapse/handlers/auth.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index cdf9e9000c..d7233cd0d6 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -493,7 +493,8 @@ class AuthHandler(BaseHandler): last_used = 0 if 'last_used' in sess: last_used = sess['last_used'] - if last_used < self.hs.get_clock().time_msec() - AuthHandler.SESSION_EXPIRE_MS: + now = self.hs.get_clock().time_msec() + if last_used < now - AuthHandler.SESSION_EXPIRE_MS: del self.sessions[sid] def hash(self, password): From a7daa5ae131cc860769d859cf03b48cefdc0500a Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 16 Mar 2016 19:36:57 +0000 Subject: [PATCH 337/349] Make registration idempotent, 
part 2: be idempotent if the client specifies a username. --- synapse/handlers/auth.py | 14 ++++++++++++++ synapse/handlers/register.py | 12 +++++++++++- synapse/rest/client/v2_alpha/register.py | 22 +++++++++++++++++----- 3 files changed, 42 insertions(+), 6 deletions(-) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index d7233cd0d6..82d458b424 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -160,6 +160,20 @@ class AuthHandler(BaseHandler): defer.returnValue(True) defer.returnValue(False) + def get_session_id(self, clientdict): + """ + Gets the session ID for a client given the client dictionary + :param clientdict: The dictionary sent by the client in the request + :return: The string session ID the client sent. If the client did not + send a session ID, returns None. + """ + sid = None + if clientdict and 'auth' in clientdict: + authdict = clientdict['auth'] + if 'session' in authdict: + sid = authdict['session'] + return sid + def set_session_data(self, session_id, key, value): """ Store a key-value pair into the sessions data associated with this diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 6ffb8c0da6..f287ee247b 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -47,7 +47,8 @@ class RegistrationHandler(BaseHandler): self._next_generated_user_id = None @defer.inlineCallbacks - def check_username(self, localpart, guest_access_token=None): + def check_username(self, localpart, guest_access_token=None, + assigned_user_id=None): yield run_on_reactor() if urllib.quote(localpart.encode('utf-8')) != localpart: @@ -60,6 +61,15 @@ class RegistrationHandler(BaseHandler): user = UserID(localpart, self.hs.hostname) user_id = user.to_string() + if assigned_user_id: + if user_id == assigned_user_id: + return + else: + raise SynapseError( + 400, + "A different user ID has already been registered for this session", + ) + yield self.check_user_id_not_appservice_exclusive(user_id) users = yield self.store.get_users_by_id_case_insensitive(user_id) diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index c440430e25..b8590560d3 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -16,6 +16,7 @@ from twisted.internet import defer from synapse.api.constants import LoginType +from synapse.types import UserID from synapse.api.errors import SynapseError, Codes, UnrecognizedRequestError from synapse.http.servlet import RestServlet, parse_json_object_from_request @@ -122,10 +123,25 @@ class RegisterRestServlet(RestServlet): guest_access_token = body.get("guest_access_token", None) + session_id = self.auth_handler.get_session_id(body) + logger.error("session id: %r", session_id) + registered_user_id = None + if session_id: + # if we get a registered user id out of here, it means we previously + # registered a user for this session, so we could just return the + # user here. We carry on and go through the auth checks though, + # for paranoia. 
+ registered_user_id = self.auth_handler.get_session_data( + session_id, "registered_user_id", None + ) + logger.error("already regged: %r", registered_user_id) + logger.error("check: %r", desired_username) + if desired_username is not None: yield self.registration_handler.check_username( desired_username, - guest_access_token=guest_access_token + guest_access_token=guest_access_token, + assigned_user_id=registered_user_id, ) if self.hs.config.enable_registration_captcha: @@ -147,10 +163,6 @@ class RegisterRestServlet(RestServlet): defer.returnValue((401, result)) return - # have we already registered a user for this session - registered_user_id = self.auth_handler.get_session_data( - session_id, "registered_user_id", None - ) if registered_user_id is not None: logger.info( "Already registered user ID %r for this session", From f984decd6636baa4974a136e2ce8d4fecab3146f Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 16 Mar 2016 19:40:48 +0000 Subject: [PATCH 338/349] Unused import --- synapse/rest/client/v2_alpha/register.py | 1 - 1 file changed, 1 deletion(-) diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index b8590560d3..d3e66740ad 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -16,7 +16,6 @@ from twisted.internet import defer from synapse.api.constants import LoginType -from synapse.types import UserID from synapse.api.errors import SynapseError, Codes, UnrecognizedRequestError from synapse.http.servlet import RestServlet, parse_json_object_from_request From 5670205e2a0e4b87005be743eb6cdfd817fe89ae Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 16 Mar 2016 19:49:42 +0000 Subject: [PATCH 339/349] remove debug logging --- synapse/rest/client/v2_alpha/register.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index d3e66740ad..d32c06c882 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -123,7 +123,6 @@ class RegisterRestServlet(RestServlet): guest_access_token = body.get("guest_access_token", None) session_id = self.auth_handler.get_session_id(body) - logger.error("session id: %r", session_id) registered_user_id = None if session_id: # if we get a registered user id out of here, it means we previously @@ -133,8 +132,6 @@ class RegisterRestServlet(RestServlet): registered_user_id = self.auth_handler.get_session_data( session_id, "registered_user_id", None ) - logger.error("already regged: %r", registered_user_id) - logger.error("check: %r", desired_username) if desired_username is not None: yield self.registration_handler.check_username( From 4ebb688f4ffcef2784df0aa7f5b088e72b28c07b Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Thu, 17 Mar 2016 10:59:12 +0000 Subject: [PATCH 340/349] Add option to definitions.py to search for functions a function refers to --- scripts-dev/definitions.py | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/scripts-dev/definitions.py b/scripts-dev/definitions.py index 8340c72618..47dac7772d 100755 --- a/scripts-dev/definitions.py +++ b/scripts-dev/definitions.py @@ -86,9 +86,12 @@ def used_names(prefix, item, defs, names): for name, funcs in defs.get('class', {}).items(): used_names(prefix + name + ".", name, funcs, names) + path = prefix.rstrip('.') for used in defs.get('uses', ()): if used in names: - names[used].setdefault('used', {}).setdefault(item, 
[]).append(prefix.rstrip('.')) + if item: + names[item].setdefault('uses', []).append(used) + names[used].setdefault('used', {}).setdefault(item, []).append(path) if __name__ == '__main__': @@ -113,6 +116,10 @@ if __name__ == '__main__': "--referrers", default=0, type=int, help="Include referrers up to the given depth" ) + parser.add_argument( + "--referred", default=0, type=int, + help="Include referred down to the given depth" + ) parser.add_argument( "--format", default="yaml", help="Output format, one of 'yaml' or 'dot'" @@ -161,6 +168,20 @@ if __name__ == '__main__': continue result[name] = definition + referred_depth = args.referred + referred = set() + while referred_depth: + referred_depth -= 1 + for entry in result.values(): + for uses in entry.get("uses", ()): + referred.add(uses) + for name, definition in names.items(): + if not name in referred: + continue + if ignore and any(pattern.match(name) for pattern in ignore): + continue + result[name] = definition + if args.format == 'yaml': yaml.dump(result, sys.stdout, default_flow_style=False) elif args.format == 'dot': From 673c96ce97052126f5bfd11c7dcc19880614ec25 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Thu, 17 Mar 2016 11:01:28 +0000 Subject: [PATCH 341/349] Remove dead code left over from presence changes --- synapse/handlers/events.py | 70 -------------------------------- synapse/handlers/presence.py | 4 -- synapse/storage/roommember.py | 24 ----------- tests/storage/test_roommember.py | 10 ----- 4 files changed, 108 deletions(-) diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py index 72a31a9755..f25a252523 100644 --- a/synapse/handlers/events.py +++ b/synapse/handlers/events.py @@ -18,7 +18,6 @@ from twisted.internet import defer from synapse.util.logutils import log_function from synapse.types import UserID from synapse.events.utils import serialize_event -from synapse.util.logcontext import preserve_context_over_fn from synapse.api.constants import Membership, EventTypes from synapse.events import EventBase @@ -31,20 +30,6 @@ import random logger = logging.getLogger(__name__) -def started_user_eventstream(distributor, user): - return preserve_context_over_fn( - distributor.fire, - "started_user_eventstream", user - ) - - -def stopped_user_eventstream(distributor, user): - return preserve_context_over_fn( - distributor.fire, - "stopped_user_eventstream", user - ) - - class EventStreamHandler(BaseHandler): def __init__(self, hs): @@ -63,61 +48,6 @@ class EventStreamHandler(BaseHandler): self.notifier = hs.get_notifier() - @defer.inlineCallbacks - def started_stream(self, user): - """Tells the presence handler that we have started an eventstream for - the user: - - Args: - user (User): The user who started a stream. - Returns: - A deferred that completes once their presence has been updated. - """ - if user not in self._streams_per_user: - # Make sure we set the streams per user to 1 here rather than - # setting it to zero and incrementing the value below. - # Otherwise this may race with stopped_stream causing the - # user to be erased from the map before we have a chance - # to increment it. 
- self._streams_per_user[user] = 1 - if user in self._stop_timer_per_user: - try: - self.clock.cancel_call_later( - self._stop_timer_per_user.pop(user) - ) - except: - logger.exception("Failed to cancel event timer") - else: - yield started_user_eventstream(self.distributor, user) - else: - self._streams_per_user[user] += 1 - - def stopped_stream(self, user): - """If there are no streams for a user this starts a timer that will - notify the presence handler that we haven't got an event stream for - the user unless the user starts a new stream in 30 seconds. - - Args: - user (User): The user who stopped a stream. - """ - self._streams_per_user[user] -= 1 - if not self._streams_per_user[user]: - del self._streams_per_user[user] - - # 30 seconds of grace to allow the client to reconnect again - # before we think they're gone - def _later(): - logger.debug("_later stopped_user_eventstream %s", user) - - self._stop_timer_per_user.pop(user, None) - - return stopped_user_eventstream(self.distributor, user) - - logger.debug("Scheduling _later: for %s", user) - self._stop_timer_per_user[user] = ( - self.clock.call_later(30, _later) - ) - @defer.inlineCallbacks @log_function def get_stream(self, auth_user_id, pagin_config, timeout=0, diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index f6cf343174..cfbcf2d32c 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -73,10 +73,6 @@ FEDERATION_PING_INTERVAL = 25 * 60 * 1000 assert LAST_ACTIVE_GRANULARITY < IDLE_TIMER -def user_presence_changed(distributor, user, statuscache): - return distributor.fire("user_presence_changed", user, statuscache) - - def collect_presencelike_data(distributor, user, content): return distributor.fire("collect_presencelike_data", user, content) diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index 3065b0c1a5..0cd89260f2 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -251,30 +251,6 @@ class RoomMemberStore(SQLBaseStore): user_id, membership_list=[Membership.JOIN], ) - @defer.inlineCallbacks - def user_rooms_intersect(self, user_id_list): - """ Checks whether all the users whose IDs are given in a list share a - room. - - This is a "hot path" function that's called a lot, e.g. by presence for - generating the event stream. As such, it is implemented locally by - wrapping logic around heavily-cached database queries. 
- """ - if len(user_id_list) < 2: - defer.returnValue(True) - - deferreds = [self.get_rooms_for_user(u) for u in user_id_list] - - results = yield defer.DeferredList(deferreds, consumeErrors=True) - - # A list of sets of strings giving room IDs for each user - room_id_lists = [set([r.room_id for r in result[1]]) for result in results] - - # There isn't a setintersection(*list_of_sets) - ret = len(room_id_lists.pop(0).intersection(*room_id_lists)) > 0 - - defer.returnValue(ret) - @defer.inlineCallbacks def forget(self, user_id, room_id): """Indicate that user_id wishes to discard history for room_id.""" diff --git a/tests/storage/test_roommember.py b/tests/storage/test_roommember.py index 677d11f68d..b029ff0584 100644 --- a/tests/storage/test_roommember.py +++ b/tests/storage/test_roommember.py @@ -91,11 +91,6 @@ class RoomMemberStoreTestCase(unittest.TestCase): ) )] ) - self.assertFalse( - (yield self.store.user_rooms_intersect( - [self.u_alice.to_string(), self.u_bob.to_string()] - )) - ) @defer.inlineCallbacks def test_two_members(self): @@ -108,11 +103,6 @@ class RoomMemberStoreTestCase(unittest.TestCase): yield self.store.get_room_members(self.room.to_string()) )} ) - self.assertTrue(( - yield self.store.user_rooms_intersect([ - self.u_alice.to_string(), self.u_bob.to_string() - ]) - )) @defer.inlineCallbacks def test_room_hosts(self): From 2cd9260500efa82713edd365f54d491ac0328fb0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 17 Mar 2016 11:09:03 +0000 Subject: [PATCH 342/349] Update aliases event after deletion Attempt to update the appropriate `m.room.aliases` event after deleting an alias. This may fail due to the deleter not being in the room. Will also check if the canonical alias of the event is set to the deleted alias, and if so will attempt to delete it. 
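
For illustration only (not taken from this commit): if the deleted alias was
also the room's canonical alias, the server now tries to send an empty
m.room.canonical_alias state event on the deleter's behalf, roughly:

    {
        "type": "m.room.canonical_alias",
        "state_key": "",
        "room_id": "!abc123:example.com",   # hypothetical room
        "sender": "@deleter:example.com",   # hypothetical deleter
        "content": {},                      # canonical alias cleared
    }

Both follow-up events are best-effort: if the deleter is not in the room (or
otherwise cannot send state there), the AuthError is logged and the alias
deletion itself still succeeds.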
--- synapse/handlers/directory.py | 52 +++++++++++++++++++++++++---- synapse/rest/client/v1/directory.py | 3 +- 2 files changed, 48 insertions(+), 7 deletions(-) diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index c4aaa11918..be9f2a21b2 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -32,6 +32,8 @@ class DirectoryHandler(BaseHandler): def __init__(self, hs): super(DirectoryHandler, self).__init__(hs) + self.state = hs.get_state_handler() + self.federation = hs.get_replication_layer() self.federation.register_query_handler( "directory", self.on_directory_query @@ -93,7 +95,7 @@ class DirectoryHandler(BaseHandler): yield self._create_association(room_alias, room_id, servers) @defer.inlineCallbacks - def delete_association(self, user_id, room_alias): + def delete_association(self, requester, user_id, room_alias): # association deletion for human users can_delete = yield self._user_can_delete_alias(room_alias, user_id) @@ -112,7 +114,25 @@ class DirectoryHandler(BaseHandler): errcode=Codes.EXCLUSIVE ) - yield self._delete_association(room_alias) + room_id = yield self._delete_association(room_alias) + + try: + yield self.send_room_alias_update_event( + requester, + requester.user.to_string(), + room_id + ) + + yield self._update_canonical_alias( + requester, + requester.user.to_string(), + room_id, + room_alias, + ) + except AuthError as e: + logger.info("Failed to update alias events: %s", e) + + defer.returnValue(room_id) @defer.inlineCallbacks def delete_appservice_association(self, service, room_alias): @@ -129,11 +149,9 @@ class DirectoryHandler(BaseHandler): if not self.hs.is_mine(room_alias): raise SynapseError(400, "Room alias must be local") - yield self.store.delete_room_alias(room_alias) + room_id = yield self.store.delete_room_alias(room_alias) - # TODO - Looks like _update_room_alias_event has never been implemented - # if room_id: - # yield self._update_room_alias_events(user_id, room_id) + defer.returnValue(room_id) @defer.inlineCallbacks def get_association(self, room_alias): @@ -233,6 +251,28 @@ class DirectoryHandler(BaseHandler): ratelimit=False ) + @defer.inlineCallbacks + def _update_canonical_alias(self, requester, user_id, room_id, room_alias): + alias_event = yield self.state.get_current_state( + room_id, EventTypes.CanonicalAlias, "" + ) + + if alias_event.content.get("alias", "") != room_alias.to_string(): + return + + msg_handler = self.hs.get_handlers().message_handler + yield msg_handler.create_and_send_nonmember_event( + requester, + { + "type": EventTypes.CanonicalAlias, + "state_key": "", + "room_id": room_id, + "sender": user_id, + "content": {}, + }, + ratelimit=False + ) + @defer.inlineCallbacks def get_association_from_room_alias(self, room_alias): result = yield self.store.get_association_from_room_alias( diff --git a/synapse/rest/client/v1/directory.py b/synapse/rest/client/v1/directory.py index 60c5ec77aa..59a23d6cb6 100644 --- a/synapse/rest/client/v1/directory.py +++ b/synapse/rest/client/v1/directory.py @@ -127,8 +127,9 @@ class ClientDirectoryServer(ClientV1RestServlet): room_alias = RoomAlias.from_string(room_alias) yield dir_handler.delete_association( - user.to_string(), room_alias + requester, user.to_string(), room_alias ) + logger.info( "User %s deleted alias %s", user.to_string(), From 7a386126206d4fea2b2c561dde6577e5cff107f3 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Thu, 17 Mar 2016 11:54:19 +0000 Subject: [PATCH 343/349] Remove another unused function from presence --- 
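Note, for context only: dead helpers like the one removed here can be spotted
with scripts-dev/definitions.py as extended in PATCH 340/349. A hypothetical
invocation (the positional target directory is an assumption; the supported
flags are listed by the script's --help) might look like:

    python scripts-dev/definitions.py --referrers 1 --format yaml synapse/

Definitions reported without referrers still need a manual check, since some
functions are only reached dynamically, for example through distributor
signal names.
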
synapse/handlers/presence.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index cfbcf2d32c..d0c8f1328b 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -73,10 +73,6 @@ FEDERATION_PING_INTERVAL = 25 * 60 * 1000 assert LAST_ACTIVE_GRANULARITY < IDLE_TIMER -def collect_presencelike_data(distributor, user, content): - return distributor.fire("collect_presencelike_data", user, content) - - class PresenceHandler(BaseHandler): def __init__(self, hs): From 56aa4e7a9a6846a72e9031e29555b05ed119e679 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 17 Mar 2016 15:24:19 +0000 Subject: [PATCH 344/349] Check canonical alias event exists --- synapse/handlers/directory.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index be9f2a21b2..6bcc5a5e2b 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -257,7 +257,8 @@ class DirectoryHandler(BaseHandler): room_id, EventTypes.CanonicalAlias, "" ) - if alias_event.content.get("alias", "") != room_alias.to_string(): + alias_str = room_alias.to_string() + if not alias_event or alias_event.content.get("alias", "") != alias_str: return msg_handler = self.hs.get_handlers().message_handler From 3c5f25507b7a62165a782420add4f9dedcec0b3e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 18 Mar 2016 13:55:16 +0000 Subject: [PATCH 345/349] Yield on EDU handling --- synapse/federation/federation_server.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index e8bfbe7cb5..a961b17aea 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -137,8 +137,8 @@ class FederationServer(FederationBase): logger.exception("Failed to handle PDU") if hasattr(transaction, "edus"): - for edu in [Edu(**x) for x in transaction.edus]: - self.received_edu( + for edu in (Edu(**x) for x in transaction.edus): + yield self.received_edu( transaction.origin, edu.edu_type, edu.content @@ -161,11 +161,12 @@ class FederationServer(FederationBase): ) defer.returnValue((200, response)) + @defer.inlineCallbacks def received_edu(self, origin, edu_type, content): received_edus_counter.inc() if edu_type in self.edu_handlers: - self.edu_handlers[edu_type](origin, content) + yield self.edu_handlers[edu_type](origin, content) else: logger.warn("Received EDU of type %s with no handler", edu_type) From 67ed8065dba960055c2e3d1740af12229b7d19a4 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 18 Mar 2016 14:31:31 +0000 Subject: [PATCH 346/349] Dedupe requested event list in _get_events --- synapse/storage/events.py | 26 ++++++++++++-------------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 552e7ca35b..285c586cfe 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -526,6 +526,9 @@ class EventsStore(SQLBaseStore): if not event_ids: defer.returnValue([]) + event_id_list = event_ids + event_ids = set(event_ids) + event_map = self._get_events_from_cache( event_ids, check_redacted=check_redacted, @@ -535,23 +538,18 @@ class EventsStore(SQLBaseStore): missing_events_ids = [e for e in event_ids if e not in event_map] - if not missing_events_ids: - defer.returnValue([ - event_map[e_id] for e_id in event_ids - if e_id in event_map and event_map[e_id] 
-            ])
+        if missing_events_ids:
+            missing_events = yield self._enqueue_events(
+                missing_events_ids,
+                check_redacted=check_redacted,
+                get_prev_content=get_prev_content,
+                allow_rejected=allow_rejected,
+            )

-        missing_events = yield self._enqueue_events(
-            missing_events_ids,
-            check_redacted=check_redacted,
-            get_prev_content=get_prev_content,
-            allow_rejected=allow_rejected,
-        )
-
-        event_map.update(missing_events)
+            event_map.update(missing_events)

         defer.returnValue([
-            event_map[e_id] for e_id in event_ids
+            event_map[e_id] for e_id in event_id_list
             if e_id in event_map and event_map[e_id]
         ])

From 58e207cd77b3b68b1908078418c9c9e9c3830ea5 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 18 Mar 2016 14:31:44 +0000
Subject: [PATCH 347/349] Don't assume existence of event_id in __str__

---
 synapse/events/__init__.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py
index bbfa5a7265..abed6b5e6b 100644
--- a/synapse/events/__init__.py
+++ b/synapse/events/__init__.py
@@ -168,5 +168,7 @@ class FrozenEvent(EventBase):

     def __repr__(self):
         return "<FrozenEvent event_id='%s', type='%s', state_key='%s'>" % (
-            self.event_id, self.type, self.get("state_key", None),
+            self.get("event_id", None),
+            self.get("type", None),
+            self.get("state_key", None),
         )

From 9adf0e92bc3dbe4305beaf602406fc5ca51ea37e Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 18 Mar 2016 15:12:50 +0000
Subject: [PATCH 348/349] Catch exceptions from EDU handling

---
 synapse/federation/federation_server.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index a961b17aea..76820b924b 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -166,7 +166,12 @@ class FederationServer(FederationBase):
         received_edus_counter.inc()

         if edu_type in self.edu_handlers:
-            yield self.edu_handlers[edu_type](origin, content)
+            try:
+                yield self.edu_handlers[edu_type](origin, content)
+            except SynapseError as e:
+                logger.info("Failed to handle edu %r: %r", edu_type, e)
+            except Exception as e:
+                logger.exception("Failed to handle edu %r", edu_type, e)
         else:
             logger.warn("Received EDU of type %s with no handler", edu_type)

From adafa24b0a8f539c114c7d45f36f7b62743557f6 Mon Sep 17 00:00:00 2001
From: Matthew Hodgson
Date: Fri, 25 Mar 2016 23:38:19 +0000
Subject: [PATCH 349/349] typo

---
 synapse/replication/resource.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/synapse/replication/resource.py b/synapse/replication/resource.py
index 8c1ae0fbc7..37a1d3960c 100644
--- a/synapse/replication/resource.py
+++ b/synapse/replication/resource.py
@@ -76,7 +76,7 @@ class ReplicationResource(Resource):
     The response is a JSON object with keys for each stream with updates. Under
     each key is a JSON object with:

-    * "postion": The current position of the stream.
+    * "position": The current position of the stream.
     * "field_names": The names of the fields in each row.
     * "rows": The updates as an array of arrays.
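
For illustration only (the stream name, field names and values below are made
up, not part of the commit): a response matching the docstring above might
look like

    {
        "example_stream": {
            "position": 100,
            "field_names": ["position", "room_id"],
            "rows": [
                [99, "!abc:example.com"],
                [100, "!def:example.com"],
            ],
        },
    }

where each inner array of "rows" presumably lists its values in the order
given by "field_names".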