From 923d9300ede819aa45da546fafc240f40263e7c5 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Sat, 17 Feb 2018 21:53:46 -0700 Subject: [PATCH 01/67] Add a blurb explaining the main synapse worker Signed-off-by: Travis Ralston --- docs/workers.rst | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/docs/workers.rst b/docs/workers.rst index dee04bbf3e..a5e084c22a 100644 --- a/docs/workers.rst +++ b/docs/workers.rst @@ -115,6 +115,18 @@ To manipulate a specific worker, you pass the -w option to synctl:: synctl -w $CONFIG/workers/synchrotron.yaml restart +After setting up your workers, you'll need to create a worker configuration for +the main synapse process. That worker configuration should look like this::: + + worker_app: synapse.app.homeserver + daemonize: true + +Be sure to keep this particular configuration limited as synapse may refuse to +start if the regular ``worker_*`` options are given. The ``homeserver.yaml`` +configuration will be used to set up the main synapse process. + +**You must have a worker configuration for the main synapse process!** + Available worker applications ----------------------------- From 47ce527f459e0a28a45a2299db799ea18d632021 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Tue, 13 Mar 2018 14:10:07 +0100 Subject: [PATCH 02/67] Add room_id to the response of `rooms/{roomId}/join` Fixes #2349 --- synapse/rest/client/v1/room.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index f8999d64d7..6dc31bf9ae 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -655,7 +655,12 @@ class RoomMembershipRestServlet(ClientV1RestServlet): content=event_content, ) - defer.returnValue((200, {})) + return_value = {} + + if membership_action == "join": + return_value["room_id"] = room_id + + defer.returnValue((200, return_value)) def _has_3pid_invite_keys(self, content): for key in {"id_server", "medium", "address"}: From 2cc9f76bc3cfa012dcdfe614bdda7e689b8b5e65 Mon Sep 17 00:00:00 2001 From: NotAFile Date: Thu, 15 Mar 2018 16:11:17 +0100 Subject: [PATCH 03/67] replace old style error catching with 'as' keyword This is both easier to read and compatible with python3 (not that that matters) Signed-off-by: Adrian Tschira --- synapse/app/synctl.py | 4 ++-- synapse/handlers/device.py | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/synapse/app/synctl.py b/synapse/app/synctl.py index 0f0ddfa78a..b0e1b5e66a 100755 --- a/synapse/app/synctl.py +++ b/synapse/app/synctl.py @@ -38,7 +38,7 @@ def pid_running(pid): try: os.kill(pid, 0) return True - except OSError, err: + except OSError as err: if err.errno == errno.EPERM: return True return False @@ -98,7 +98,7 @@ def stop(pidfile, app): try: os.kill(pid, signal.SIGTERM) write("stopped %s" % (app,), colour=GREEN) - except OSError, err: + except OSError as err: if err.errno == errno.ESRCH: write("%s not running" % (app,), colour=YELLOW) elif err.errno == errno.EPERM: diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 40f3d24678..f7457a7082 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -155,7 +155,7 @@ class DeviceHandler(BaseHandler): try: yield self.store.delete_device(user_id, device_id) - except errors.StoreError, e: + except errors.StoreError as e: if e.code == 404: # no match pass @@ -204,7 +204,7 @@ class DeviceHandler(BaseHandler): try: yield self.store.delete_devices(user_id, device_ids) - except errors.StoreError, e: 
+ except errors.StoreError as e: if e.code == 404: # no match pass @@ -243,7 +243,7 @@ class DeviceHandler(BaseHandler): new_display_name=content.get("display_name") ) yield self.notify_device_update(user_id, [device_id]) - except errors.StoreError, e: + except errors.StoreError as e: if e.code == 404: raise errors.NotFoundError() else: From c2a5cf2fe32d2cd582711669b7c0ce74682e1c05 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 27 Mar 2018 17:07:31 +0100 Subject: [PATCH 04/67] factor out exception handling for keys/claim and keys/query this stuff is badly c&p'ed --- synapse/handlers/e2e_keys.py | 53 +++++++++++++++++------------------- 1 file changed, 25 insertions(+), 28 deletions(-) diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index 80b359b2e7..41521e6990 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd +# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -134,23 +135,8 @@ class E2eKeysHandler(object): if user_id in destination_query: results[user_id] = keys - except CodeMessageException as e: - failures[destination] = { - "status": e.code, "message": e.message - } - except NotRetryingDestination as e: - failures[destination] = { - "status": 503, "message": "Not ready for retry", - } - except FederationDeniedError as e: - failures[destination] = { - "status": 403, "message": "Federation Denied", - } except Exception as e: - # include ConnectionRefused and other errors - failures[destination] = { - "status": 503, "message": e.message - } + failures[destination] = _exception_to_failure(e) yield make_deferred_yieldable(defer.gatherResults([ preserve_fn(do_remote_query)(destination) @@ -252,19 +238,8 @@ class E2eKeysHandler(object): for user_id, keys in remote_result["one_time_keys"].items(): if user_id in device_keys: json_result[user_id] = keys - except CodeMessageException as e: - failures[destination] = { - "status": e.code, "message": e.message - } - except NotRetryingDestination as e: - failures[destination] = { - "status": 503, "message": "Not ready for retry", - } except Exception as e: - # include ConnectionRefused and other errors - failures[destination] = { - "status": 503, "message": e.message - } + failures[destination] = _exception_to_failure(e) yield make_deferred_yieldable(defer.gatherResults([ preserve_fn(claim_client_keys)(destination) @@ -362,6 +337,28 @@ class E2eKeysHandler(object): ) +def _exception_to_failure(e): + if isinstance(e, CodeMessageException): + return { + "status": e.code, "message": e.message, + } + + if isinstance(e, NotRetryingDestination): + return { + "status": 503, "message": "Not ready for retry", + } + + if isinstance(e, FederationDeniedError): + return { + "status": 403, "message": "Federation Denied", + } + + # include ConnectionRefused and other errors + return { + "status": 503, "message": e.message, + } + + def _one_time_keys_match(old_key_json, new_key): old_key = json.loads(old_key_json) From a134c572a6697fa6443525493e3fc13f74452d34 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 27 Mar 2018 17:15:06 +0100 Subject: [PATCH 05/67] Stringify exceptions for keys/{query,claim} Make sure we stringify any exceptions we return from keys/query and keys/claim, to avoid a 'not JSON serializable' error later Fixes #3010 --- synapse/handlers/e2e_keys.py | 5 ++++- 1 file changed, 4 
insertions(+), 1 deletion(-) diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index 41521e6990..325c0c4a9f 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -354,8 +354,11 @@ def _exception_to_failure(e): } # include ConnectionRefused and other errors + # + # Note that some Exceptions (notably twisted's ResponseFailed etc) don't + # give a string for e.message, which simplejson then fails to serialize. return { - "status": 503, "message": e.message, + "status": 503, "message": str(e.message), } From ef520d8d0e152a24fb6660fdd2def214b6e9caae Mon Sep 17 00:00:00 2001 From: Michael Kaye <1917473+michaelkaye@users.noreply.github.com> Date: Tue, 27 Mar 2018 14:12:22 +0100 Subject: [PATCH 06/67] Include coarse CPU and Memory use in stats callbacks. This requires the psutil module, and is still opt-in based on the report_stats config option. --- UPGRADE.rst | 12 ++++++++++++ synapse/app/homeserver.py | 20 ++++++++++++++++++++ 2 files changed, 32 insertions(+) diff --git a/UPGRADE.rst b/UPGRADE.rst index 2efe7ea60f..f6bb1070b1 100644 --- a/UPGRADE.rst +++ b/UPGRADE.rst @@ -48,6 +48,18 @@ returned by the Client-Server API: # configured on port 443. curl -kv https:///_matrix/client/versions 2>&1 | grep "Server:" +Upgrading to $NEXT_VERSION +==================== + +This release expands the anonymous usage stats sent if the opt-in +``report_stats`` configuration is set to ``true``. We now capture RSS memory +and cpu use at a very coarse level. This requires administrators to install +the optional ``psutil`` python module. + +We would appreciate it if you could assist by ensuring this module is available +and ``report_stats`` is enabled. This will let us see if performance changes to +synapse are having an impact to the general community. + Upgrading to v0.15.0 ==================== diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index c00afbba28..313be42ded 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -401,6 +401,7 @@ def run(hs): start_time = clock.time() stats = {} + stats_process = None @defer.inlineCallbacks def phone_stats_home(): @@ -427,6 +428,10 @@ def run(hs): daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages() stats["daily_sent_messages"] = daily_sent_messages + if stats_process is not None: + with stats_process.oneshot(): + stats["memory_rss"] = stats_process.memory_info().rss + stats["cpu_average"] = int(stats_process.cpu_info(interval=None)) logger.info("Reporting stats to matrix.org: %s" % (stats,)) try: @@ -438,6 +443,21 @@ def run(hs): logger.warn("Error reporting stats: %s", e) if hs.config.report_stats: + try: + import psutil + stats_process = psutil.Process() + # Ensure we can fetch both, and make the initial request for cpu_percent + # so the next request will use this as the initial point. + stats_process.memory_info().rss + stats_process.cpu_percent(interval=None) + except (ImportError, AttributeError): + logger.warn( + "report_stats enabled but psutil is not installed or incorrect version." + " Disabling reporting of memory/cpu stats." + " Ensuring psutil is available will help matrix track performance changes across releases." 
+ ) + stats_process = None + logger.info("Scheduling stats reporting for 3 hour intervals") clock.looping_call(phone_stats_home, 3 * 60 * 60 * 1000) From 9187e0762f0b4f028d15fac4502e458f513d6642 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 28 Mar 2018 10:02:32 +0100 Subject: [PATCH 07/67] count_daily_users failed if db was sqlite due to type failure - presumably this prevcented all sqlite homeservers reporting home --- synapse/storage/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index de00cae447..b97e5e5ff4 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -260,7 +260,7 @@ class DataStore(RoomMemberStore, RoomStore, ) u """ - txn.execute(sql, (yesterday,)) + txn.execute(sql, (str(yesterday),)) count, = txn.fetchone() return count From a32d2548d986f7075e8310184ce0b70c69513a02 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 28 Mar 2018 10:39:13 +0100 Subject: [PATCH 08/67] query and call for r30 stats --- synapse/app/homeserver.py | 2 ++ synapse/storage/__init__.py | 36 ++++++++++++++++++++++++++++++++++++ 2 files changed, 38 insertions(+) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index c00afbba28..8bce9f1ace 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -425,6 +425,8 @@ def run(hs): stats["daily_active_rooms"] = yield hs.get_datastore().count_daily_active_rooms() stats["daily_messages"] = yield hs.get_datastore().count_daily_messages() + stats["r30_users"] = yield hs.get_datastore().count_r30_users() + daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages() stats["daily_sent_messages"] = daily_sent_messages diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index b97e5e5ff4..10f99c3cd5 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -267,6 +267,42 @@ class DataStore(RoomMemberStore, RoomStore, ret = yield self.runInteraction("count_users", _count_users) defer.returnValue(ret) + @defer.inlineCallbacks + def count_r30_users(self): + """ + Counts the number of 30 day retained users, defined as:- + * Users who have created their accounts more than 30 days + * Where last seen at most 30 days ago + * Where account creation and last_seen are > 30 days + """ + def _count_r30_users(txn): + thirty_days_in_secs = 86400 * 30 + now = int(self._clock.time_msec()) + thirty_days_ago_in_secs = now - thirty_days_in_secs + + sql = """ + SELECT COALESCE(count(*), 0) FROM ( + SELECT users.name, users.creation_ts * 1000, MAX(user_ips.last_seen) + FROM users, user_ips + WHERE users.name = user_ips.user_id + AND appservice_id is NULL + AND users.creation_ts < ? + AND user_ips.last_seen/1000 > ? + AND (user_ips.last_seen/1000) - users.creation_ts > ? + GROUP BY users.name, users.creation_ts + ) u + """ + + txn.execute(sql, (thirty_days_ago_in_secs, + thirty_days_ago_in_secs, + thirty_days_in_secs)) + + count, = txn.fetchone() + return count + + ret = yield self.runInteraction("count_r30_users", _count_r30_users) + defer.returnValue(ret) + def get_users(self): """Function to reterive a list of users in users table. 
From a9cb1a35c85f62bb0114dabd62d118c80d66e415 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 28 Mar 2018 10:57:27 +0100 Subject: [PATCH 09/67] fix tests/storage/test_user_directory.py --- tests/storage/test_user_directory.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/storage/test_user_directory.py b/tests/storage/test_user_directory.py index 0891308f25..88add45217 100644 --- a/tests/storage/test_user_directory.py +++ b/tests/storage/test_user_directory.py @@ -62,7 +62,7 @@ class UserDirectoryStoreTestCase(unittest.TestCase): self.assertFalse(r["limited"]) self.assertEqual(1, len(r["results"])) self.assertDictEqual(r["results"][0], { - "user_id": BOB, + "d.user_id": BOB, "display_name": "bob", "avatar_url": None, }) From 01ccc9e6f25a87d7906d7907afd9e8527228215b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 28 Mar 2018 11:03:52 +0100 Subject: [PATCH 10/67] Measure time it takes to calculate state group ID --- synapse/state.py | 51 ++++++++++++++++++++++++------------------------ 1 file changed, 26 insertions(+), 25 deletions(-) diff --git a/synapse/state.py b/synapse/state.py index a7f20350f1..26093c8434 100644 --- a/synapse/state.py +++ b/synapse/state.py @@ -483,33 +483,34 @@ class StateResolutionHandler(object): key: e_ids.pop() for key, e_ids in state.iteritems() } - # if the new state matches any of the input state groups, we can - # use that state group again. Otherwise we will generate a state_id - # which will be used as a cache key for future resolutions, but - # not get persisted. - state_group = None - new_state_event_ids = frozenset(new_state.itervalues()) - for sg, events in state_groups_ids.iteritems(): - if new_state_event_ids == frozenset(e_id for e_id in events): - state_group = sg - break + with Measure(self.clock, "state.create_group_ids"): + # if the new state matches any of the input state groups, we can + # use that state group again. Otherwise we will generate a state_id + # which will be used as a cache key for future resolutions, but + # not get persisted. 
+ state_group = None + new_state_event_ids = frozenset(new_state.itervalues()) + for sg, events in state_groups_ids.iteritems(): + if new_state_event_ids == frozenset(e_id for e_id in events): + state_group = sg + break - # TODO: We want to create a state group for this set of events, to - # increase cache hits, but we need to make sure that it doesn't - # end up as a prev_group without being added to the database + # TODO: We want to create a state group for this set of events, to + # increase cache hits, but we need to make sure that it doesn't + # end up as a prev_group without being added to the database - prev_group = None - delta_ids = None - for old_group, old_ids in state_groups_ids.iteritems(): - if not set(new_state) - set(old_ids): - n_delta_ids = { - k: v - for k, v in new_state.iteritems() - if old_ids.get(k) != v - } - if not delta_ids or len(n_delta_ids) < len(delta_ids): - prev_group = old_group - delta_ids = n_delta_ids + prev_group = None + delta_ids = None + for old_group, old_ids in state_groups_ids.iteritems(): + if not set(new_state) - set(old_ids): + n_delta_ids = { + k: v + for k, v in new_state.iteritems() + if old_ids.get(k) != v + } + if not delta_ids or len(n_delta_ids) < len(delta_ids): + prev_group = old_group + delta_ids = n_delta_ids cache = _StateCacheEntry( state=new_state, From 545001b9e4b1d6710145d3efe2117fbdf823fb38 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 28 Mar 2018 11:19:45 +0100 Subject: [PATCH 11/67] Fix search_user_dir multiple sqlite versions do different things --- synapse/storage/user_directory.py | 4 ++-- tests/storage/test_user_directory.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/synapse/storage/user_directory.py b/synapse/storage/user_directory.py index dfdcbb3181..d6e289ffbe 100644 --- a/synapse/storage/user_directory.py +++ b/synapse/storage/user_directory.py @@ -667,7 +667,7 @@ class UserDirectoryStore(SQLBaseStore): # The array of numbers are the weights for the various part of the # search: (domain, _, display name, localpart) sql = """ - SELECT d.user_id, display_name, avatar_url + SELECT d.user_id AS user_id, display_name, avatar_url FROM user_directory_search INNER JOIN user_directory AS d USING (user_id) %s @@ -702,7 +702,7 @@ class UserDirectoryStore(SQLBaseStore): search_query = _parse_query_sqlite(search_term) sql = """ - SELECT d.user_id, display_name, avatar_url + SELECT d.user_id AS user_id, display_name, avatar_url FROM user_directory_search INNER JOIN user_directory AS d USING (user_id) %s diff --git a/tests/storage/test_user_directory.py b/tests/storage/test_user_directory.py index 88add45217..0891308f25 100644 --- a/tests/storage/test_user_directory.py +++ b/tests/storage/test_user_directory.py @@ -62,7 +62,7 @@ class UserDirectoryStoreTestCase(unittest.TestCase): self.assertFalse(r["limited"]) self.assertEqual(1, len(r["results"])) self.assertDictEqual(r["results"][0], { - "d.user_id": BOB, + "user_id": BOB, "display_name": "bob", "avatar_url": None, }) From 0f890f477eb2ed03b8fd48710d1960210f44a334 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 28 Mar 2018 11:49:57 +0100 Subject: [PATCH 12/67] No need to cast in count_daily_users --- synapse/storage/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 10f99c3cd5..ba43b2d8ec 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -250,7 +250,7 @@ class DataStore(RoomMemberStore, RoomStore, Counts the number of 
users who used this homeserver in the last 24 hours. """ def _count_users(txn): - yesterday = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24), + yesterday = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24) sql = """ SELECT COALESCE(count(*), 0) FROM ( @@ -260,7 +260,7 @@ class DataStore(RoomMemberStore, RoomStore, ) u """ - txn.execute(sql, (str(yesterday),)) + txn.execute(sql, (yesterday,)) count, = txn.fetchone() return count From 788e69098c93f2433ef907015666c624bb39318f Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 28 Mar 2018 12:03:13 +0100 Subject: [PATCH 13/67] Add user_ips last seen index --- synapse/storage/client_ips.py | 7 +++++++ .../delta/48/add_user_ips_last_seen_index.sql | 17 +++++++++++++++++ 2 files changed, 24 insertions(+) create mode 100644 synapse/storage/schema/delta/48/add_user_ips_last_seen_index.sql diff --git a/synapse/storage/client_ips.py b/synapse/storage/client_ips.py index a03d1d6104..7b44dae0fc 100644 --- a/synapse/storage/client_ips.py +++ b/synapse/storage/client_ips.py @@ -48,6 +48,13 @@ class ClientIpStore(background_updates.BackgroundUpdateStore): columns=["user_id", "device_id", "last_seen"], ) + self.register_background_index_update( + "user_ips_last_seen_index", + index_name="user_ips_last_seen", + table="user_ips", + columns=["user_id", "last_seen"], + ) + # (user_id, access_token, ip) -> (user_agent, device_id, last_seen) self._batch_row_update = {} diff --git a/synapse/storage/schema/delta/48/add_user_ips_last_seen_index.sql b/synapse/storage/schema/delta/48/add_user_ips_last_seen_index.sql new file mode 100644 index 0000000000..9248b0b24a --- /dev/null +++ b/synapse/storage/schema/delta/48/add_user_ips_last_seen_index.sql @@ -0,0 +1,17 @@ +/* Copyright 2018 New Vector Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +INSERT into background_updates (update_name, progress_json) + VALUES ('user_ips_last_seen_index', '{}'); From 4ceaa7433a324afab23c4a445cabe3da965e5846 Mon Sep 17 00:00:00 2001 From: Michael Kaye <1917473+michaelkaye@users.noreply.github.com> Date: Wed, 28 Mar 2018 12:08:09 +0100 Subject: [PATCH 14/67] As daemonizing will make a new process, defer call to init. 
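The deferral matters because `psutil.Process()` with no argument binds to the PID of the process that constructs it, and daemonizing forks: a handle created before the fork keeps reporting on the parent rather than on the daemon. A minimal illustration of the pitfall (hypothetical, not part of this patch):

```python
import os
import psutil

before_fork = psutil.Process()  # bound to the pre-fork PID
if os.fork() == 0:
    # In the child, a fresh handle must be created here;
    # `before_fork` still points at the parent process.
    after_fork = psutil.Process()
    assert after_fork.pid == os.getpid()
    assert before_fork.pid != os.getpid()
    os._exit(0)
```

The same reasoning lies behind the priming call introduced in patch 06: the first `cpu_percent(interval=None)` on a new handle only establishes a baseline and returns a meaningless 0.0, so it is made once up front.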
--- synapse/app/homeserver.py | 36 +++++++++++++++++++++++------------- 1 file changed, 23 insertions(+), 13 deletions(-) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 313be42ded..0737945ede 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -401,7 +401,7 @@ def run(hs): start_time = clock.time() stats = {} - stats_process = None + stats_process = [] @defer.inlineCallbacks def phone_stats_home(): @@ -428,10 +428,13 @@ def run(hs): daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages() stats["daily_sent_messages"] = daily_sent_messages - if stats_process is not None: - with stats_process.oneshot(): - stats["memory_rss"] = stats_process.memory_info().rss - stats["cpu_average"] = int(stats_process.cpu_info(interval=None)) + if len(stats_process) > 0: + stats["memory_rss"] = 0 + stats["cpu_average"] = 0 + for process in stats_process: + with process.oneshot(): + stats["memory_rss"] += process.memory_info().rss + stats["cpu_average"] += int(process.cpu_percent(interval=None)) logger.info("Reporting stats to matrix.org: %s" % (stats,)) try: @@ -442,25 +445,32 @@ def run(hs): except Exception as e: logger.warn("Error reporting stats: %s", e) - if hs.config.report_stats: + def performance_stats_init(): try: import psutil - stats_process = psutil.Process() + process = psutil.Process() # Ensure we can fetch both, and make the initial request for cpu_percent # so the next request will use this as the initial point. - stats_process.memory_info().rss - stats_process.cpu_percent(interval=None) + process.memory_info().rss + process.cpu_percent(interval=None) + logger.info("report_stats can use psutil") + stats_process.append(process) except (ImportError, AttributeError): logger.warn( - "report_stats enabled but psutil is not installed or incorrect version." - " Disabling reporting of memory/cpu stats." - " Ensuring psutil is available will help matrix track performance changes across releases." + "report_stats enabled but psutil is not installed or incorrect version." + " Disabling reporting of memory/cpu stats." + " Ensuring psutil is available will help matrix track performance changes" + " across releases." 
) - stats_process = None + if hs.config.report_stats: logger.info("Scheduling stats reporting for 3 hour intervals") clock.looping_call(phone_stats_home, 3 * 60 * 60 * 1000) + # We need to defer this init for the cases that we daemonize + # otherwise the process ID we get is that of the non-daemon process + clock.call_later(15, performance_stats_init) + # We wait 5 minutes to send the first set of stats as the server can # be quite busy the first few minutes clock.call_later(5 * 60, phone_stats_home) From 792d340572026becf48fe73421f0b73cf575fe46 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 28 Mar 2018 12:25:02 +0100 Subject: [PATCH 15/67] rename stat to future proof --- synapse/app/homeserver.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 8bce9f1ace..286f4dcf7b 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -425,7 +425,7 @@ def run(hs): stats["daily_active_rooms"] = yield hs.get_datastore().count_daily_active_rooms() stats["daily_messages"] = yield hs.get_datastore().count_daily_messages() - stats["r30_users"] = yield hs.get_datastore().count_r30_users() + stats["r30_users_all"] = yield hs.get_datastore().count_r30_users() daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages() stats["daily_sent_messages"] = daily_sent_messages From 79452edeee94a09a826ee2b41a08811b823a3ad6 Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 28 Mar 2018 14:03:37 +0100 Subject: [PATCH 16/67] Add joinability for groups Adds API to set the 'joinable' flag, and corresponding flag in the table. --- synapse/federation/transport/client.py | 17 ++++++++++++++++ synapse/federation/transport/server.py | 20 ++++++++++++++++++ synapse/groups/groups_server.py | 19 +++++++++++++++++ synapse/handlers/groups_local.py | 3 +++ synapse/rest/client/v2_alpha/groups.py | 28 ++++++++++++++++++++++++++ synapse/storage/group_server.py | 13 ++++++++++++ synapse/storage/prepare_database.py | 3 ++- 7 files changed, 102 insertions(+), 1 deletion(-) diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 5488e82985..46a797b4ba 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd +# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
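For orientation before the next hunk: the new `set_group_joinable` transport method reduces to a single federation POST. A hedged sketch of the request shape it produces follows; the `/_matrix/federation/v1` prefix and the example identifiers are assumptions, since only the relative path appears in the diff:

```python
# Hypothetical illustration of the request emitted by the transport
# method added below; the values are examples, not taken from the series.
group_id = "+sandbox:group.example.com"
request = {
    "method": "POST",
    "path": "/_matrix/federation/v1/groups/%s/joinable" % (group_id,),
    "args": {"requester_user_id": "@alice:example.com"},
    "content": {"joinable": True},
}
```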
@@ -856,6 +857,22 @@ class TransportLayerClient(object): ignore_backoff=True, ) + @log_function + def set_group_joinable(self, destination, group_id, requester_user_id, + content): + """Sets whether a group is joinable without an invite or knock + """ + path = PREFIX + "/groups/%s/joinable" % (group_id,) + + return self.client.post_json( + destination=destination, + path=path, + args={"requester_user_id": requester_user_id}, + data=content, + ignore_backoff=True, + ) + + @log_function def delete_group_summary_user(self, destination, group_id, requester_user_id, user_id, role_id): diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index a66a6b0692..107deb4e1e 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd +# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -1124,6 +1125,24 @@ class FederationGroupsBulkPublicisedServlet(BaseFederationServlet): defer.returnValue((200, resp)) +class FederationGroupsJoinableServlet(BaseFederationServlet): + """Sets whether a group is joinable without an invite or knock + """ + PATH = "/groups/(?P[^/]*)/joinable$" + + @defer.inlineCallbacks + def on_POST(self, origin, content, query, group_id): + requester_user_id = parse_string_from_args(query, "requester_user_id") + if get_domain_from_id(requester_user_id) != origin: + raise SynapseError(403, "requester_user_id doesn't match origin") + + new_content = yield self.handler.set_group_joinable( + group_id, requester_user_id, content + ) + + defer.returnValue((200, new_content)) + + FEDERATION_SERVLET_CLASSES = ( FederationSendServlet, FederationPullServlet, @@ -1172,6 +1191,7 @@ GROUP_SERVER_SERVLET_CLASSES = ( FederationGroupsSummaryUsersServlet, FederationGroupsAddRoomsServlet, FederationGroupsAddRoomsConfigServlet, + FederationGroupsJoinableServlet, ) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 0b995aed70..25cbfb1691 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2017 Vector Creations Ltd +# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -205,6 +206,24 @@ class GroupsServerHandler(object): defer.returnValue({}) + @defer.inlineCallbacks + def set_group_joinable(self, group_id, requester_user_id, content): + """Sets whether a group is joinable without an invite or knock + """ + yield self.check_group_is_ours( + group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id + ) + + is_joinable = content.get('joinable') + if is_joinable is None: + raise SynapseError( + 400, "No value specified for 'joinable'" + ) + + yield self.store.set_group_joinable(group_id, is_joinable=is_joinable) + + defer.returnValue({}) + @defer.inlineCallbacks def get_group_categories(self, group_id, requester_user_id): """Get all categories in a group (as seen by user) diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index e4d0cc8b02..c9671b9046 100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2017 Vector Creations Ltd +# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -90,6 +91,8 @@ class GroupsLocalHandler(object): get_group_role = _create_rerouter("get_group_role") get_group_roles = _create_rerouter("get_group_roles") + set_group_joinable = _create_rerouter("set_group_joinable") + @defer.inlineCallbacks def get_group_summary(self, group_id, requester_user_id): """Get the group summary for a group. diff --git a/synapse/rest/client/v2_alpha/groups.py b/synapse/rest/client/v2_alpha/groups.py index f762dbfa9a..dc8247d172 100644 --- a/synapse/rest/client/v2_alpha/groups.py +++ b/synapse/rest/client/v2_alpha/groups.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2017 Vector Creations Ltd +# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
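The hunk below adds the matching client-server endpoint at `/groups/{group_id}/joinable`. As a usage sketch, assuming the `/_matrix/client/unstable` prefix and bearer-token auth (only the relative pattern appears in the patch):

```python
# Hypothetical client call for the servlet added below. Note the verb:
# this patch registers on_POST; patch 26 later changes it to PUT.
import requests

def set_group_joinable(homeserver, access_token, group_id, joinable):
    url = "%s/_matrix/client/unstable/groups/%s/joinable" % (
        homeserver, group_id,
    )
    resp = requests.post(
        url,
        headers={"Authorization": "Bearer %s" % (access_token,)},
        json={"joinable": joinable},
    )
    resp.raise_for_status()
    return resp.json()
```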
@@ -401,6 +402,32 @@ class GroupInvitedUsersServlet(RestServlet): defer.returnValue((200, result)) +class GroupJoinableServlet(RestServlet): + """Set whether a group is joinable without an invite + """ + PATTERNS = client_v2_patterns("/groups/(?P[^/]*)/joinable$") + + def __init__(self, hs): + super(GroupJoinableServlet, self).__init__() + self.auth = hs.get_auth() + self.groups_handler = hs.get_groups_local_handler() + + @defer.inlineCallbacks + def on_POST(self, request, group_id): + requester = yield self.auth.get_user_by_req(request) + requester_user_id = requester.user.to_string() + + content = parse_json_object_from_request(request) + + result = yield self.groups_handler.set_group_joinable( + group_id, + requester_user_id, + content, + ) + + defer.returnValue((200, result)) + + class GroupCreateServlet(RestServlet): """Create a group """ @@ -738,6 +765,7 @@ def register_servlets(hs, http_server): GroupInvitedUsersServlet(hs).register(http_server) GroupUsersServlet(hs).register(http_server) GroupRoomServlet(hs).register(http_server) + GroupJoinableServlet(hs).register(http_server) GroupCreateServlet(hs).register(http_server) GroupAdminRoomsServlet(hs).register(http_server) GroupAdminRoomsConfigServlet(hs).register(http_server) diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index 8fde1aab8e..96553d4fb1 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2017 Vector Creations Ltd +# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -29,6 +30,18 @@ _DEFAULT_ROLE_ID = "" class GroupServerStore(SQLBaseStore): + def set_group_joinable(self, group_id, is_joinable): + return self._simple_update_one( + table="groups", + keyvalues={ + "group_id": group_id, + }, + updatevalues={ + "is_joinable": is_joinable, + }, + desc="set_group_joinable", + ) + def get_group(self, group_id): return self._simple_select_one( table="groups", diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index c845a0cec5..04411a665f 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2014 - 2016 OpenMarket Ltd +# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -25,7 +26,7 @@ logger = logging.getLogger(__name__) # Remember to update this number every time a change is made to database # schema files, so the users will be informed on server restarts. 
-SCHEMA_VERSION = 47 +SCHEMA_VERSION = 48 dir_path = os.path.abspath(os.path.dirname(__file__)) From 352e1ff9ed945fd7f2655bf47d591184fc980afb Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 28 Mar 2018 14:07:57 +0100 Subject: [PATCH 17/67] Add schema delta file --- .../storage/schema/delta/48/groups_joinable.sql | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 synapse/storage/schema/delta/48/groups_joinable.sql diff --git a/synapse/storage/schema/delta/48/groups_joinable.sql b/synapse/storage/schema/delta/48/groups_joinable.sql new file mode 100644 index 0000000000..fb9c7a8d1c --- /dev/null +++ b/synapse/storage/schema/delta/48/groups_joinable.sql @@ -0,0 +1,16 @@ +/* Copyright 2018 New Vector Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +ALTER TABLE groups ADD COLUMN is_joinable BOOLEAN NOT NULL DEFAULT 0; From a1642708331ef64e38f4d2708cee9eefbc3d391e Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 28 Mar 2018 14:23:00 +0100 Subject: [PATCH 18/67] Make column definition that works on both dbs --- synapse/storage/schema/delta/48/groups_joinable.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/schema/delta/48/groups_joinable.sql b/synapse/storage/schema/delta/48/groups_joinable.sql index fb9c7a8d1c..39c8fed46c 100644 --- a/synapse/storage/schema/delta/48/groups_joinable.sql +++ b/synapse/storage/schema/delta/48/groups_joinable.sql @@ -13,4 +13,4 @@ * limitations under the License. 
*/ -ALTER TABLE groups ADD COLUMN is_joinable BOOLEAN NOT NULL DEFAULT 0; +ALTER TABLE groups ADD COLUMN is_joinable BOOLEAN NOT NULL DEFAULT (CAST(0 AS BOOLEAN)); From 33f6195d9ae91520aee9d108d60245b5265ac714 Mon Sep 17 00:00:00 2001 From: Michael Kaye <1917473+michaelkaye@users.noreply.github.com> Date: Wed, 28 Mar 2018 14:25:25 +0100 Subject: [PATCH 19/67] Handle review comments --- synapse/app/homeserver.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 0737945ede..b935beb974 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -401,6 +401,9 @@ def run(hs): start_time = clock.time() stats = {} + + # Contains the list of processes we will be monitoring + # currently either 0 or 1 stats_process = [] @defer.inlineCallbacks @@ -428,13 +431,13 @@ def run(hs): daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages() stats["daily_sent_messages"] = daily_sent_messages + if len(stats_process) > 0: stats["memory_rss"] = 0 stats["cpu_average"] = 0 for process in stats_process: - with process.oneshot(): - stats["memory_rss"] += process.memory_info().rss - stats["cpu_average"] += int(process.cpu_percent(interval=None)) + stats["memory_rss"] += process.memory_info().rss + stats["cpu_average"] += int(process.cpu_percent(interval=None)) logger.info("Reporting stats to matrix.org: %s" % (stats,)) try: @@ -459,8 +462,8 @@ def run(hs): logger.warn( "report_stats enabled but psutil is not installed or incorrect version." " Disabling reporting of memory/cpu stats." - " Ensuring psutil is available will help matrix track performance changes" - " across releases." + " Ensuring psutil is available will help matrix.org track performance" + " changes across releases." 
) if hs.config.report_stats: @@ -469,7 +472,7 @@ def run(hs): # We need to defer this init for the cases that we daemonize # otherwise the process ID we get is that of the non-daemon process - clock.call_later(15, performance_stats_init) + clock.call_later(0, performance_stats_init) # We wait 5 minutes to send the first set of stats as the server can # be quite busy the first few minutes From 32260baa410e1ae8200f636861a57bf2039e2cf0 Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 28 Mar 2018 14:29:42 +0100 Subject: [PATCH 20/67] pep8 --- synapse/federation/transport/client.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 46a797b4ba..5a6b63350b 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -859,7 +859,7 @@ class TransportLayerClient(object): @log_function def set_group_joinable(self, destination, group_id, requester_user_id, - content): + content): """Sets whether a group is joinable without an invite or knock """ path = PREFIX + "/groups/%s/joinable" % (group_id,) @@ -872,7 +872,6 @@ class TransportLayerClient(object): ignore_backoff=True, ) - @log_function def delete_group_summary_user(self, destination, group_id, requester_user_id, user_id, role_id): From 86932be2cb1837688d154ff78fb6418f78483133 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 28 Mar 2018 14:36:53 +0100 Subject: [PATCH 21/67] Support multi client R30 for psql --- synapse/app/homeserver.py | 4 +++- synapse/storage/__init__.py | 34 +++++++++++++++++++++++++++------- 2 files changed, 30 insertions(+), 8 deletions(-) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 286f4dcf7b..35e2b00f1b 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -425,7 +425,9 @@ def run(hs): stats["daily_active_rooms"] = yield hs.get_datastore().count_daily_active_rooms() stats["daily_messages"] = yield hs.get_datastore().count_daily_messages() - stats["r30_users_all"] = yield hs.get_datastore().count_r30_users() + r30_results = yield hs.get_datastore().count_r30_users() + for name, count in r30_results.items(): + stats["r30_users_" + name] = count daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages() stats["daily_sent_messages"] = daily_sent_messages diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index ba43b2d8ec..b651973c79 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -280,6 +280,15 @@ class DataStore(RoomMemberStore, RoomStore, now = int(self._clock.time_msec()) thirty_days_ago_in_secs = now - thirty_days_in_secs + # Are these filters sufficiently robust? + filters = { + "ALL": "", + "IOS": "^(Vector|Riot|Riot\.im)\/.* iOS", + "ANDROID": "^(Dalvik|Riot|Riot\.im)\/.* Android", + "ELECTRON": "Electron", + "WEB": "(Gecko|Mozilla)", + } + sql = """ SELECT COALESCE(count(*), 0) FROM ( SELECT users.name, users.creation_ts * 1000, MAX(user_ips.last_seen) @@ -289,16 +298,27 @@ class DataStore(RoomMemberStore, RoomStore, AND users.creation_ts < ? AND user_ips.last_seen/1000 > ? AND (user_ips.last_seen/1000) - users.creation_ts > ? - GROUP BY users.name, users.creation_ts - ) u """ - txn.execute(sql, (thirty_days_ago_in_secs, - thirty_days_ago_in_secs, - thirty_days_in_secs)) + if isinstance(self.database_engine, PostgresEngine): + sql = sql + "AND user_ips.user_agent ~ ? 
" + sql = sql + "GROUP BY users.name, users.creation_ts ) u" - count, = txn.fetchone() - return count + results = {} + if isinstance(self.database_engine, PostgresEngine): + for filter_name, user_agent_filter in filters.items(): + txn.execute(sql, (thirty_days_ago_in_secs, + thirty_days_ago_in_secs, + thirty_days_in_secs, + user_agent_filter)) + results[filter_name], = txn.fetchone() + + else: + txn.execute(sql, (thirty_days_ago_in_secs, + thirty_days_ago_in_secs, + thirty_days_in_secs)) + results["ALL"], = txn.fetchone() + return results ret = yield self.runInteraction("count_r30_users", _count_r30_users) defer.returnValue(ret) From 4262aba17b643bc82c5cce92298dac0a27b2727c Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 28 Mar 2018 14:40:03 +0100 Subject: [PATCH 22/67] bump schema version --- synapse/storage/prepare_database.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index c845a0cec5..68675e15d2 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -25,7 +25,7 @@ logger = logging.getLogger(__name__) # Remember to update this number every time a change is made to database # schema files, so the users will be informed on server restarts. -SCHEMA_VERSION = 47 +SCHEMA_VERSION = 48 dir_path = os.path.abspath(os.path.dirname(__file__)) From a838444a70195588de55a514524c4af720099177 Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 28 Mar 2018 14:50:30 +0100 Subject: [PATCH 23/67] Grr. Copy the definition from is_admin --- synapse/storage/schema/delta/48/groups_joinable.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/schema/delta/48/groups_joinable.sql b/synapse/storage/schema/delta/48/groups_joinable.sql index 39c8fed46c..9e106e909c 100644 --- a/synapse/storage/schema/delta/48/groups_joinable.sql +++ b/synapse/storage/schema/delta/48/groups_joinable.sql @@ -13,4 +13,4 @@ * limitations under the License. */ -ALTER TABLE groups ADD COLUMN is_joinable BOOLEAN NOT NULL DEFAULT (CAST(0 AS BOOLEAN)); +ALTER TABLE groups ADD COLUMN is_joinable BOOL DEFAULT 0 NOT NULL; From 929b34963d320f571512453dac980ef235914956 Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 28 Mar 2018 14:53:55 +0100 Subject: [PATCH 24/67] OK, smallint it is then --- synapse/storage/schema/delta/48/groups_joinable.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/schema/delta/48/groups_joinable.sql b/synapse/storage/schema/delta/48/groups_joinable.sql index 9e106e909c..ace7d0a723 100644 --- a/synapse/storage/schema/delta/48/groups_joinable.sql +++ b/synapse/storage/schema/delta/48/groups_joinable.sql @@ -13,4 +13,4 @@ * limitations under the License. 
*/ -ALTER TABLE groups ADD COLUMN is_joinable BOOL DEFAULT 0 NOT NULL; +ALTER TABLE groups ADD COLUMN is_joinable SMALLINT DEFAULT 0 NOT NULL; From 241e4e86873d5880f564791e3768247fa55c3fa8 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 28 Mar 2018 16:25:53 +0100 Subject: [PATCH 25/67] remove twisted deferral cruft --- synapse/storage/__init__.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index b651973c79..b2b85e266d 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -244,7 +244,6 @@ class DataStore(RoomMemberStore, RoomStore, return [UserPresenceState(**row) for row in rows] - @defer.inlineCallbacks def count_daily_users(self): """ Counts the number of users who used this homeserver in the last 24 hours. @@ -264,10 +263,9 @@ class DataStore(RoomMemberStore, RoomStore, count, = txn.fetchone() return count - ret = yield self.runInteraction("count_users", _count_users) - defer.returnValue(ret) + return self.runInteraction("count_users", _count_users) + - @defer.inlineCallbacks def count_r30_users(self): """ Counts the number of 30 day retained users, defined as:- @@ -320,8 +318,7 @@ class DataStore(RoomMemberStore, RoomStore, results["ALL"], = txn.fetchone() return results - ret = yield self.runInteraction("count_r30_users", _count_r30_users) - defer.returnValue(ret) + return self.runInteraction("count_r30_users", _count_r30_users) def get_users(self): """Function to reterive a list of users in users table. From c5de6987c210cce906cf279d85cbd98cd14bfc52 Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 28 Mar 2018 16:44:11 +0100 Subject: [PATCH 26/67] This should probably be a PUT --- synapse/rest/client/v2_alpha/groups.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/rest/client/v2_alpha/groups.py b/synapse/rest/client/v2_alpha/groups.py index dc8247d172..aa94130e57 100644 --- a/synapse/rest/client/v2_alpha/groups.py +++ b/synapse/rest/client/v2_alpha/groups.py @@ -413,7 +413,7 @@ class GroupJoinableServlet(RestServlet): self.groups_handler = hs.get_groups_local_handler() @defer.inlineCallbacks - def on_POST(self, request, group_id): + def on_PUT(self, request, group_id): requester = yield self.auth.get_user_by_req(request) requester_user_id = requester.user.to_string() From dc7c020b33dc9606089fa66fdec2dacb7f807f6d Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 28 Mar 2018 17:25:15 +0100 Subject: [PATCH 27/67] fix pep8 errors --- synapse/storage/__init__.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index b2b85e266d..70c6171404 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -14,8 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from twisted.internet import defer - from synapse.storage.devices import DeviceStore from .appservice import ( ApplicationServiceStore, ApplicationServiceTransactionStore @@ -265,7 +263,6 @@ class DataStore(RoomMemberStore, RoomStore, return self.runInteraction("count_users", _count_users) - def count_r30_users(self): """ Counts the number of 30 day retained users, defined as:- From 9ee44a372d4fcf6a461b610230a285610613e8ac Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Thu, 29 Mar 2018 16:45:34 +0100 Subject: [PATCH 28/67] Remove need for sqlite specific query --- synapse/storage/__init__.py | 85 ++++++++++++++++++++++++------------- 1 file changed, 56 insertions(+), 29 deletions(-) diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 70c6171404..0b4693041f 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -269,50 +269,77 @@ class DataStore(RoomMemberStore, RoomStore, * Users who have created their accounts more than 30 days * Where last seen at most 30 days ago * Where account creation and last_seen are > 30 days + + Returns counts globaly for a given user as well as breaking + by platform """ def _count_r30_users(txn): thirty_days_in_secs = 86400 * 30 now = int(self._clock.time_msec()) thirty_days_ago_in_secs = now - thirty_days_in_secs - # Are these filters sufficiently robust? - filters = { - "ALL": "", - "IOS": "^(Vector|Riot|Riot\.im)\/.* iOS", - "ANDROID": "^(Dalvik|Riot|Riot\.im)\/.* Android", - "ELECTRON": "Electron", - "WEB": "(Gecko|Mozilla)", - } + sql = """ + SELECT platform, COALESCE(count(*), 0) FROM ( + SELECT users.name, platform, users.creation_ts * 1000, MAX(uip.last_seen) + FROM users + INNER JOIN ( + SELECT + user_id, + last_seen, + CASE + WHEN user_agent LIKE '%Android%' THEN 'android' + WHEN user_agent LIKE '%iOS%' THEN 'ios' + WHEN user_agent LIKE '%Electron%' THEN 'electron' + WHEN user_agent LIKE '%Mozilla%' THEN 'web' + WHEN user_agent LIKE '%Gecko%' THEN 'web' + ELSE 'unknown' + END + AS platform + FROM user_ips + ) uip + ON users.name = uip.user_id + AND users.appservice_id is NULL + AND users.creation_ts < ? + AND uip.last_seen/1000 > ? + AND (uip.last_seen/1000) - users.creation_ts > 86400 * 30 + GROUP BY users.name, platform, users.creation_ts + ) u GROUP BY platform + """ + + results = {} + txn.execute(sql, (thirty_days_ago_in_secs, + thirty_days_ago_in_secs)) + rows = txn.fetchall() + for row in rows: + if row[0] is 'unknown': + pass + results[row[0]] = row[1] sql = """ SELECT COALESCE(count(*), 0) FROM ( - SELECT users.name, users.creation_ts * 1000, MAX(user_ips.last_seen) - FROM users, user_ips - WHERE users.name = user_ips.user_id + SELECT users.name, users.creation_ts * 1000, MAX(uip.last_seen) + FROM users + INNER JOIN ( + SELECT + user_id, + last_seen + FROM user_ips + ) uip + ON users.name = uip.user_id AND appservice_id is NULL AND users.creation_ts < ? - AND user_ips.last_seen/1000 > ? - AND (user_ips.last_seen/1000) - users.creation_ts > ? + AND uip.last_seen/1000 > ? + AND (uip.last_seen/1000) - users.creation_ts > 86400 * 30 + GROUP BY users.name, users.creation_ts + ) u """ - if isinstance(self.database_engine, PostgresEngine): - sql = sql + "AND user_ips.user_agent ~ ? 
" - sql = sql + "GROUP BY users.name, users.creation_ts ) u" + txn.execute(sql, (thirty_days_ago_in_secs, + thirty_days_ago_in_secs)) - results = {} - if isinstance(self.database_engine, PostgresEngine): - for filter_name, user_agent_filter in filters.items(): - txn.execute(sql, (thirty_days_ago_in_secs, - thirty_days_ago_in_secs, - thirty_days_in_secs, - user_agent_filter)) - results[filter_name], = txn.fetchone() + count, = txn.fetchone() + results['all'] = count - else: - txn.execute(sql, (thirty_days_ago_in_secs, - thirty_days_ago_in_secs, - thirty_days_in_secs)) - results["ALL"], = txn.fetchone() return results return self.runInteraction("count_r30_users", _count_r30_users) From b4e37c6f50b91dd0ea90c773185884659e3a738a Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Thu, 29 Mar 2018 17:27:39 +0100 Subject: [PATCH 29/67] pep8 --- synapse/storage/__init__.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 0b4693041f..f68e436df0 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -280,7 +280,8 @@ class DataStore(RoomMemberStore, RoomStore, sql = """ SELECT platform, COALESCE(count(*), 0) FROM ( - SELECT users.name, platform, users.creation_ts * 1000, MAX(uip.last_seen) + SELECT users.name, platform, users.creation_ts * 1000, + MAX(uip.last_seen) FROM users INNER JOIN ( SELECT @@ -317,7 +318,8 @@ class DataStore(RoomMemberStore, RoomStore, sql = """ SELECT COALESCE(count(*), 0) FROM ( - SELECT users.name, users.creation_ts * 1000, MAX(uip.last_seen) + SELECT users.name, users.creation_ts * 1000, + MAX(uip.last_seen) FROM users INNER JOIN ( SELECT From fcfe7f6ad3a2a9c285ac96008395fc47e096ff4b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 29 Mar 2018 22:45:52 +0100 Subject: [PATCH 30/67] Use simplejson throughout Let's use simplejson rather than json, for consistency. --- synapse/api/errors.py | 3 ++- synapse/handlers/identity.py | 8 +++++--- synapse/storage/schema/delta/14/upgrade_appservice_db.py | 3 ++- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/synapse/api/errors.py b/synapse/api/errors.py index aa15f73f36..bee59e80dd 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -15,9 +15,10 @@ """Contains exceptions and error codes.""" -import json import logging +import simplejson as json + logger = logging.getLogger(__name__) diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index 9efcdff1d6..91a0898860 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -15,6 +15,11 @@ # limitations under the License. """Utilities for interacting with Identity Servers""" + +import logging + +import simplejson as json + from twisted.internet import defer from synapse.api.errors import ( @@ -24,9 +29,6 @@ from ._base import BaseHandler from synapse.util.async import run_on_reactor from synapse.api.errors import SynapseError, Codes -import json -import logging - logger = logging.getLogger(__name__) diff --git a/synapse/storage/schema/delta/14/upgrade_appservice_db.py b/synapse/storage/schema/delta/14/upgrade_appservice_db.py index 8755bb2e49..4d725b92fe 100644 --- a/synapse/storage/schema/delta/14/upgrade_appservice_db.py +++ b/synapse/storage/schema/delta/14/upgrade_appservice_db.py @@ -12,9 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import json import logging +import simplejson as json + logger = logging.getLogger(__name__) From 05630758f25d958bf60fde4df5f80a89e4a9a0ac Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 29 Mar 2018 22:57:28 +0100 Subject: [PATCH 31/67] Use static JSONEncoders using json.dumps with custom options requires us to create a new JSONEncoder on each call. It's more efficient to create one upfront and reuse it. --- synapse/handlers/message.py | 4 ++-- synapse/replication/tcp/commands.py | 8 +++++--- synapse/storage/events.py | 23 ++++++++--------------- synapse/util/frozenutils.py | 19 +++++++++++++++++++ 4 files changed, 34 insertions(+), 20 deletions(-) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 5a8ddc253e..6de6e13b7b 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -27,7 +27,7 @@ from synapse.types import ( from synapse.util.async import run_on_reactor, ReadWriteLock, Limiter from synapse.util.logcontext import preserve_fn, run_in_background from synapse.util.metrics import measure_func -from synapse.util.frozenutils import unfreeze +from synapse.util.frozenutils import frozendict_json_encoder from synapse.util.stringutils import random_string from synapse.visibility import filter_events_for_client from synapse.replication.http.send_event import send_event_to_master @@ -678,7 +678,7 @@ class EventCreationHandler(object): # Ensure that we can round trip before trying to persist in db try: - dump = simplejson.dumps(unfreeze(event.content)) + dump = frozendict_json_encoder.encode(event.content) simplejson.loads(dump) except Exception: logger.exception("Failed to encode content: %r", event.content) diff --git a/synapse/replication/tcp/commands.py b/synapse/replication/tcp/commands.py index 0005ad5879..34bcf903a3 100644 --- a/synapse/replication/tcp/commands.py +++ b/synapse/replication/tcp/commands.py @@ -24,6 +24,8 @@ import simplejson logger = logging.getLogger(__name__) +_json_encoder = simplejson.JSONEncoder(namedtuple_as_object=False) + class Command(object): """The base command class. @@ -107,7 +109,7 @@ class RdataCommand(Command): return " ".join(( self.stream_name, str(self.token) if self.token is not None else "batch", - simplejson.dumps(self.row, namedtuple_as_object=False), + _json_encoder.dumps(self.row), )) @@ -302,7 +304,7 @@ class InvalidateCacheCommand(Command): def to_line(self): return " ".join(( - self.cache_func, simplejson.dumps(self.keys, namedtuple_as_object=False) + self.cache_func, _json_encoder.encode(self.keys), )) @@ -334,7 +336,7 @@ class UserIpCommand(Command): ) def to_line(self): - return self.user_id + " " + simplejson.dumps(( + return self.user_id + " " + _json_encoder.encode(( self.access_token, self.ip, self.user_agent, self.device_id, self.last_seen, )) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index f3d65f4338..ece5e6c41f 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -14,15 +14,19 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from synapse.storage.events_worker import EventsWorkerStore +from collections import OrderedDict, deque, namedtuple +from functools import wraps +import logging +import simplejson as json from twisted.internet import defer -from synapse.events import USE_FROZEN_DICTS +from synapse.storage.events_worker import EventsWorkerStore from synapse.util.async import ObservableDeferred +from synapse.util.frozenutils import frozendict_json_encoder from synapse.util.logcontext import ( - PreserveLoggingContext, make_deferred_yieldable + PreserveLoggingContext, make_deferred_yieldable, ) from synapse.util.logutils import log_function from synapse.util.metrics import Measure @@ -30,16 +34,8 @@ from synapse.api.constants import EventTypes from synapse.api.errors import SynapseError from synapse.util.caches.descriptors import cached, cachedInlineCallbacks from synapse.types import get_domain_from_id - -from canonicaljson import encode_canonical_json -from collections import deque, namedtuple, OrderedDict -from functools import wraps - import synapse.metrics -import logging -import simplejson as json - # these are only included to make the type annotations work from synapse.events import EventBase # noqa: F401 from synapse.events.snapshot import EventContext # noqa: F401 @@ -71,10 +67,7 @@ state_delta_reuse_delta_counter = metrics.register_counter( def encode_json(json_object): - if USE_FROZEN_DICTS: - return encode_canonical_json(json_object) - else: - return json.dumps(json_object, ensure_ascii=False) + return frozendict_json_encoder.encode(json_object) class _EventPeristenceQueue(object): diff --git a/synapse/util/frozenutils.py b/synapse/util/frozenutils.py index 6322f0f55c..f497b51f4a 100644 --- a/synapse/util/frozenutils.py +++ b/synapse/util/frozenutils.py @@ -14,6 +14,7 @@ # limitations under the License. from frozendict import frozendict +import simplejson as json def freeze(o): @@ -49,3 +50,21 @@ def unfreeze(o): pass return o + + +def _handle_frozendict(obj): + """Helper for EventEncoder. Makes frozendicts serializable by returning + the underlying dict + """ + if type(obj) is frozendict: + # fishing the protected dict out of the object is a bit nasty, + # but we don't really want the overhead of copying the dict. + return obj._dict + raise TypeError('Object of type %s is not JSON serializable' % + obj.__class__.__name__) + + +# A JSONEncoder which is capable of encoding frozendics without barfing +frozendict_json_encoder = json.JSONEncoder( + default=_handle_frozendict, +) From 2fe3f848b92ee9493a724935167fad84678a7eb2 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 29 Mar 2018 23:05:33 +0100 Subject: [PATCH 32/67] Remove uses of events.content --- synapse/storage/room.py | 7 ++++--- synapse/storage/roommember.py | 6 ++++-- synapse/storage/search.py | 6 ++++-- 3 files changed, 12 insertions(+), 7 deletions(-) diff --git a/synapse/storage/room.py b/synapse/storage/room.py index 908551d6d9..740c036975 100644 --- a/synapse/storage/room.py +++ b/synapse/storage/room.py @@ -594,7 +594,8 @@ class RoomStore(RoomWorkerStore, SearchStore): while next_token: sql = """ - SELECT stream_ordering, content FROM events + SELECT stream_ordering, json FROM events + JOIN event_json USING (event_id) WHERE room_id = ? AND stream_ordering < ? AND contains_url = ? AND outlier = ? 
@@ -606,8 +607,8 @@ class RoomStore(RoomWorkerStore, SearchStore): next_token = None for stream_ordering, content_json in txn: next_token = stream_ordering - content = json.loads(content_json) - + event_json = json.loads(content_json) + content = event_json["content"] content_url = content.get("url") thumbnail_url = content.get("info", {}).get("thumbnail_url") diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index d662d1cfc0..6a861943a2 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -645,8 +645,9 @@ class RoomMemberStore(RoomMemberWorkerStore): def add_membership_profile_txn(txn): sql = (""" - SELECT stream_ordering, event_id, events.room_id, content + SELECT stream_ordering, event_id, events.room_id, event_json.json FROM events + INNER JOIN event_json USING (event_id) INNER JOIN room_memberships USING (event_id) WHERE ? <= stream_ordering AND stream_ordering < ? AND type = 'm.room.member' @@ -667,7 +668,8 @@ class RoomMemberStore(RoomMemberWorkerStore): event_id = row["event_id"] room_id = row["room_id"] try: - content = json.loads(row["content"]) + event_json = json.loads(row["json"]) + content = event_json['content'] except Exception: continue diff --git a/synapse/storage/search.py b/synapse/storage/search.py index 984643b057..426cbe6e1a 100644 --- a/synapse/storage/search.py +++ b/synapse/storage/search.py @@ -75,8 +75,9 @@ class SearchStore(BackgroundUpdateStore): def reindex_search_txn(txn): sql = ( - "SELECT stream_ordering, event_id, room_id, type, content, " + "SELECT stream_ordering, event_id, room_id, type, json, " " origin_server_ts FROM events" + " JOIN event_json USING (event_id)" " WHERE ? <= stream_ordering AND stream_ordering < ?" " AND (%s)" " ORDER BY stream_ordering DESC" @@ -104,7 +105,8 @@ class SearchStore(BackgroundUpdateStore): stream_ordering = row["stream_ordering"] origin_server_ts = row["origin_server_ts"] try: - content = json.loads(row["content"]) + event_json = json.loads(row["json"]) + content = event_json["content"] except Exception: continue From 11597ddea5c43fdd2c6593b6bf4619a7bbdf3122 Mon Sep 17 00:00:00 2001 From: Adrian Tschira Date: Fri, 30 Mar 2018 23:59:02 +0200 Subject: [PATCH 33/67] improve mxid check performance ~4x Signed-off-by: Adrian Tschira --- synapse/types.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/synapse/types.py b/synapse/types.py index 7cb24cecb2..f1f41ccf90 100644 --- a/synapse/types.py +++ b/synapse/types.py @@ -12,11 +12,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-import string from synapse.api.errors import SynapseError from collections import namedtuple +import re class Requester(namedtuple("Requester", [ @@ -214,7 +214,8 @@ class GroupID(DomainSpecificString): return group_id -mxid_localpart_allowed_characters = set("_-./=" + string.ascii_lowercase + string.digits) +# A regex that matches any valid mxid characters +MXID_LOCALPART_REGEX = re.compile("^[_\-./=a-z0-9]*$") def contains_invalid_mxid_characters(localpart): @@ -226,7 +227,7 @@ def contains_invalid_mxid_characters(localpart): Returns: bool: True if there are any naughty characters """ - return any(c not in mxid_localpart_allowed_characters for c in localpart) + return not MXID_LOCALPART_REGEX.match(localpart) class StreamToken( From 3ee4ad09eb9bcd0214da83b66214afa3ddb08116 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 3 Apr 2018 15:09:48 +0100 Subject: [PATCH 34/67] Fix json encoding bug in replication json encoders have an encode method, not a dumps method. --- synapse/replication/tcp/commands.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/replication/tcp/commands.py b/synapse/replication/tcp/commands.py index 34bcf903a3..12aac3cc6b 100644 --- a/synapse/replication/tcp/commands.py +++ b/synapse/replication/tcp/commands.py @@ -109,7 +109,7 @@ class RdataCommand(Command): return " ".join(( self.stream_name, str(self.token) if self.token is not None else "batch", - _json_encoder.dumps(self.row), + _json_encoder.encode(self.row), )) From eb8d8d6f57c7f6017548aa95409bb8cc346a5ae0 Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Tue, 3 Apr 2018 15:40:43 +0100 Subject: [PATCH 35/67] Use join_policy API instead of joinable The API is now under /groups/$group_id/setting/m.join_policy and expects a JSON blob of the shape ```json { "m.join_policy": { "type": "invite" } } ``` where "invite" could alternatively be "open". 
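For illustration, a minimal sketch of how a client might drive the new
endpoint (the homeserver URL, group ID and access token are placeholders,
and the "unstable" client-server prefix is an assumption):

```python
# Hypothetical client request; uses the `requests` package for brevity.
# In practice the group ID would be URL-encoded. On success the endpoint
# returns 200 with an empty JSON object.
import requests

resp = requests.put(
    "https://hs.example.com/_matrix/client/unstable"
    "/groups/+mygroup:example.com/setting/m.join_policy",
    params={"access_token": "<access_token>"},
    json={"m.join_policy": {"type": "open"}},
)
resp.raise_for_status()
```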
--- synapse/federation/transport/client.py | 4 +- synapse/federation/transport/server.py | 8 ++-- synapse/groups/groups_server.py | 41 ++++++++++++++++--- synapse/handlers/groups_local.py | 2 +- synapse/rest/client/v2_alpha/groups.py | 12 +++--- synapse/storage/group_server.py | 6 +-- .../schema/delta/48/groups_joinable.sql | 8 +++- 7 files changed, 58 insertions(+), 23 deletions(-) diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 5a6b63350b..0f7f656824 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -860,9 +860,9 @@ class TransportLayerClient(object): @log_function def set_group_joinable(self, destination, group_id, requester_user_id, content): - """Sets whether a group is joinable without an invite or knock + """Sets the join policy for a group """ - path = PREFIX + "/groups/%s/joinable" % (group_id,) + path = PREFIX + "/groups/%s/setting/m.join_policy" % (group_id,) return self.client.post_json( destination=destination, diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 107deb4e1e..a52d3948f4 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -1125,10 +1125,10 @@ class FederationGroupsBulkPublicisedServlet(BaseFederationServlet): defer.returnValue((200, resp)) -class FederationGroupsJoinableServlet(BaseFederationServlet): +class FederationGroupsSettingJoinPolicyServlet(BaseFederationServlet): """Sets whether a group is joinable without an invite or knock """ - PATH = "/groups/(?P[^/]*)/joinable$" + PATH = "/groups/(?P[^/]*)/setting/m.join_policy$" @defer.inlineCallbacks def on_POST(self, origin, content, query, group_id): @@ -1136,7 +1136,7 @@ class FederationGroupsJoinableServlet(BaseFederationServlet): if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") - new_content = yield self.handler.set_group_joinable( + new_content = yield self.handler.set_group_join_policy( group_id, requester_user_id, content ) @@ -1191,7 +1191,7 @@ GROUP_SERVER_SERVLET_CLASSES = ( FederationGroupsSummaryUsersServlet, FederationGroupsAddRoomsServlet, FederationGroupsAddRoomsConfigServlet, - FederationGroupsJoinableServlet, + FederationGroupsSettingJoinPolicyServlet, ) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 25cbfb1691..70781e1854 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -207,20 +207,24 @@ class GroupsServerHandler(object): defer.returnValue({}) @defer.inlineCallbacks - def set_group_joinable(self, group_id, requester_user_id, content): - """Sets whether a group is joinable without an invite or knock + def set_group_join_policy(self, group_id, requester_user_id, content): + """Sets the group join policy. + + Currently supported policies are: + - "invite": an invite must be received and accepted in order to join. + - "open": anyone can join. 
""" yield self.check_group_is_ours( group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id ) - is_joinable = content.get('joinable') - if is_joinable is None: + join_policy = _parse_join_policy_from_contents(content) + if join_policy is None: raise SynapseError( - 400, "No value specified for 'joinable'" + 400, "No value specified for 'm.join_policy'" ) - yield self.store.set_group_joinable(group_id, is_joinable=is_joinable) + yield self.store.set_group_join_policy(group_id, join_policy=join_policy) defer.returnValue({}) @@ -854,6 +858,31 @@ class GroupsServerHandler(object): }) +def _parse_join_policy_from_contents(content): + """Given a content for a request, return the specified join policy or None + """ + + join_policy_dict = content.get("m.join_policy") + if join_policy_dict: + return _parse_join_policy_dict(join_policy_dict) + else: + return None + + +def _parse_join_policy_dict(join_policy_dict): + """Given a dict for the "m.join_policy" config return the join policy specified + """ + join_policy_type = join_policy_dict.get("type") + if not join_policy_type: + return True + + if join_policy_type not in ("invite", "open"): + raise SynapseError( + 400, "Synapse only supports 'invite'/'open' join rule" + ) + return join_policy_type + + def _parse_visibility_from_contents(content): """Given a content for a request parse out whether the entity should be public or not diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index c9671b9046..5f7b0ff305 100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -91,7 +91,7 @@ class GroupsLocalHandler(object): get_group_role = _create_rerouter("get_group_role") get_group_roles = _create_rerouter("get_group_roles") - set_group_joinable = _create_rerouter("set_group_joinable") + set_group_join_policy = _create_rerouter("set_group_join_policy") @defer.inlineCallbacks def get_group_summary(self, group_id, requester_user_id): diff --git a/synapse/rest/client/v2_alpha/groups.py b/synapse/rest/client/v2_alpha/groups.py index aa94130e57..8faaa1d6a0 100644 --- a/synapse/rest/client/v2_alpha/groups.py +++ b/synapse/rest/client/v2_alpha/groups.py @@ -402,13 +402,13 @@ class GroupInvitedUsersServlet(RestServlet): defer.returnValue((200, result)) -class GroupJoinableServlet(RestServlet): - """Set whether a group is joinable without an invite +class GroupSettingJoinPolicyServlet(RestServlet): + """Set group join policy """ - PATTERNS = client_v2_patterns("/groups/(?P[^/]*)/joinable$") + PATTERNS = client_v2_patterns("/groups/(?P[^/]*)/setting/m.join_policy$") def __init__(self, hs): - super(GroupJoinableServlet, self).__init__() + super(GroupSettingJoinPolicyServlet, self).__init__() self.auth = hs.get_auth() self.groups_handler = hs.get_groups_local_handler() @@ -419,7 +419,7 @@ class GroupJoinableServlet(RestServlet): content = parse_json_object_from_request(request) - result = yield self.groups_handler.set_group_joinable( + result = yield self.groups_handler.set_group_join_policy( group_id, requester_user_id, content, @@ -765,7 +765,7 @@ def register_servlets(hs, http_server): GroupInvitedUsersServlet(hs).register(http_server) GroupUsersServlet(hs).register(http_server) GroupRoomServlet(hs).register(http_server) - GroupJoinableServlet(hs).register(http_server) + GroupSettingJoinPolicyServlet(hs).register(http_server) GroupCreateServlet(hs).register(http_server) GroupAdminRoomsServlet(hs).register(http_server) GroupAdminRoomsConfigServlet(hs).register(http_server) diff --git 
a/synapse/storage/group_server.py b/synapse/storage/group_server.py index 96553d4fb1..db66ea1eb0 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -30,16 +30,16 @@ _DEFAULT_ROLE_ID = "" class GroupServerStore(SQLBaseStore): - def set_group_joinable(self, group_id, is_joinable): + def set_group_join_policy(self, group_id, join_policy): return self._simple_update_one( table="groups", keyvalues={ "group_id": group_id, }, updatevalues={ - "is_joinable": is_joinable, + "join_policy": join_policy, }, - desc="set_group_joinable", + desc="set_group_join_policy", ) def get_group(self, group_id): diff --git a/synapse/storage/schema/delta/48/groups_joinable.sql b/synapse/storage/schema/delta/48/groups_joinable.sql index ace7d0a723..ab3b00286d 100644 --- a/synapse/storage/schema/delta/48/groups_joinable.sql +++ b/synapse/storage/schema/delta/48/groups_joinable.sql @@ -13,4 +13,10 @@ * limitations under the License. */ -ALTER TABLE groups ADD COLUMN is_joinable SMALLINT DEFAULT 0 NOT NULL; +/* + * This isn't a real ENUM because sqlite doesn't support it + * and we use a default of NULL for inserted rows and interpret + * NULL at the python store level as necessary so that existing + * rows are given the correct default policy. + */ +ALTER TABLE groups ADD COLUMN join_policy TEXT DEFAULT NULL; From f92963f5db236c1afb2a489a44c9afdae7d61edc Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 4 Apr 2018 12:08:29 +0100 Subject: [PATCH 36/67] Revert "improve mxid check performance" --- synapse/types.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/synapse/types.py b/synapse/types.py index f1f41ccf90..7cb24cecb2 100644 --- a/synapse/types.py +++ b/synapse/types.py @@ -12,11 +12,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import string from synapse.api.errors import SynapseError from collections import namedtuple -import re class Requester(namedtuple("Requester", [ @@ -214,8 +214,7 @@ class GroupID(DomainSpecificString): return group_id -# A regex that matches any valid mxid characters -MXID_LOCALPART_REGEX = re.compile("^[_\-./=a-z0-9]*$") +mxid_localpart_allowed_characters = set("_-./=" + string.ascii_lowercase + string.digits) def contains_invalid_mxid_characters(localpart): @@ -227,7 +226,7 @@ def contains_invalid_mxid_characters(localpart): Returns: bool: True if there are any naughty characters """ - return not MXID_LOCALPART_REGEX.match(localpart) + return any(c not in mxid_localpart_allowed_characters for c in localpart) class StreamToken( From 301b339494f473fddd04cad9a9b107615e9dfa8d Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Wed, 4 Apr 2018 08:45:51 -0600 Subject: [PATCH 37/67] Move the mention of the main synapse worker higher up Signed-off-by: Travis Ralston --- docs/workers.rst | 19 ++++++------------- 1 file changed, 6 insertions(+), 13 deletions(-) diff --git a/docs/workers.rst b/docs/workers.rst index a5e084c22a..bf8dd1ee48 100644 --- a/docs/workers.rst +++ b/docs/workers.rst @@ -55,7 +55,12 @@ synapse process.) You then create a set of configs for the various worker processes. These should be worker configuration files, and should be stored in a dedicated -subdirectory, to allow synctl to manipulate them. +subdirectory, to allow synctl to manipulate them. 
An additional configuration
+for the main synapse process will need to be created because the process will
+not be started automatically. That configuration should look like this::
+
+    worker_app: synapse.app.homeserver
+    daemonize: true

Each worker configuration file inherits the configuration of the main homeserver
configuration file. You can then override configuration specific to that worker,
@@ -115,18 +120,6 @@ To manipulate a specific worker, you pass the -w option to synctl::

    synctl -w $CONFIG/workers/synchrotron.yaml restart

-After setting up your workers, you'll need to create a worker configuration for
-the main synapse process. That worker configuration should look like this:::
-
-    worker_app: synapse.app.homeserver
-    daemonize: true
-
-Be sure to keep this particular configuration limited as synapse may refuse to
-start if the regular ``worker_*`` options are given. The ``homeserver.yaml``
-configuration will be used to set up the main synapse process.
-
-**You must have a worker configuration for the main synapse process!**
-
 Available worker applications
 -----------------------------

From 204fc985204f0c24574ad2bf9fa9518d4fa7552d Mon Sep 17 00:00:00 2001
From: Travis Ralston
Date: Wed, 4 Apr 2018 08:46:17 -0600
Subject: [PATCH 38/67] Document the additional routes for the event_creator worker

Fixes https://github.com/matrix-org/synapse/issues/3018

Signed-off-by: Travis Ralston
---
 docs/workers.rst | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/docs/workers.rst b/docs/workers.rst
index bf8dd1ee48..c3868d6e41 100644
--- a/docs/workers.rst
+++ b/docs/workers.rst
@@ -235,9 +235,11 @@ file. For example::

 ``synapse.app.event_creator``
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

-Handles non-state event creation. It can handle REST endpoints matching:
+Handles some event creation. It can handle REST endpoints matching:

     ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/send
+    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$
+    ^/_matrix/client/(api/v1|r0|unstable)/join/

 It will create events locally and then send them on to the main synapse
 instance to be persisted and handled.
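To make the new routes concrete, here is a small self-contained check (a
sketch only; the sample request paths are invented for illustration) that the
patterns documented above match the traffic the event_creator is expected to
receive:

```python
# The patterns are copied from the documentation above; the sample paths
# are hypothetical requests that should be routed to the event_creator.
import re

PATTERNS = [
    r"^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/send",
    r"^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$",
    r"^/_matrix/client/(api/v1|r0|unstable)/join/",
]

SAMPLES = [
    "/_matrix/client/r0/rooms/!abc:example.com/send/m.room.message/txn1",
    "/_matrix/client/r0/rooms/!abc:example.com/kick",
    "/_matrix/client/unstable/join/!abc:example.com",
]

for path in SAMPLES:
    assert any(re.match(p, path) for p in PATTERNS), path
print("all sample paths match an event_creator route")
```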
From e4570c53dd35f00103e2353884d1dd446fc4c0f4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Christian=20Gr=C3=BCnhage?= Date: Wed, 4 Apr 2018 16:46:58 +0100 Subject: [PATCH 39/67] phone home cache size configurations --- synapse/app/homeserver.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index b935beb974..464799ac90 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -48,6 +48,7 @@ from synapse.server import HomeServer from synapse.storage import are_all_users_on_domain from synapse.storage.engines import IncorrectDatabaseSetup, create_engine from synapse.storage.prepare_database import UpgradeDatabaseException, prepare_database +from synapse.util.caches import CACHE_SIZE_FACTOR from synapse.util.httpresourcetree import create_resource_tree from synapse.util.logcontext import LoggingContext from synapse.util.manhole import manhole @@ -431,6 +432,8 @@ def run(hs): daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages() stats["daily_sent_messages"] = daily_sent_messages + stats["cache_factor"] = CACHE_SIZE_FACTOR + stats["event_cache_size"] = hs.config.event_cache_size if len(stats_process) > 0: stats["memory_rss"] = 0 From 518f6de0881378b1fa356e21256436491d43c93c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 4 Apr 2018 19:46:28 +0100 Subject: [PATCH 40/67] Remove redundant metrics which were deprecated in 0.27.0. --- CHANGES.rst | 9 +++++++++ UPGRADE.rst | 9 ++++++++- docs/metrics-howto.rst | 11 +++++++++++ synapse/http/server.py | 26 -------------------------- synapse/util/metrics.py | 25 ------------------------- 5 files changed, 28 insertions(+), 52 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 38372381ac..5fbad54427 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,12 @@ +Changes in synapse v0.28.0 (2018-xx-xx) +======================================= + +As previously advised, this release removes a number of redundant Prometheus +metrics. Administrators may need to update their dashboards and alerting rules +to use the updated metric names, if they have not already done so. See +`docs/metrics-howto.rst `_ +for more details. + Changes in synapse v0.27.2 (2018-03-26) ======================================= diff --git a/UPGRADE.rst b/UPGRADE.rst index f6bb1070b1..39a16b1c0c 100644 --- a/UPGRADE.rst +++ b/UPGRADE.rst @@ -52,7 +52,7 @@ Upgrading to $NEXT_VERSION ==================== This release expands the anonymous usage stats sent if the opt-in -``report_stats`` configuration is set to ``true``. We now capture RSS memory +``report_stats`` configuration is set to ``true``. We now capture RSS memory and cpu use at a very coarse level. This requires administrators to install the optional ``psutil`` python module. @@ -60,6 +60,13 @@ We would appreciate it if you could assist by ensuring this module is available and ``report_stats`` is enabled. This will let us see if performance changes to synapse are having an impact to the general community. +This release also removes a number of redundant Prometheus metrics. +Administrators may need to update their dashboards and alerting rules to use +the updated metric names, if they have not already done so. See +`docs/metrics-howto.rst `_ +for more details. 
+ + Upgrading to v0.15.0 ==================== diff --git a/docs/metrics-howto.rst b/docs/metrics-howto.rst index 8acc479bc3..5e2d7c52ec 100644 --- a/docs/metrics-howto.rst +++ b/docs/metrics-howto.rst @@ -34,6 +34,17 @@ How to monitor Synapse metrics using Prometheus Restart prometheus. +Deprecated metrics removed in 0.28.0 +------------------------------------ + +Synapse 0.28.0 removes all of the metrics deprecated by 0.27.0, which are those +listed under "Old name" below. This has been done to reduce the bandwidth used +by gathering metrics and the storage requirements for the Prometheus server, as +well as reducing CPU overhead for both Synapse and Prometheus. + +Administrators should update any alerts or monitoring dashboards to use the +"New name" listed below. + Block and response metrics renamed for 0.27.0 --------------------------------------------- diff --git a/synapse/http/server.py b/synapse/http/server.py index f19c068ef6..02c7e46f08 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -47,17 +47,6 @@ metrics = synapse.metrics.get_metrics_for(__name__) response_count = metrics.register_counter( "response_count", labels=["method", "servlet", "tag"], - alternative_names=( - # the following are all deprecated aliases for the same metric - metrics.name_prefix + x for x in ( - "_requests", - "_response_time:count", - "_response_ru_utime:count", - "_response_ru_stime:count", - "_response_db_txn_count:count", - "_response_db_txn_duration:count", - ) - ) ) requests_counter = metrics.register_counter( @@ -73,39 +62,24 @@ outgoing_responses_counter = metrics.register_counter( response_timer = metrics.register_counter( "response_time_seconds", labels=["method", "servlet", "tag"], - alternative_names=( - metrics.name_prefix + "_response_time:total", - ), ) response_ru_utime = metrics.register_counter( "response_ru_utime_seconds", labels=["method", "servlet", "tag"], - alternative_names=( - metrics.name_prefix + "_response_ru_utime:total", - ), ) response_ru_stime = metrics.register_counter( "response_ru_stime_seconds", labels=["method", "servlet", "tag"], - alternative_names=( - metrics.name_prefix + "_response_ru_stime:total", - ), ) response_db_txn_count = metrics.register_counter( "response_db_txn_count", labels=["method", "servlet", "tag"], - alternative_names=( - metrics.name_prefix + "_response_db_txn_count:total", - ), ) # seconds spent waiting for db txns, excluding scheduling time, when processing # this request response_db_txn_duration = metrics.register_counter( "response_db_txn_duration_seconds", labels=["method", "servlet", "tag"], - alternative_names=( - metrics.name_prefix + "_response_db_txn_duration:total", - ), ) # seconds spent waiting for a db connection, when processing this request diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py index e4b5687a4b..c3d8237e8f 100644 --- a/synapse/util/metrics.py +++ b/synapse/util/metrics.py @@ -31,53 +31,28 @@ metrics = synapse.metrics.get_metrics_for(__name__) block_counter = metrics.register_counter( "block_count", labels=["block_name"], - alternative_names=( - # the following are all deprecated aliases for the same metric - metrics.name_prefix + x for x in ( - "_block_timer:count", - "_block_ru_utime:count", - "_block_ru_stime:count", - "_block_db_txn_count:count", - "_block_db_txn_duration:count", - ) - ) ) block_timer = metrics.register_counter( "block_time_seconds", labels=["block_name"], - alternative_names=( - metrics.name_prefix + "_block_timer:total", - ), ) block_ru_utime = 
metrics.register_counter( "block_ru_utime_seconds", labels=["block_name"], - alternative_names=( - metrics.name_prefix + "_block_ru_utime:total", - ), ) block_ru_stime = metrics.register_counter( "block_ru_stime_seconds", labels=["block_name"], - alternative_names=( - metrics.name_prefix + "_block_ru_stime:total", - ), ) block_db_txn_count = metrics.register_counter( "block_db_txn_count", labels=["block_name"], - alternative_names=( - metrics.name_prefix + "_block_db_txn_count:total", - ), ) # seconds spent waiting for db txns, excluding scheduling time, in this block block_db_txn_duration = metrics.register_counter( "block_db_txn_duration_seconds", labels=["block_name"], - alternative_names=( - metrics.name_prefix + "_block_db_txn_duration:total", - ), ) # seconds spent waiting for a db connection, in this block From 0e5f479fc05ef9257c1bfce033c8fb91e6244ffe Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Thu, 5 Apr 2018 12:16:46 +0100 Subject: [PATCH 41/67] Review comments Use iteritems over item to loop over dict formatting --- synapse/app/homeserver.py | 2 +- synapse/storage/__init__.py | 9 +++++---- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 35e2b00f1b..777e9c529a 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -426,7 +426,7 @@ def run(hs): stats["daily_messages"] = yield hs.get_datastore().count_daily_messages() r30_results = yield hs.get_datastore().count_r30_users() - for name, count in r30_results.items(): + for name, count in r30_results.iteritems(): stats["r30_users_" + name] = count daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages() diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index f68e436df0..4800584b59 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -280,8 +280,9 @@ class DataStore(RoomMemberStore, RoomStore, sql = """ SELECT platform, COALESCE(count(*), 0) FROM ( - SELECT users.name, platform, users.creation_ts * 1000, - MAX(uip.last_seen) + SELECT + users.name, platform, users.creation_ts * 1000, + MAX(uip.last_seen) FROM users INNER JOIN ( SELECT @@ -310,8 +311,8 @@ class DataStore(RoomMemberStore, RoomStore, results = {} txn.execute(sql, (thirty_days_ago_in_secs, thirty_days_ago_in_secs)) - rows = txn.fetchall() - for row in rows: + + for row in txn: if row[0] is 'unknown': pass results[row[0]] = row[1] From b214a04ffc1200535f1d9c6ec45717cd266f36e5 Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Thu, 5 Apr 2018 13:29:16 +0100 Subject: [PATCH 42/67] Document set_group_join_policy --- synapse/storage/group_server.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index db66ea1eb0..ab4f710f7d 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -31,6 +31,12 @@ _DEFAULT_ROLE_ID = "" class GroupServerStore(SQLBaseStore): def set_group_join_policy(self, group_id, join_policy): + """Set the join policy of a group. 
+ + join_policy can be one of: + * "invite" + * "open" + """ return self._simple_update_one( table="groups", keyvalues={ From 700e5e719875dd7008791f52828bb3cd92d6ce21 Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Thu, 5 Apr 2018 14:01:17 +0100 Subject: [PATCH 43/67] Use DEFAULT join_policy of "invite" in db --- synapse/storage/schema/delta/48/groups_joinable.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/schema/delta/48/groups_joinable.sql b/synapse/storage/schema/delta/48/groups_joinable.sql index ab3b00286d..53add94367 100644 --- a/synapse/storage/schema/delta/48/groups_joinable.sql +++ b/synapse/storage/schema/delta/48/groups_joinable.sql @@ -19,4 +19,4 @@ * NULL at the python store level as necessary so that existing * rows are given the correct default policy. */ -ALTER TABLE groups ADD COLUMN join_policy TEXT DEFAULT NULL; +ALTER TABLE groups ADD COLUMN join_policy TEXT NON NULL DEFAULT 'invite'; From 104c0bc1d5d1f2a487c50d63b22caa477b091976 Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Thu, 5 Apr 2018 14:07:16 +0100 Subject: [PATCH 44/67] Use "/settings/" (plural) --- synapse/federation/transport/client.py | 2 +- synapse/federation/transport/server.py | 2 +- synapse/rest/client/v2_alpha/groups.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 0f7f656824..1fe162d55b 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -862,7 +862,7 @@ class TransportLayerClient(object): content): """Sets the join policy for a group """ - path = PREFIX + "/groups/%s/setting/m.join_policy" % (group_id,) + path = PREFIX + "/groups/%s/settings/m.join_policy" % (group_id,) return self.client.post_json( destination=destination, diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index a52d3948f4..3658ca75f3 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -1128,7 +1128,7 @@ class FederationGroupsBulkPublicisedServlet(BaseFederationServlet): class FederationGroupsSettingJoinPolicyServlet(BaseFederationServlet): """Sets whether a group is joinable without an invite or knock """ - PATH = "/groups/(?P[^/]*)/setting/m.join_policy$" + PATH = "/groups/(?P[^/]*)/settings/m.join_policy$" @defer.inlineCallbacks def on_POST(self, origin, content, query, group_id): diff --git a/synapse/rest/client/v2_alpha/groups.py b/synapse/rest/client/v2_alpha/groups.py index 8faaa1d6a0..3bb1ec2af6 100644 --- a/synapse/rest/client/v2_alpha/groups.py +++ b/synapse/rest/client/v2_alpha/groups.py @@ -405,7 +405,7 @@ class GroupInvitedUsersServlet(RestServlet): class GroupSettingJoinPolicyServlet(RestServlet): """Set group join policy """ - PATTERNS = client_v2_patterns("/groups/(?P[^/]*)/setting/m.join_policy$") + PATTERNS = client_v2_patterns("/groups/(?P[^/]*)/settings/m.join_policy$") def __init__(self, hs): super(GroupSettingJoinPolicyServlet, self).__init__() From 917380e89d2d323be1a6ea03e53a31ed335c80df Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Thu, 5 Apr 2018 14:32:12 +0100 Subject: [PATCH 45/67] NON NULL -> NOT NULL --- synapse/storage/schema/delta/48/groups_joinable.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/schema/delta/48/groups_joinable.sql b/synapse/storage/schema/delta/48/groups_joinable.sql index 53add94367..ce26eaf0c9 100644 --- 
a/synapse/storage/schema/delta/48/groups_joinable.sql +++ b/synapse/storage/schema/delta/48/groups_joinable.sql @@ -19,4 +19,4 @@ * NULL at the python store level as necessary so that existing * rows are given the correct default policy. */ -ALTER TABLE groups ADD COLUMN join_policy TEXT NON NULL DEFAULT 'invite'; +ALTER TABLE groups ADD COLUMN join_policy TEXT NOT NULL DEFAULT 'invite'; From 01afc563c39006c21bb7752831cd62c146edc135 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 5 Apr 2018 16:24:04 +0100 Subject: [PATCH 46/67] Fix overzealous cache invalidation Fixes an issue where a cache invalidation would invalidate *all* pending entries, rather than just the entry that we intended to invalidate. --- synapse/util/caches/descriptors.py | 64 ++++++++++++++++----------- tests/util/caches/test_descriptors.py | 46 +++++++++++++++++++ 2 files changed, 84 insertions(+), 26 deletions(-) diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index bf3a66eae4..68285a7594 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd +# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -39,12 +40,11 @@ _CacheSentinel = object() class CacheEntry(object): __slots__ = [ - "deferred", "sequence", "callbacks", "invalidated" + "deferred", "callbacks", "invalidated" ] - def __init__(self, deferred, sequence, callbacks): + def __init__(self, deferred, callbacks): self.deferred = deferred - self.sequence = sequence self.callbacks = set(callbacks) self.invalidated = False @@ -62,7 +62,6 @@ class Cache(object): "max_entries", "name", "keylen", - "sequence", "thread", "metrics", "_pending_deferred_cache", @@ -80,7 +79,6 @@ class Cache(object): self.name = name self.keylen = keylen - self.sequence = 0 self.thread = None self.metrics = register_cache(name, self.cache) @@ -113,11 +111,10 @@ class Cache(object): callbacks = [callback] if callback else [] val = self._pending_deferred_cache.get(key, _CacheSentinel) if val is not _CacheSentinel: - if val.sequence == self.sequence: - val.callbacks.update(callbacks) - if update_metrics: - self.metrics.inc_hits() - return val.deferred + val.callbacks.update(callbacks) + if update_metrics: + self.metrics.inc_hits() + return val.deferred val = self.cache.get(key, _CacheSentinel, callbacks=callbacks) if val is not _CacheSentinel: @@ -137,12 +134,9 @@ class Cache(object): self.check_thread() entry = CacheEntry( deferred=value, - sequence=self.sequence, callbacks=callbacks, ) - entry.callbacks.update(callbacks) - existing_entry = self._pending_deferred_cache.pop(key, None) if existing_entry: existing_entry.invalidate() @@ -150,13 +144,25 @@ class Cache(object): self._pending_deferred_cache[key] = entry def shuffle(result): - if self.sequence == entry.sequence: - existing_entry = self._pending_deferred_cache.pop(key, None) - if existing_entry is entry: - self.cache.set(key, result, entry.callbacks) - else: - entry.invalidate() + existing_entry = self._pending_deferred_cache.pop(key, None) + if existing_entry is entry: + self.cache.set(key, result, entry.callbacks) else: + # oops, the _pending_deferred_cache has been updated since + # we started our query, so we are out of date. + # + # Better put back whatever we took out. 
(We do it this way + # round, rather than peeking into the _pending_deferred_cache + # and then removing on a match, to make the common case faster) + if existing_entry is not None: + self._pending_deferred_cache[key] = existing_entry + + # we're not going to put this entry into the cache, so need + # to make sure that the invalidation callbacks are called. + # That was probably done when _pending_deferred_cache was + # updated, but it's possible that `set` was called without + # `invalidate` being previously called, in which case it may + # not have been. Either way, let's double-check now. entry.invalidate() return result @@ -168,25 +174,29 @@ class Cache(object): def invalidate(self, key): self.check_thread() + self.cache.pop(key, None) - # Increment the sequence number so that any SELECT statements that - # raced with the INSERT don't update the cache (SYN-369) - self.sequence += 1 + # if we have a pending lookup for this key, remove it from the + # _pending_deferred_cache, which will (a) stop it being returned + # for future queries and (b) stop it being persisted as a proper entry + # in self.cache. entry = self._pending_deferred_cache.pop(key, None) + + # run the invalidation callbacks now, rather than waiting for the + # deferred to resolve. if entry: entry.invalidate() - self.cache.pop(key, None) - def invalidate_many(self, key): self.check_thread() if not isinstance(key, tuple): raise TypeError( "The cache key must be a tuple not %r" % (type(key),) ) - self.sequence += 1 self.cache.del_multi(key) + # if we have a pending lookup for this key, remove it from the + # _pending_deferred_cache, as above entry_dict = self._pending_deferred_cache.pop(key, None) if entry_dict is not None: for entry in iterate_tree_cache_entry(entry_dict): @@ -194,8 +204,10 @@ class Cache(object): def invalidate_all(self): self.check_thread() - self.sequence += 1 self.cache.clear() + for entry in self._pending_deferred_cache.itervalues(): + entry.invalidate() + self._pending_deferred_cache.clear() class _CacheDescriptorBase(object): diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index 3f14ab503f..2516fe40f4 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd +# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,6 +13,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+from functools import partial import logging import mock @@ -25,6 +27,50 @@ from tests import unittest logger = logging.getLogger(__name__) +class CacheTestCase(unittest.TestCase): + def test_invalidate_all(self): + cache = descriptors.Cache("testcache") + + callback_record = [False, False] + + def record_callback(idx): + callback_record[idx] = True + + # add a couple of pending entries + d1 = defer.Deferred() + cache.set("key1", d1, partial(record_callback, 0)) + + d2 = defer.Deferred() + cache.set("key2", d2, partial(record_callback, 1)) + + # lookup should return the deferreds + self.assertIs(cache.get("key1"), d1) + self.assertIs(cache.get("key2"), d2) + + # let one of the lookups complete + d2.callback("result2") + self.assertEqual(cache.get("key2"), "result2") + + # now do the invalidation + cache.invalidate_all() + + # lookup should return none + self.assertIsNone(cache.get("key1", None)) + self.assertIsNone(cache.get("key2", None)) + + # both callbacks should have been callbacked + self.assertTrue( + callback_record[0], "Invalidation callback for key1 not called", + ) + self.assertTrue( + callback_record[1], "Invalidation callback for key2 not called", + ) + + # letting the other lookup complete should do nothing + d1.callback("result1") + self.assertIsNone(cache.get("key1", None)) + + class DescriptorTestCase(unittest.TestCase): @defer.inlineCallbacks def test_cache(self): From 15e8ed874ff13f30b79e6a47cf814b5a2a7dfc9a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 6 Apr 2018 09:28:36 +0100 Subject: [PATCH 47/67] more verbosity in synctl --- synapse/app/synctl.py | 1 + 1 file changed, 1 insertion(+) diff --git a/synapse/app/synctl.py b/synapse/app/synctl.py index b0e1b5e66a..712dfa870e 100755 --- a/synapse/app/synctl.py +++ b/synapse/app/synctl.py @@ -252,6 +252,7 @@ def main(): for running_pid in running_pids: while pid_running(running_pid): time.sleep(0.2) + write("All processes exited; now restarting...") if action == "start" or action == "restart": if start_stop_synapse: From 1d71f484d4ec00fd41e3ef195622d0d5dba6d372 Mon Sep 17 00:00:00 2001 From: Krombel Date: Fri, 6 Apr 2018 12:54:09 +0200 Subject: [PATCH 48/67] use PUT instead of POST for federating groups/m.join_policy --- synapse/federation/transport/client.py | 2 +- synapse/federation/transport/server.py | 2 +- synapse/http/matrixfederationclient.py | 6 +++++- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 1fe162d55b..3beab47832 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -864,7 +864,7 @@ class TransportLayerClient(object): """ path = PREFIX + "/groups/%s/settings/m.join_policy" % (group_id,) - return self.client.post_json( + return self.client.put_json( destination=destination, path=path, args={"requester_user_id": requester_user_id}, diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 3658ca75f3..b98e30459c 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -1131,7 +1131,7 @@ class FederationGroupsSettingJoinPolicyServlet(BaseFederationServlet): PATH = "/groups/(?P[^/]*)/settings/m.join_policy$" @defer.inlineCallbacks - def on_POST(self, origin, content, query, group_id): + def on_PUT(self, origin, content, query, group_id): requester_user_id = parse_string_from_args(query, "requester_user_id") if get_domain_from_id(requester_user_id) != origin: raise 
SynapseError(403, "requester_user_id doesn't match origin") diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 9145405cb0..60a29081e8 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -286,7 +286,8 @@ class MatrixFederationHttpClient(object): headers_dict[b"Authorization"] = auth_headers @defer.inlineCallbacks - def put_json(self, destination, path, data={}, json_data_callback=None, + def put_json(self, destination, path, args={}, data={}, + json_data_callback=None, long_retries=False, timeout=None, ignore_backoff=False, backoff_on_404=False): @@ -296,6 +297,7 @@ class MatrixFederationHttpClient(object): destination (str): The remote server to send the HTTP request to. path (str): The HTTP path. + args (dict): query params data (dict): A dict containing the data that will be used as the request body. This will be encoded as JSON. json_data_callback (callable): A callable returning the dict to @@ -342,6 +344,7 @@ class MatrixFederationHttpClient(object): path, body_callback=body_callback, headers_dict={"Content-Type": ["application/json"]}, + query_bytes=encode_query_args(args), long_retries=long_retries, timeout=timeout, ignore_backoff=ignore_backoff, @@ -373,6 +376,7 @@ class MatrixFederationHttpClient(object): giving up. None indicates no timeout. ignore_backoff (bool): true to ignore the historical backoff data and try the request anyway. + args (dict): query params Returns: Deferred: Succeeds when we get a 2xx HTTP response. The result will be the decoded JSON body. From 7b824f147590c6519ca8889e6d406928de7f1e6e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 6 Apr 2018 13:20:05 +0100 Subject: [PATCH 49/67] Add response size metrics --- synapse/http/server.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/synapse/http/server.py b/synapse/http/server.py index 02c7e46f08..ac75206ef5 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -87,6 +87,11 @@ response_db_sched_duration = metrics.register_counter( "response_db_sched_duration_seconds", labels=["method", "servlet", "tag"] ) +# size in bytes of the response written +response_size = metrics.register_counter( + "response_size", labels=["method", "servlet", "tag"] +) + _next_request_id = 0 @@ -400,6 +405,8 @@ class RequestMetrics(object): context.db_sched_duration_ms / 1000., request.method, self.name, tag ) + response_size.inc_by(request.sentLength, request.method, self.name, tag) + class RootRedirect(resource.Resource): """Redirects the root '/' path to another path.""" From e01ba5bda315ed80f2a809e2f2e917eaf7488fac Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 6 Apr 2018 13:45:10 +0100 Subject: [PATCH 50/67] Port script: avoid nasty errors when setting up We really shouldn't spit out "Failed to create port table", it looks scary. --- scripts/synapse_port_db | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db index d46581e4e1..7fb8be3abd 100755 --- a/scripts/synapse_port_db +++ b/scripts/synapse_port_db @@ -1,6 +1,7 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd +# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -491,7 +492,7 @@ class Porter(object): def create_port_table(txn): txn.execute( - "CREATE TABLE port_from_sqlite3 (" + "CREATE TABLE IF NOT EXISTS port_from_sqlite3 (" " table_name varchar(100) NOT NULL UNIQUE," " forward_rowid bigint NOT NULL," " backward_rowid bigint NOT NULL" @@ -517,14 +518,11 @@ class Porter(object): "alter_table", alter_table ) except Exception as e: - logger.info("Failed to create port table: %s", e) + pass - try: - yield self.postgres_store.runInteraction( - "create_port_table", create_port_table - ) - except Exception as e: - logger.info("Failed to create port table: %s", e) + yield self.postgres_store.runInteraction( + "create_port_table", create_port_table + ) self.progress.set_state("Setting up") From 01579384ccd26d112df5169522fd16234e8316c7 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 6 Apr 2018 13:47:11 +0100 Subject: [PATCH 51/67] Port script: clean up a bit Improve logging and comments. Group all the stuff to do with inspecting tables together rather than creating the port tables in the middle. --- scripts/synapse_port_db | 54 ++++++++++++++++++++++------------------- 1 file changed, 29 insertions(+), 25 deletions(-) diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db index 7fb8be3abd..dc1a10bf28 100755 --- a/scripts/synapse_port_db +++ b/scripts/synapse_port_db @@ -251,6 +251,12 @@ class Porter(object): @defer.inlineCallbacks def handle_table(self, table, postgres_size, table_size, forward_chunk, backward_chunk): + logger.info( + "Table %s: %i/%i (rows %i-%i) already ported", + table, postgres_size, table_size, + backward_chunk+1, forward_chunk-1, + ) + if not table_size: return @@ -468,28 +474,7 @@ class Porter(object): self.progress.set_state("Preparing PostgreSQL") self.setup_db(postgres_config, postgres_engine) - # Step 2. Get tables. - self.progress.set_state("Fetching tables") - sqlite_tables = yield self.sqlite_store._simple_select_onecol( - table="sqlite_master", - keyvalues={ - "type": "table", - }, - retcol="name", - ) - - postgres_tables = yield self.postgres_store._simple_select_onecol( - table="information_schema.tables", - keyvalues={}, - retcol="distinct table_name", - ) - - tables = set(sqlite_tables) & set(postgres_tables) - - self.progress.set_state("Creating tables") - - logger.info("Found %d tables", len(tables)) - + self.progress.set_state("Creating port tables") def create_port_table(txn): txn.execute( "CREATE TABLE IF NOT EXISTS port_from_sqlite3 (" @@ -524,9 +509,27 @@ class Porter(object): "create_port_table", create_port_table ) - self.progress.set_state("Setting up") + # Step 2. Get tables. + self.progress.set_state("Fetching tables") + sqlite_tables = yield self.sqlite_store._simple_select_onecol( + table="sqlite_master", + keyvalues={ + "type": "table", + }, + retcol="name", + ) - # Set up tables. + postgres_tables = yield self.postgres_store._simple_select_onecol( + table="information_schema.tables", + keyvalues={}, + retcol="distinct table_name", + ) + + tables = set(sqlite_tables) & set(postgres_tables) + logger.info("Found %d tables", len(tables)) + + # Step 3. Figure out what still needs copying + self.progress.set_state("Checking on port progress") setup_res = yield defer.gatherResults( [ self.setup_table(table) @@ -537,7 +540,8 @@ class Porter(object): consumeErrors=True, ) - # Process tables. + # Step 4. Do the copying. 
+ self.progress.set_state("Copying to postgres") yield defer.gatherResults( [ self.handle_table(*res) From 6a9777ba028af1d9803d13e879eaf62773c5bd83 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 6 Apr 2018 13:48:40 +0100 Subject: [PATCH 52/67] Port script: Set up state_group_id_seq Fixes https://github.com/matrix-org/synapse/issues/3050. --- scripts/synapse_port_db | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db index dc1a10bf28..7b23a44854 100755 --- a/scripts/synapse_port_db +++ b/scripts/synapse_port_db @@ -550,6 +550,9 @@ class Porter(object): consumeErrors=True, ) + # Step 5. Do final post-processing + yield self._setup_state_group_id_seq() + self.progress.done() except: global end_error_exec_info @@ -709,6 +712,16 @@ class Porter(object): defer.returnValue((done, remaining + done)) + def _setup_state_group_id_seq(self): + def r(txn): + txn.execute("SELECT MAX(id) FROM state_groups") + next_id = txn.fetchone()[0]+1 + txn.execute( + "ALTER SEQUENCE state_group_id_seq RESTART WITH %s", + (next_id,), + ) + return self.postgres_store.runInteraction("setup_state_group_id_seq", r) + ############################################## ###### The following is simply UI stuff ###### From b370fe61c0aeccac7745ad404dad925ec217ba6d Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 28 Mar 2018 17:18:02 +0100 Subject: [PATCH 53/67] Implement group join API --- synapse/federation/transport/client.py | 13 ++++++++ synapse/federation/transport/server.py | 18 +++++++++++ synapse/groups/groups_server.py | 45 ++++++++++++++++++++++++++ synapse/handlers/groups_local.py | 40 ++++++++++++++++++++++- synapse/storage/group_server.py | 12 +++++-- 5 files changed, 124 insertions(+), 4 deletions(-) diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 3beab47832..370f7ba78b 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -614,6 +614,19 @@ class TransportLayerClient(object): ignore_backoff=True, ) + @log_function + def join_group(self, destination, group_id, user_id, content): + """Attempts to join a group + """ + path = PREFIX + "/groups/%s/users/%s/join" % (group_id, user_id) + + return self.client.post_json( + destination=destination, + path=path, + data=content, + ignore_backoff=True, + ) + @log_function def invite_to_group(self, destination, group_id, user_id, requester_user_id, content): """Invite a user to a group diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index b98e30459c..4c94d5a36c 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -803,6 +803,23 @@ class FederationGroupsAcceptInviteServlet(BaseFederationServlet): defer.returnValue((200, new_content)) +class FederationGroupsJoinServlet(BaseFederationServlet): + """Attempt to join a group + """ + PATH = "/groups/(?P[^/]*)/users/(?P[^/]*)/join$" + + @defer.inlineCallbacks + def on_POST(self, origin, content, query, group_id, user_id): + if get_domain_from_id(user_id) != origin: + raise SynapseError(403, "user_id doesn't match origin") + + new_content = yield self.handler.join_group( + group_id, user_id, content, + ) + + defer.returnValue((200, new_content)) + + class FederationGroupsRemoveUserServlet(BaseFederationServlet): """Leave or kick a user from the group """ @@ -1182,6 +1199,7 @@ GROUP_SERVER_SERVLET_CLASSES = ( FederationGroupsInvitedUsersServlet, 
FederationGroupsInviteServlet, FederationGroupsAcceptInviteServlet, + FederationGroupsJoinServlet, FederationGroupsRemoveUserServlet, FederationGroupsSummaryRoomsServlet, FederationGroupsCategoriesServlet, diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 70781e1854..77b6273e39 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -723,6 +723,51 @@ class GroupsServerHandler(object): "attestation": local_attestation, }) + @defer.inlineCallbacks + def join_group(self, group_id, requester_user_id, content): + """User tries to join the group. + + This will error if the group requires an invite/knock to join + """ + + yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True) + + group_info = yield self.store.get_group( + group_id, + ) + if not group_info['is_joinable']: + raise SynapseError(403, "Group is not publicly joinable") + + if not self.hs.is_mine_id(requester_user_id): + local_attestation = self.attestations.create_attestation( + group_id, requester_user_id, + ) + remote_attestation = content["attestation"] + + yield self.attestations.verify_attestation( + remote_attestation, + user_id=requester_user_id, + group_id=group_id, + ) + else: + local_attestation = None + remote_attestation = None + + is_public = _parse_visibility_from_contents(content) + + yield self.store.add_user_to_group( + group_id, requester_user_id, + is_admin=False, + is_public=is_public, + local_attestation=local_attestation, + remote_attestation=remote_attestation, + ) + + defer.returnValue({ + "state": "join", + "attestation": local_attestation, + }) + @defer.inlineCallbacks def knock(self, group_id, requester_user_id, content): """A user requests becoming a member of the group diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index 5f7b0ff305..977993e7d4 100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -229,7 +229,45 @@ class GroupsLocalHandler(object): def join_group(self, group_id, user_id, content): """Request to join a group """ - raise NotImplementedError() # TODO + if self.is_mine_id(group_id): + yield self.groups_server_handler.join_group( + group_id, user_id, content + ) + local_attestation = None + remote_attestation = None + else: + local_attestation = self.attestations.create_attestation(group_id, user_id) + content["attestation"] = local_attestation + + res = yield self.transport_client.join_group( + get_domain_from_id(group_id), group_id, user_id, content, + ) + + remote_attestation = res["attestation"] + + yield self.attestations.verify_attestation( + remote_attestation, + group_id=group_id, + user_id=user_id, + server_name=get_domain_from_id(group_id), + ) + + # TODO: Check that the group is public and we're being added publically + is_publicised = content.get("publicise", False) + + token = yield self.store.register_user_group_membership( + group_id, user_id, + membership="join", + is_admin=False, + local_attestation=local_attestation, + remote_attestation=remote_attestation, + is_publicised=is_publicised, + ) + self.notifier.on_new_event( + "groups_key", token, users=[user_id], + ) + + defer.returnValue({}) @defer.inlineCallbacks def accept_invite(self, group_id, user_id, content): diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index db316a27ec..16c11a056f 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -48,19 +48,25 @@ class GroupServerStore(SQLBaseStore): 
desc="set_group_join_policy", ) + @defer.inlineCallbacks def get_group(self, group_id): - return self._simple_select_one( + ret = yield self._simple_select_one( table="groups", keyvalues={ "group_id": group_id, }, retcols=( - "name", "short_description", "long_description", "avatar_url", "is_public" + "name", "short_description", "long_description", "avatar_url", "is_public", "is_joinable", ), allow_none=True, - desc="is_user_in_group", + desc="get_group", ) + if ret and 'is_joinable' in ret: + ret['is_joinable'] = bool(ret['is_joinable']) + + defer.returnValue(ret) + def get_users_in_group(self, group_id, include_private=False): # TODO: Pagination From edb45aae38b7bdeae1cbf18b435be5f8610c2d48 Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 28 Mar 2018 17:18:50 +0100 Subject: [PATCH 54/67] pep8 --- synapse/storage/group_server.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index 16c11a056f..5fbe0ada4e 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -56,7 +56,8 @@ class GroupServerStore(SQLBaseStore): "group_id": group_id, }, retcols=( - "name", "short_description", "long_description", "avatar_url", "is_public", "is_joinable", + "name", "short_description", "long_description", + "avatar_url", "is_public", "is_joinable", ), allow_none=True, desc="get_group", From 6eb3aa94b6befcc89f6fee426053b3feee316bb9 Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Tue, 3 Apr 2018 11:48:17 +0100 Subject: [PATCH 55/67] Factor out add_user from accept_invite and join_group --- synapse/groups/groups_server.py | 84 ++++++++++++++------------------- 1 file changed, 36 insertions(+), 48 deletions(-) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 77b6273e39..2c02da4725 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -677,6 +677,40 @@ class GroupsServerHandler(object): else: raise SynapseError(502, "Unknown state returned by HS") + @defer.inlineCallbacks + def add_user(self, group_id, user_id, content): + """Add a user to a group based on a content dict. + + See accept_invite, join_group. + """ + if not self.hs.is_mine_id(user_id): + local_attestation = self.attestations.create_attestation( + group_id, user_id, + ) + + remote_attestation = content["attestation"] + + yield self.attestations.verify_attestation( + remote_attestation, + user_id=user_id, + group_id=group_id, + ) + else: + local_attestation = None + remote_attestation = None + + is_public = _parse_visibility_from_contents(content) + + yield self.store.add_user_to_group( + group_id, user_id, + is_admin=False, + is_public=is_public, + local_attestation=local_attestation, + remote_attestation=remote_attestation, + ) + + defer.returnValue(local_attestation) + @defer.inlineCallbacks def accept_invite(self, group_id, requester_user_id, content): """User tries to accept an invite to the group. 
@@ -693,30 +727,7 @@ class GroupsServerHandler(object): if not is_invited: raise SynapseError(403, "User not invited to group") - if not self.hs.is_mine_id(requester_user_id): - local_attestation = self.attestations.create_attestation( - group_id, requester_user_id, - ) - remote_attestation = content["attestation"] - - yield self.attestations.verify_attestation( - remote_attestation, - user_id=requester_user_id, - group_id=group_id, - ) - else: - local_attestation = None - remote_attestation = None - - is_public = _parse_visibility_from_contents(content) - - yield self.store.add_user_to_group( - group_id, requester_user_id, - is_admin=False, - is_public=is_public, - local_attestation=local_attestation, - remote_attestation=remote_attestation, - ) + local_attestation = yield self.add_user(group_id, requester_user_id, content) defer.returnValue({ "state": "join", @@ -738,30 +749,7 @@ class GroupsServerHandler(object): if not group_info['is_joinable']: raise SynapseError(403, "Group is not publicly joinable") - if not self.hs.is_mine_id(requester_user_id): - local_attestation = self.attestations.create_attestation( - group_id, requester_user_id, - ) - remote_attestation = content["attestation"] - - yield self.attestations.verify_attestation( - remote_attestation, - user_id=requester_user_id, - group_id=group_id, - ) - else: - local_attestation = None - remote_attestation = None - - is_public = _parse_visibility_from_contents(content) - - yield self.store.add_user_to_group( - group_id, requester_user_id, - is_admin=False, - is_public=is_public, - local_attestation=local_attestation, - remote_attestation=remote_attestation, - ) + local_attestation = yield self.add_user(group_id, requester_user_id, content) defer.returnValue({ "state": "join", From f8d1917fce3b08b613f76131d6a82a7576379251 Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Thu, 5 Apr 2018 16:31:24 +0100 Subject: [PATCH 56/67] Fix federation client `set_group_joinable` typo --- synapse/federation/transport/client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 370f7ba78b..67ff7da51b 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -871,7 +871,7 @@ class TransportLayerClient(object): ) @log_function - def set_group_joinable(self, destination, group_id, requester_user_id, + def set_group_join_policy(self, destination, group_id, requester_user_id, content): """Sets the join policy for a group """ From ae85c7804e733aa1adaed06a9de51445a084858e Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Thu, 5 Apr 2018 16:31:57 +0100 Subject: [PATCH 57/67] is_joinable -> join_rule --- synapse/groups/groups_server.py | 2 +- synapse/storage/group_server.py | 5 +---- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 2c02da4725..507ec232c5 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -746,7 +746,7 @@ class GroupsServerHandler(object): group_info = yield self.store.get_group( group_id, ) - if not group_info['is_joinable']: + if group_info['join_policy'] != "open": raise SynapseError(403, "Group is not publicly joinable") local_attestation = yield self.add_user(group_id, requester_user_id, content) diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index 5fbe0ada4e..d81609dd16 100644 --- a/synapse/storage/group_server.py +++ 
b/synapse/storage/group_server.py @@ -57,15 +57,12 @@ class GroupServerStore(SQLBaseStore): }, retcols=( "name", "short_description", "long_description", - "avatar_url", "is_public", "is_joinable", + "avatar_url", "is_public", "join_rule", ), allow_none=True, desc="get_group", ) - if ret and 'is_joinable' in ret: - ret['is_joinable'] = bool(ret['is_joinable']) - defer.returnValue(ret) def get_users_in_group(self, group_id, include_private=False): From 87c864b6984f6876f224e0d4ac2e2b872d2723ce Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Thu, 5 Apr 2018 16:49:22 +0100 Subject: [PATCH 58/67] join_rule -> join_policy --- synapse/storage/group_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index d81609dd16..8bd8ca8124 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -57,7 +57,7 @@ class GroupServerStore(SQLBaseStore): }, retcols=( "name", "short_description", "long_description", - "avatar_url", "is_public", "join_rule", + "avatar_url", "is_public", "join_policy", ), allow_none=True, desc="get_group", From cd087a265db8037ce2fe158ddc0e9b35a612d4bb Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Thu, 5 Apr 2018 17:14:27 +0100 Subject: [PATCH 59/67] Don't use redundant inlineCallbacks --- synapse/storage/group_server.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index 8bd8ca8124..da05ccb027 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -48,9 +48,8 @@ class GroupServerStore(SQLBaseStore): desc="set_group_join_policy", ) - @defer.inlineCallbacks def get_group(self, group_id): - ret = yield self._simple_select_one( + return self._simple_select_one( table="groups", keyvalues={ "group_id": group_id, @@ -63,8 +62,6 @@ class GroupServerStore(SQLBaseStore): desc="get_group", ) - defer.returnValue(ret) - def get_users_in_group(self, group_id, include_private=False): # TODO: Pagination From 6850f8aea3482443dff1c330fb63f7ca8dde0025 Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Thu, 5 Apr 2018 17:16:31 +0100 Subject: [PATCH 60/67] Get group_info from existing call to check_group_is_ours --- synapse/groups/groups_server.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 507ec232c5..1b516011d8 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -741,11 +741,7 @@ class GroupsServerHandler(object): This will error if the group requires an invite/knock to join """ - yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True) - - group_info = yield self.store.get_group( - group_id, - ) + group_info = yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True) if group_info['join_policy'] != "open": raise SynapseError(403, "Group is not publicly joinable") From 112c2253e2846b48b75f4171d9dd94bfe6ecc8a1 Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Thu, 5 Apr 2018 17:32:20 +0100 Subject: [PATCH 61/67] pep8 --- synapse/federation/transport/client.py | 2 +- synapse/groups/groups_server.py | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 67ff7da51b..50a967a7ec 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -872,7 +872,7 @@ class 
TransportLayerClient(object): @log_function def set_group_join_policy(self, destination, group_id, requester_user_id, - content): + content): """Sets the join policy for a group """ path = PREFIX + "/groups/%s/settings/m.join_policy" % (group_id,) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 1b516011d8..15d31549de 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -741,7 +741,9 @@ class GroupsServerHandler(object): This will error if the group requires an invite/knock to join """ - group_info = yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True) + group_info = yield self.check_group_is_ours( + group_id, requester_user_id, and_exists=True + ) if group_info['join_policy'] != "open": raise SynapseError(403, "Group is not publicly joinable") From b4478e586fe52392a439c94f525c3d885454f0a8 Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Fri, 6 Apr 2018 11:37:49 +0100 Subject: [PATCH 62/67] add_user -> _add_user --- synapse/groups/groups_server.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 15d31549de..72dfed7dbb 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -678,7 +678,7 @@ class GroupsServerHandler(object): raise SynapseError(502, "Unknown state returned by HS") @defer.inlineCallbacks - def add_user(self, group_id, user_id, content): + def _add_user(self, group_id, user_id, content): """Add a user to a group based on a content dict. See accept_invite, join_group. @@ -727,7 +727,7 @@ class GroupsServerHandler(object): if not is_invited: raise SynapseError(403, "User not invited to group") - local_attestation = yield self.add_user(group_id, requester_user_id, content) + local_attestation = yield self._add_user(group_id, requester_user_id, content) defer.returnValue({ "state": "join", @@ -747,7 +747,7 @@ class GroupsServerHandler(object): if group_info['join_policy'] != "open": raise SynapseError(403, "Group is not publicly joinable") - local_attestation = yield self.add_user(group_id, requester_user_id, content) + local_attestation = yield self._add_user(group_id, requester_user_id, content) defer.returnValue({ "state": "join", From 6bd1b7053e41350fcc1b451e40da82257e2120b5 Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Fri, 6 Apr 2018 11:44:18 +0100 Subject: [PATCH 63/67] By default, join policy is "invite" --- synapse/groups/groups_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 72dfed7dbb..8310dea031 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -905,7 +905,7 @@ def _parse_join_policy_dict(join_policy_dict): """ join_policy_type = join_policy_dict.get("type") if not join_policy_type: - return True + return "invite" if join_policy_type not in ("invite", "open"): raise SynapseError( From 7945435587704c5abd075140bdd46c11db93c255 Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Fri, 6 Apr 2018 14:06:32 +0100 Subject: [PATCH 64/67] When exposing group state, return is_openly_joinable as opposed to join_policy, which is really only pertinent to the synapse implementation of the group server. By doing this we keep the group server concept extensible by allowing arbitrarily complex rules for deciding whether a group is openly joinable. 
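Roughly, the exposed profile ends up derived as in this standalone sketch
(an approximation of where the handler logic lands after this patch and
its follow-ups below, not the code itself)::

    def describe_group(group):
        # Copy only the columns that are part of the public description;
        # the list matches the retcols selected by get_group.
        cols = [
            "name", "short_description", "long_description",
            "avatar_url", "is_public",
        ]
        description = {key: group[key] for key in cols}
        # Expose a plain boolean rather than the implementation-specific
        # policy string, leaving group servers free to apply arbitrarily
        # complex join rules internally.
        description["is_openly_joinable"] = group["join_policy"] == "open"
        return description

    describe_group({
        "name": "Example", "short_description": "", "long_description": "",
        "avatar_url": "", "is_public": True, "join_policy": "invite",
    })
    # => {..., "is_openly_joinable": False}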
--- synapse/groups/groups_server.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 8310dea031..290eec7127 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -407,6 +407,11 @@ class GroupsServerHandler(object): group_description = yield self.store.get_group(group_id) if group_description: + join_policy = group_description['join_policy'] + del group_description['join_policy'] + + group_description['is_openly_joinable'] = join_policy == "open" + defer.returnValue(group_description) else: raise SynapseError(404, "Unknown group") From db2fd801f722ae8341b36314fb8929a80fd53996 Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Fri, 6 Apr 2018 15:51:15 +0100 Subject: [PATCH 65/67] Explicitly grab individual columns from group object --- synapse/groups/groups_server.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 290eec7127..ad937c1721 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -404,13 +404,15 @@ class GroupsServerHandler(object): yield self.check_group_is_ours(group_id, requester_user_id) - group_description = yield self.store.get_group(group_id) + group = yield self.store.get_group(group_id) - if group_description: - join_policy = group_description['join_policy'] - del group_description['join_policy'] - - group_description['is_openly_joinable'] = join_policy == "open" + if group: + cols = [ + "name", "short_description", "long_description", + "avatar_url", "is_public", + ] + group_description = { key: group[key] for key in cols } + group_description["is_openly_joinable"] = group['join_policy'] == "open" defer.returnValue(group_description) else: From 020a5013544b1f7046faa97b83eb2acc613334b1 Mon Sep 17 00:00:00 2001 From: Luke Barnard Date: Fri, 6 Apr 2018 16:02:06 +0100 Subject: [PATCH 66/67] de-lint, quote consistency --- synapse/groups/groups_server.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index ad937c1721..2d95b04e0c 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -411,8 +411,8 @@ class GroupsServerHandler(object): "name", "short_description", "long_description", "avatar_url", "is_public", ] - group_description = { key: group[key] for key in cols } - group_description["is_openly_joinable"] = group['join_policy'] == "open" + group_description = {key: group[key] for key in cols} + group_description["is_openly_joinable"] = group["join_policy"] == "open" defer.returnValue(group_description) else: From 13decdbf96981782616a3ee1826fce1213a1bc89 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 9 Apr 2018 12:58:37 +0100 Subject: [PATCH 67/67] Revert "Merge pull request #3066 from matrix-org/rav/remove_redundant_metrics" We aren't ready to release this yet, so I'm reverting it for now. This reverts commit d1679a4ed7947b0814e0f2af9b888a16c588f1a1, reversing changes made to e089100c6231541c446e37e157dec8feed02d283. 
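The compatibility pattern being restored registers each counter under its
renamed form while keeping the deprecated names emitting until dashboards
have migrated; a standalone sketch (register_counter below is a stand-in
for the synapse.metrics helper, not its real implementation)::

    def register_counter(name, labels=(), alternative_names=()):
        # The real helper creates a single underlying counter and
        # exports it under the new name plus every listed alias.
        return {
            "name": name,
            "labels": tuple(labels),
            "aliases": tuple(alternative_names),
        }

    name_prefix = "synapse_util_metrics"
    block_timer = register_counter(
        "block_time_seconds",
        labels=["block_name"],
        alternative_names=(name_prefix + "_block_timer:total",),
    )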
--- CHANGES.rst | 9 --------- UPGRADE.rst | 9 +-------- docs/metrics-howto.rst | 11 ----------- synapse/http/server.py | 26 ++++++++++++++++++++++++++ synapse/util/metrics.py | 25 +++++++++++++++++++++++++ 5 files changed, 52 insertions(+), 28 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 5fbad54427..38372381ac 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,12 +1,3 @@ -Changes in synapse v0.28.0 (2018-xx-xx) -======================================= - -As previously advised, this release removes a number of redundant Prometheus -metrics. Administrators may need to update their dashboards and alerting rules -to use the updated metric names, if they have not already done so. See -`docs/metrics-howto.rst `_ -for more details. - Changes in synapse v0.27.2 (2018-03-26) ======================================= diff --git a/UPGRADE.rst b/UPGRADE.rst index 39a16b1c0c..f6bb1070b1 100644 --- a/UPGRADE.rst +++ b/UPGRADE.rst @@ -52,7 +52,7 @@ Upgrading to $NEXT_VERSION ==================== This release expands the anonymous usage stats sent if the opt-in -``report_stats`` configuration is set to ``true``. We now capture RSS memory +``report_stats`` configuration is set to ``true``. We now capture RSS memory and cpu use at a very coarse level. This requires administrators to install the optional ``psutil`` python module. @@ -60,13 +60,6 @@ We would appreciate it if you could assist by ensuring this module is available and ``report_stats`` is enabled. This will let us see if performance changes to synapse are having an impact to the general community. -This release also removes a number of redundant Prometheus metrics. -Administrators may need to update their dashboards and alerting rules to use -the updated metric names, if they have not already done so. See -`docs/metrics-howto.rst `_ -for more details. - - Upgrading to v0.15.0 ==================== diff --git a/docs/metrics-howto.rst b/docs/metrics-howto.rst index 5e2d7c52ec..8acc479bc3 100644 --- a/docs/metrics-howto.rst +++ b/docs/metrics-howto.rst @@ -34,17 +34,6 @@ How to monitor Synapse metrics using Prometheus Restart prometheus. -Deprecated metrics removed in 0.28.0 ------------------------------------- - -Synapse 0.28.0 removes all of the metrics deprecated by 0.27.0, which are those -listed under "Old name" below. This has been done to reduce the bandwidth used -by gathering metrics and the storage requirements for the Prometheus server, as -well as reducing CPU overhead for both Synapse and Prometheus. - -Administrators should update any alerts or monitoring dashboards to use the -"New name" listed below. 
- Block and response metrics renamed for 0.27.0 --------------------------------------------- diff --git a/synapse/http/server.py b/synapse/http/server.py index ac75206ef5..64e083ebfc 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -47,6 +47,17 @@ metrics = synapse.metrics.get_metrics_for(__name__) response_count = metrics.register_counter( "response_count", labels=["method", "servlet", "tag"], + alternative_names=( + # the following are all deprecated aliases for the same metric + metrics.name_prefix + x for x in ( + "_requests", + "_response_time:count", + "_response_ru_utime:count", + "_response_ru_stime:count", + "_response_db_txn_count:count", + "_response_db_txn_duration:count", + ) + ) ) requests_counter = metrics.register_counter( @@ -62,24 +73,39 @@ outgoing_responses_counter = metrics.register_counter( response_timer = metrics.register_counter( "response_time_seconds", labels=["method", "servlet", "tag"], + alternative_names=( + metrics.name_prefix + "_response_time:total", + ), ) response_ru_utime = metrics.register_counter( "response_ru_utime_seconds", labels=["method", "servlet", "tag"], + alternative_names=( + metrics.name_prefix + "_response_ru_utime:total", + ), ) response_ru_stime = metrics.register_counter( "response_ru_stime_seconds", labels=["method", "servlet", "tag"], + alternative_names=( + metrics.name_prefix + "_response_ru_stime:total", + ), ) response_db_txn_count = metrics.register_counter( "response_db_txn_count", labels=["method", "servlet", "tag"], + alternative_names=( + metrics.name_prefix + "_response_db_txn_count:total", + ), ) # seconds spent waiting for db txns, excluding scheduling time, when processing # this request response_db_txn_duration = metrics.register_counter( "response_db_txn_duration_seconds", labels=["method", "servlet", "tag"], + alternative_names=( + metrics.name_prefix + "_response_db_txn_duration:total", + ), ) # seconds spent waiting for a db connection, when processing this request diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py index c3d8237e8f..e4b5687a4b 100644 --- a/synapse/util/metrics.py +++ b/synapse/util/metrics.py @@ -31,28 +31,53 @@ metrics = synapse.metrics.get_metrics_for(__name__) block_counter = metrics.register_counter( "block_count", labels=["block_name"], + alternative_names=( + # the following are all deprecated aliases for the same metric + metrics.name_prefix + x for x in ( + "_block_timer:count", + "_block_ru_utime:count", + "_block_ru_stime:count", + "_block_db_txn_count:count", + "_block_db_txn_duration:count", + ) + ) ) block_timer = metrics.register_counter( "block_time_seconds", labels=["block_name"], + alternative_names=( + metrics.name_prefix + "_block_timer:total", + ), ) block_ru_utime = metrics.register_counter( "block_ru_utime_seconds", labels=["block_name"], + alternative_names=( + metrics.name_prefix + "_block_ru_utime:total", + ), ) block_ru_stime = metrics.register_counter( "block_ru_stime_seconds", labels=["block_name"], + alternative_names=( + metrics.name_prefix + "_block_ru_stime:total", + ), ) block_db_txn_count = metrics.register_counter( "block_db_txn_count", labels=["block_name"], + alternative_names=( + metrics.name_prefix + "_block_db_txn_count:total", + ), ) # seconds spent waiting for db txns, excluding scheduling time, in this block block_db_txn_duration = metrics.register_counter( "block_db_txn_duration_seconds", labels=["block_name"], + alternative_names=( + metrics.name_prefix + "_block_db_txn_duration:total", + ), ) # seconds spent waiting for a 
db connection, in this block
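These block counters are driven by a context manager in the same module
(Measure) that times a named block of code; a rough, standalone
approximation of that mechanism (illustrative only, not the actual
implementation)::

    import time
    from contextlib import contextmanager

    block_time_seconds = {}  # stand-in for the registered counter

    @contextmanager
    def measure(block_name):
        # Time the enclosed block and account the duration under its
        # name, which is what populates the "block_name" label on the
        # counters registered above.
        start = time.time()
        try:
            yield
        finally:
            block_time_seconds[block_name] = (
                block_time_seconds.get(block_name, 0.0) + time.time() - start
            )

    with measure("get_group"):
        time.sleep(0.01)  # the work being measured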